// xenium/growing_circular_array.hpp
1 //
2 // Copyright (c) 2018-2020 Manuel Pöter.
3 // Licensed under the MIT License. See LICENSE file in the project root for full license information.
4 //
5 
6 #ifndef XENIUM_GROWING_CIRCULAR_ARRAY_HPP
7 #define XENIUM_GROWING_CIRCULAR_ARRAY_HPP
8 
9 #include <xenium/utils.hpp>
10 
11 #include <atomic>
12 #include <cassert>
13 #include <cstdint>
14 
15 namespace xenium::detail {
16 template <class T, std::size_t MinCapacity = 64, std::size_t MaxCapacity = static_cast<std::size_t>(1) << 31>
17 struct growing_circular_array {
18  static constexpr std::size_t min_capacity = MinCapacity;
19  static constexpr std::size_t max_capacity = MaxCapacity;
20  static constexpr std::size_t num_buckets = utils::find_last_bit_set(max_capacity);
21 
22  static_assert(utils::is_power_of_two(min_capacity), "MinCapacity must be a power of two");
23  static_assert(utils::is_power_of_two(max_capacity), "MaxCapacity must be a power of two");
24  static_assert(min_capacity < max_capacity, "MaxCapacity must be greater than MinCapacity");
25 
26  growing_circular_array();
27  ~growing_circular_array();
28 
29  [[nodiscard]] std::size_t capacity() const { return _capacity.load(std::memory_order_relaxed); }
30 
31  T* get(std::size_t idx, std::memory_order order) {
32  // (1) - this acquire-load synchronizes-with the release-store (2)
33  auto capacitiy = _capacity.load(std::memory_order_acquire);
34  return get_entry(idx, capacitiy).load(order);
35  }
36 
37  void put(std::size_t idx, T* value, std::memory_order order) {
38  auto capacitiy = _capacity.load(std::memory_order_relaxed);
39  get_entry(idx, capacitiy).store(value, order);
40  }
41 
42  bool can_grow() { return capacity() < max_capacity; }
43 
44  void grow(std::size_t bottom, std::size_t top);
45 
46 private:
47  using entry = std::atomic<T*>;
48 
49  entry& get_entry(std::size_t idx, std::size_t capacity) {
50  idx = idx & (capacity - 1);
51  std::size_t bucket = utils::find_last_bit_set(idx);
52  assert(bucket < num_buckets);
53  // bucket can be zero, so we have to use two shifts here.
54  idx ^= (1 << bucket) >> 1;
55  return _data[bucket][idx];
56  }
57 
58  static constexpr std::size_t initial_buckets = utils::find_last_bit_set(MinCapacity);
59 
60  std::size_t _buckets;
61  std::atomic<std::size_t> _capacity;
62  entry* _data[num_buckets];
63 };
64 
65 template <class T, std::size_t MinCapacity, std::size_t Buckets>
66 growing_circular_array<T, MinCapacity, Buckets>::growing_circular_array() :
67  _buckets(initial_buckets),
68  _capacity(MinCapacity),
69  _data() {
70  auto* ptr = new entry[MinCapacity];
71  _data[0] = ptr++;
72  for (std::size_t i = 1; i < _buckets; ++i) {
73  _data[i] = ptr;
74  ptr += static_cast<std::size_t>(1) << (i - 1);
75  }
76 }
77 
78 template <class T, std::size_t MinCapacity, std::size_t Buckets>
79 growing_circular_array<T, MinCapacity, Buckets>::~growing_circular_array() {
80  delete[](_data[0]);
81  for (std::size_t i = initial_buckets; i < num_buckets; ++i) {
82  delete[](_data[i]);
83  }
84 }
85 
86 template <class T, std::size_t MinCapacity, std::size_t Buckets>
87 void growing_circular_array<T, MinCapacity, Buckets>::grow(std::size_t bottom, std::size_t top) {
88  assert(can_grow());
89 
90  auto capacity = this->capacity();
91  auto mod_mask = capacity - 1;
92  assert((capacity & mod_mask) == 0);
93 
94  _data[_buckets] = new entry[capacity];
95  _buckets++;
96  auto new_capacity = capacity * 2;
97  auto new_mod_mask = new_capacity - 1;
98 
99  auto start = top;
100  auto start_mod = top & mod_mask;
101  if (start_mod == (top & new_mod_mask)) {
102  // Make sure we don't iterate through useless indices
103  start += capacity - start_mod;
104  }
105 
106  for (std::size_t i = start; i < bottom; i++) {
107  auto oldI = i & mod_mask;
108  auto newI = i % new_mod_mask;
109  if (oldI != newI) {
110  auto oldBit = utils::find_last_bit_set(oldI);
111  auto newBit = utils::find_last_bit_set(newI);
112  auto* v = _data[oldBit][oldI ^ ((1 << (oldBit)) >> 1)].load(std::memory_order_relaxed);
113  _data[newBit][newI ^ ((1 << (newBit)) >> 1)].store(v, std::memory_order_relaxed);
114  } else {
115  // Make sure we don't iterate through useless indices
116  break;
117  }
118  }
119 
120  // (2) - this release-store synchronizes-with the acquire-load (1)
121  _capacity.store(new_capacity, std::memory_order_release);
122 }
123 } // namespace xenium::detail
124 
125 #endif