xenium
thread_block_list.hpp
//
// Copyright (c) 2018-2020 Manuel Pöter.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
//

#ifndef XENIUM_DETAIL_THREAD_BLOCK_LIST_HPP
#define XENIUM_DETAIL_THREAD_BLOCK_LIST_HPP

#include <atomic>
#include <cassert>
#include <iterator>
#include <type_traits>

#ifdef _MSC_VER
  #pragma warning(push)
  #pragma warning(disable : 4324) // structure was padded due to alignment specifier
#endif
namespace xenium::reclamation::detail {

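// A lock-free, grow-only list of per-thread control blocks, shared by the
// reclamation schemes. A thread acquires an entry either by adopting a free
// (abandoned) one or by allocating a new one and pushing it onto the list;
// entries are never unlinked, only marked free again via abandon(), so the
// list can be traversed safely at any time.
//
// Illustrative usage sketch (names such as thread_control_block, local_epoch
// and global_thread_block_list are made up for this example and are not part
// of the library):
//
//   struct thread_control_block : thread_block_list<thread_control_block>::entry {
//     std::atomic<unsigned> local_epoch{0};
//   };
//
//   inline thread_block_list<thread_control_block> global_thread_block_list;
//
//   // per thread: acquire a control block and release it again on thread exit
//   auto* my_block = global_thread_block_list.acquire_entry();
//   // ...
//   global_thread_block_list.release_entry(my_block);
//
//   // scanning the control blocks of all (potentially active) threads
//   for (auto& block : global_thread_block_list) {
//     if (block.is_active()) {
//       // inspect block.local_epoch, ...
//     }
//   }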
template <typename T, typename DeletableObject = detail::deletable_object>
class thread_block_list {
  enum class entry_state { free, inactive, active };

public:
  struct entry {
    entry() : next_entry(nullptr), state(entry_state::active) {}

    // Normally this load operation can use relaxed semantics, as all reclamation schemes
    // that use it have an acquire-fence that is sequenced-after calling is_active.
    // However, TSan does not support acquire-fences, so in order to avoid false
    // positives we have to allow other memory orders as well.
    [[nodiscard]] bool is_active(std::memory_order memory_order = std::memory_order_relaxed) const {
      return state.load(memory_order) == entry_state::active;
    }

    void abandon() {
      assert(is_active());
      // (1) - this release-store synchronizes-with the acquire-CAS (2)
      // or any acquire-fence that is sequenced-after calling is_active.
      state.store(entry_state::free, std::memory_order_release);
    }

    void activate() {
      assert(state.load(std::memory_order_relaxed) == entry_state::inactive);
      state.store(entry_state::active, std::memory_order_release);
    }

  private:
    friend class thread_block_list;

    bool try_adopt(entry_state initial_state) {
      if (state.load(std::memory_order_relaxed) == entry_state::free) {
        auto expected = entry_state::free;
        // (2) - this acquire-CAS synchronizes-with the release-store (1)
        return state.compare_exchange_strong(expected, initial_state, std::memory_order_acquire);
      }
      return false;
    }

    // next_entry is only set once when it gets inserted into the list and is never changed afterwards
    // -> therefore it does not have to be atomic
    T* next_entry;

    // state is used to manage ownership and active status of entries
    std::atomic<entry_state> state;
  };

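  // Forward iterator over all entries in the list, regardless of their state.
  // Since entries are only ever added and never unlinked, iterating is safe
  // even while other threads concurrently insert new entries.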
  class iterator {
    T* ptr = nullptr;

    explicit iterator(T* ptr) : ptr(ptr) {}

  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = T;
    using difference_type = std::ptrdiff_t;
    using pointer = T*;
    using reference = T&;

    iterator() = default;

    void swap(iterator& other) noexcept { std::swap(ptr, other.ptr); }

    iterator& operator++() {
      assert(ptr != nullptr);
      ptr = ptr->next_entry;
      return *this;
    }

    iterator operator++(int) {
      assert(ptr != nullptr);
      iterator tmp(*this);
      ptr = ptr->next_entry;
      return tmp;
    }

    bool operator==(const iterator& rhs) const { return ptr == rhs.ptr; }

    bool operator!=(const iterator& rhs) const { return ptr != rhs.ptr; }

    T& operator*() const {
      assert(ptr != nullptr);
      return *ptr;
    }

    T* operator->() const {
      assert(ptr != nullptr);
      return ptr;
    }

    friend class thread_block_list;
  };

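  // acquire_entry/acquire_inactive_entry return an entry in the respective
  // state, adopting a free entry from the list if possible and allocating
  // and inserting a new one otherwise.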
  T* acquire_entry() { return adopt_or_create_entry(entry_state::active); }

  T* acquire_inactive_entry() { return adopt_or_create_entry(entry_state::inactive); }

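  // Gives up ownership of the entry; it remains in the list marked as free
  // and may later be adopted by another thread.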
  void release_entry(T* entry) { entry->abandon(); }

  iterator begin() {
    // (3) - this acquire-load synchronizes-with the release-CAS (6)
    return iterator{head.load(std::memory_order_acquire)};
  }

  iterator end() { return iterator{}; }

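  // Prepends the given chain of retired nodes to the shared list of abandoned
  // nodes, typically because the owning thread goes away before it could
  // reclaim them; another thread can take them over via
  // adopt_abandoned_retired_nodes().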
  void abandon_retired_nodes(DeletableObject* obj) {
    auto* last = obj;
    auto* next = last->next;
    while (next) {
      last = next;
      next = last->next;
    }

    auto* h = abandoned_retired_nodes.load(std::memory_order_relaxed);
    do {
      last->next = h;
      // (4) - this release-CAS synchronizes-with the acquire-exchange (5)
    } while (
      !abandoned_retired_nodes.compare_exchange_weak(h, obj, std::memory_order_release, std::memory_order_relaxed));
  }

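  // Takes over the entire list of abandoned retired nodes, or returns nullptr
  // if there are none; the caller becomes responsible for reclaiming them.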
  DeletableObject* adopt_abandoned_retired_nodes() {
    if (abandoned_retired_nodes.load(std::memory_order_relaxed) == nullptr) {
      return nullptr;
    }

    // (5) - this acquire-exchange synchronizes-with the release-CAS (4)
    return abandoned_retired_nodes.exchange(nullptr, std::memory_order_acquire);
  }

private:
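  // Pushes a newly created entry onto the head of the list with a release-CAS
  // loop; entries are only ever added to the front and never removed.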
  void add_entry(T* node) {
    auto* h = head.load(std::memory_order_relaxed);
    do {
      node->next_entry = h;
      // (6) - this release-CAS synchronizes-with the acquire-loads (3, 7)
    } while (!head.compare_exchange_weak(h, node, std::memory_order_release, std::memory_order_relaxed));
  }

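  // Scans the list for a free entry and tries to adopt it in the requested
  // state; if no entry can be adopted, a new one is allocated, initialized
  // with the requested state and pushed onto the list.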
  T* adopt_or_create_entry(entry_state initial_state) {
    static_assert(std::is_base_of<entry, T>::value, "T must derive from entry.");

    // (7) - this acquire-load synchronizes-with the release-CAS (6)
    T* result = head.load(std::memory_order_acquire);
    while (result) {
      if (result->try_adopt(initial_state)) {
        return result;
      }

      result = result->next_entry;
    }

    result = new T();
    result->state.store(initial_state, std::memory_order_relaxed);
    add_entry(result);
    return result;
  }

  std::atomic<T*> head{nullptr};

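  // alignas(64) keeps this member on its own (typical) cache line to avoid
  // false sharing with head; this is what triggers MSVC warning C4324 above.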
  alignas(64) std::atomic<DeletableObject*> abandoned_retired_nodes{nullptr};
};

} // namespace xenium::reclamation::detail

#ifdef _MSC_VER
  #pragma warning(pop)
#endif

#endif