xenium: hazard_pointer.hpp
//
// Copyright (c) 2018-2020 Manuel Pöter.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
//

#ifndef HAZARD_POINTER_IMPL
  #error "This is an impl file and must not be included directly!"
#endif

#include <xenium/aligned_object.hpp>
#include <xenium/detail/port.hpp>

#include <algorithm>
#include <cassert>
#include <new>
#include <vector>

#ifdef _MSC_VER
  #pragma warning(push)
  #pragma warning(disable : 4324) // structure was padded due to alignment specifier
#endif

namespace xenium::reclamation {

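// A guard_ptr protects the object it refers to by publishing the object's address in one of
// the owning thread's hazard-pointer slots. As long as the address is published, scan()
// treats the object as protected and will not reclaim it, so the guarded object can be
// dereferenced safely even if other threads have already retired it.
//
// Rough usage sketch (illustrative only, not part of this file; it assumes a node type
// derived from the reclaimer's enable_concurrent_ptr and default traits; consult the xenium
// documentation for the exact public interface):
//
//   hazard_pointer<>::concurrent_ptr<node> head;                  // shared pointer
//   typename hazard_pointer<>::concurrent_ptr<node>::guard_ptr g;
//   g.acquire(head, std::memory_order_acquire);                   // node is now protected
//   /* ... safely use *g ... */
//   g.reset();                                                    // drop protection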
template <class Traits>
template <class T, class MarkedPtr>
hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::guard_ptr(const MarkedPtr& p) : base(p), hp() {
  if (this->ptr.get() != nullptr) {
    hp = local_thread_data.alloc_hazard_pointer();
    hp->set_object(this->ptr.get());
  }
}

template <class Traits>
template <class T, class MarkedPtr>
hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::guard_ptr(const guard_ptr& p) : guard_ptr(p.ptr) {}

template <class Traits>
template <class T, class MarkedPtr>
hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::guard_ptr(guard_ptr&& p) noexcept : base(p.ptr), hp(p.hp) {
  p.ptr.reset();
  p.hp = nullptr;
}

template <class Traits>
template <class T, class MarkedPtr>
auto hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::operator=(const guard_ptr& p) -> guard_ptr& {
  if (&p == this) {
    return *this;
  }

  if (hp == nullptr) {
    hp = local_thread_data.alloc_hazard_pointer();
  }
  this->ptr = p.ptr;
  hp->set_object(this->ptr.get());
  return *this;
}

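// Move assignment releases the hazard pointer currently held by this guard (via reset()) and
// takes over the source's slot and pointer, leaving the source guard empty.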
template <class Traits>
template <class T, class MarkedPtr>
auto hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::operator=(guard_ptr&& p) noexcept -> guard_ptr& {
  if (&p == this) {
    return *this;
  }

  reset();
  this->ptr = std::move(p.ptr);
  hp = p.hp;
  p.ptr.reset();
  p.hp = nullptr;
  return *this;
}

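// Publishes a hazard pointer for the current value of p. Since p may change concurrently,
// the value is re-read after it has been published, and the loop retries until the published
// pointer and the re-loaded value agree; only then is the object guaranteed to be protected.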
template <class Traits>
template <class T, class MarkedPtr>
void hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::acquire(const concurrent_ptr<T>& p, std::memory_order order) {
  auto p1 = p.load(std::memory_order_relaxed);
  if (p1 == this->ptr) {
    return;
  }
  if (p1 != nullptr && hp == nullptr) {
    hp = local_thread_data.alloc_hazard_pointer();
  }
  auto p2 = p1;
  do {
    if (p2 == nullptr) {
      reset();
      return;
    }

    p1 = p2;
    hp->set_object(p1.get());
    // (1) - this load operation potentially synchronizes-with any release operation on p.
    p2 = p.load(order);
  } while (p1.get() != p2.get());

  this->ptr = p2;
}

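// Like acquire(), but only protects the object if p still equals `expected`. Returns true if
// the guard now holds `expected` (the object, if any, is protected); returns false and leaves
// the guard empty if the value of p has changed in the meantime.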
template <class Traits>
template <class T, class MarkedPtr>
bool hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::acquire_if_equal(const concurrent_ptr<T>& p,
                                                                       const MarkedPtr& expected,
                                                                       std::memory_order order) {
  auto p1 = p.load(std::memory_order_relaxed);
  if (p1 == nullptr || p1 != expected) {
    reset();
    return p1 == expected;
  }

  if (hp == nullptr) {
    hp = local_thread_data.alloc_hazard_pointer();
  }
  hp->set_object(p1.get());
  // (2) - this load operation potentially synchronizes-with any release operation on p.
  this->ptr = p.load(order);
  if (this->ptr != p1) {
    reset();
    return false;
  }
  return true;
}

template <class Traits>
template <class T, class MarkedPtr>
void hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::reset() noexcept {
  local_thread_data.release_hazard_pointer(hp);
  this->ptr.reset();
}

template <class Traits>
template <class T, class MarkedPtr>
void hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::do_swap(guard_ptr& g) noexcept {
  std::swap(hp, g.hp);
}

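// Retires the currently guarded node: protection is dropped, the node is pushed onto the
// thread-local retire list, and a scan is triggered once the number of retired nodes reaches
// the allocation strategy's threshold.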
template <class Traits>
template <class T, class MarkedPtr>
void hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::reclaim(Deleter d) noexcept {
  auto* p = this->ptr.get();
  reset();
  p->set_deleter(std::move(d));
  if (local_thread_data.add_retired_node(p) >= allocation_strategy::retired_nodes_threshold()) {
    local_thread_data.scan();
  }
}

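// Every thread owns a control block that stores its hazard-pointer slots. Unused slots are
// chained into a free list that reuses the slots' own storage: a set LSB marks the stored
// value as a link to the next free slot rather than as a pointer to a protected object.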
namespace detail {
  template <class Strategy, class Derived>
  struct alignas(64) basic_hp_thread_control_block :
      detail::thread_block_list<Derived>::entry,
      aligned_object<basic_hp_thread_control_block<Strategy, Derived>> {
    struct hazard_pointer {
      void set_object(detail::deletable_object* obj) {
        // (3) - this release-store synchronizes-with the acquire-fence (9)
        value.store(reinterpret_cast<void**>(obj), std::memory_order_release);
        // This release is required because when acquire/acquire_if_equal is called on a
        // guard_ptr that already holds an active hazard pointer, set_object is called
        // without an intermediate call to set_link, i.e., the protected pointer is updated
        // in place. The release ensures the required happens-before relation between
        // releasing a guard_ptr to a node and the reclamation of that node.

        // (4) - this seq_cst-fence enforces a total order with the seq_cst-fence (8)
        std::atomic_thread_fence(std::memory_order_seq_cst);
      }

      bool try_get_object(detail::deletable_object*& result) const {
        // TSan does not support explicit fences, so we cannot rely on the acquire-fence (9)
        // but have to perform an acquire-load here to avoid false positives.
        constexpr auto memory_order = TSAN_MEMORY_ORDER(std::memory_order_acquire, std::memory_order_relaxed);
        auto v = value.load(memory_order);
        if (v.mark() == 0) {
          result = reinterpret_cast<detail::deletable_object*>(v.get());
          return true;
        }
        return false; // value contains a link
      }

      void set_link(hazard_pointer* link) {
        // (5) - this release-store synchronizes-with the acquire-fence (9)
        value.store(marked_ptr<void*, 1>(reinterpret_cast<void**>(link), 1), std::memory_order_release);
      }
      [[nodiscard]] hazard_pointer* get_link() const {
        assert(is_link());
        return reinterpret_cast<hazard_pointer*>(value.load(std::memory_order_relaxed).get());
      }

      [[nodiscard]] bool is_link() const { return value.load(std::memory_order_relaxed).mark() != 0; }

    private:
      // Since we use the hazard pointer array to build our internal linked list of hazard pointers,
      // we set the LSB to signal that this is an internal pointer and not a pointer to a protected object.
      std::atomic<marked_ptr<void*, 1>> value;
    };

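    // The hint is the head of the owning thread's free list of hazard-pointer slots. It is
    // stored in the thread_data and passed in by reference, so allocating and releasing a
    // slot only modifies state owned by that thread; other threads merely read the slots
    // during scan().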
    using hint = hazard_pointer*;

    void initialize(hint& hint) {
      Strategy::number_of_active_hps.fetch_add(self().number_of_hps(), std::memory_order_relaxed);
      hint = initialize_block(self());
    }

    void abandon() {
      Strategy::number_of_active_hps.fetch_sub(self().number_of_hps(), std::memory_order_relaxed);
      detail::thread_block_list<Derived>::entry::abandon();
    }

    hazard_pointer* alloc_hazard_pointer(hint& hint) {
      auto result = hint;
      if (result == nullptr) {
        result = self().need_more_hps();
      }

      hint = result->get_link();
      return result;
    }

    void release_hazard_pointer(hazard_pointer*& hp, hint& hint) {
      if (hp != nullptr) {
        hp->set_link(hint);
        hint = hp;
        hp = nullptr;
      }
    }

  protected:
    Derived& self() { return static_cast<Derived&>(*this); }

    hazard_pointer* begin() { return &pointers[0]; }
    hazard_pointer* end() { return &pointers[Strategy::K]; }
    [[nodiscard]] const hazard_pointer* begin() const { return &pointers[0]; }
    [[nodiscard]] const hazard_pointer* end() const { return &pointers[Strategy::K]; }

    template <typename T>
    static hazard_pointer* initialize_block(T& block) {
      auto begin = block.begin();
      auto end = block.end() - 1; // the last element is handled specially, so loop only over n-1 entries
      for (auto it = begin; it != end;) {
        auto next = it + 1;
        it->set_link(next);
        it = next;
      }
      end->set_link(block.initialize_next_block());
      return begin;
    }

    static void gather_protected_pointers(std::vector<const detail::deletable_object*>& protected_ptrs,
                                          const hazard_pointer* begin,
                                          const hazard_pointer* end) {
      for (auto it = begin; it != end; ++it) {
        detail::deletable_object* obj;
        if (it->try_get_object(obj)) {
          protected_ptrs.push_back(obj);
        }
      }
    }

    hazard_pointer pointers[Strategy::K];
  };

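  // static_hp_thread_control_block provides a fixed pool of Strategy::K slots per thread and
  // throws bad_hazard_pointer_alloc once that pool is exhausted; dynamic_hp_thread_control_block
  // (below) starts with K slots and allocates additional blocks on demand.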
  template <class Strategy>
  struct static_hp_thread_control_block :
      basic_hp_thread_control_block<Strategy, static_hp_thread_control_block<Strategy>> {
    using base = basic_hp_thread_control_block<Strategy, static_hp_thread_control_block>;
    using hazard_pointer = typename base::hazard_pointer;
    friend base;

    void gather_protected_pointers(std::vector<const detail::deletable_object*>& protected_ptrs) const {
      base::gather_protected_pointers(protected_ptrs, this->begin(), this->end());
    }

  private:
    hazard_pointer* need_more_hps() { throw bad_hazard_pointer_alloc("hazard pointer pool exceeded"); }
    [[nodiscard]] constexpr size_t number_of_hps() const { return Strategy::K; }
    [[nodiscard]] constexpr hazard_pointer* initialize_next_block() const { return nullptr; }
  };

  template <class Strategy>
  struct dynamic_hp_thread_control_block :
      basic_hp_thread_control_block<Strategy, dynamic_hp_thread_control_block<Strategy>> {
    using base = basic_hp_thread_control_block<Strategy, dynamic_hp_thread_control_block>;
    using hazard_pointer = typename base::hazard_pointer;
    friend base;

    void gather_protected_pointers(std::vector<const detail::deletable_object*>& protected_ptrs) const {
      gather_protected_pointers(*this, protected_ptrs);
    }

  private:
    struct alignas(64) hazard_pointer_block : aligned_object<hazard_pointer_block> {
      explicit hazard_pointer_block(size_t size) : size(size) {}

      hazard_pointer* begin() { return reinterpret_cast<hazard_pointer*>(this + 1); }
      hazard_pointer* end() { return begin() + size; }

      [[nodiscard]] const hazard_pointer* begin() const { return reinterpret_cast<const hazard_pointer*>(this + 1); }
      [[nodiscard]] const hazard_pointer* end() const { return begin() + size; }

      [[nodiscard]] const hazard_pointer_block* next_block() const { return next; }
      hazard_pointer* initialize_next_block() { return next ? base::initialize_block(*next) : nullptr; }

      hazard_pointer_block* next = nullptr;
      const size_t size;
    };

    [[nodiscard]] const hazard_pointer_block* next_block() const {
      // (6) - this acquire-load synchronizes-with the release-store (7)
      return hp_block.load(std::memory_order_acquire);
    }
    [[nodiscard]] size_t number_of_hps() const { return total_number_of_hps; }
    hazard_pointer* need_more_hps() { return allocate_new_hazard_pointer_block(); }

    hazard_pointer* initialize_next_block() {
      auto block = hp_block.load(std::memory_order_relaxed);
      return block ? base::initialize_block(*block) : nullptr;
    }

    template <typename T>
    static void gather_protected_pointers(const T& block,
                                          std::vector<const detail::deletable_object*>& protected_ptrs) {
      base::gather_protected_pointers(protected_ptrs, block.begin(), block.end());

      const auto* next = block.next_block();
      if (next) {
        gather_protected_pointers(*next, protected_ptrs);
      }
    }

    static detail::deletable_object* as_internal_pointer(hazard_pointer* p) {
      // Since we use the hazard pointer array to build our internal linked list of hazard pointers,
      // we set the LSB to signal that this is an internal pointer and not a pointer to a protected object.
      auto marked = reinterpret_cast<size_t>(p) | 1;
      return reinterpret_cast<detail::deletable_object*>(marked);
    }

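    // Allocates an additional block of hazard-pointer slots. Each new block holds at least
    // Strategy::K slots and grows the thread's total by roughly 50%. The block is prepended
    // to the block list with the release-store (7), so a scanning thread that observes it via
    // the acquire-load (6) also sees the fully initialized slots.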
    hazard_pointer* allocate_new_hazard_pointer_block() {
      size_t hps = std::max(static_cast<size_t>(Strategy::K), total_number_of_hps / 2);
      total_number_of_hps += hps;
      Strategy::number_of_active_hps.fetch_add(hps, std::memory_order_relaxed);

      size_t buffer_size = sizeof(hazard_pointer_block) + hps * sizeof(hazard_pointer);
      void* buffer = hazard_pointer_block::operator new(buffer_size);
      auto block = ::new (buffer) hazard_pointer_block(hps);
      auto result = this->initialize_block(*block);
      block->next = hp_block.load(std::memory_order_relaxed);
      // (7) - this release-store synchronizes-with the acquire-load (6)
      hp_block.store(block, std::memory_order_release);
      return result;
    }

    size_t total_number_of_hps = Strategy::K;
    std::atomic<hazard_pointer_block*> hp_block;
  };
} // namespace detail

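// Per-thread state of the reclamation scheme: the control block holding this thread's
// hazard-pointer slots, the free-slot hint, and the list of retired nodes. On thread exit the
// destructor performs a final scan and abandons any still-protected nodes to the global list
// so that another thread can reclaim them later.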
template <class Traits>
struct alignas(64) hazard_pointer<Traits>::thread_data : aligned_object<thread_data> {
  using HP = typename thread_control_block::hazard_pointer*;

  ~thread_data() {
    if (retire_list != nullptr) {
      scan();
      if (retire_list != nullptr) {
        global_thread_block_list.abandon_retired_nodes(retire_list);
      }
      retire_list = nullptr;
    }

    if (control_block != nullptr) {
      global_thread_block_list.release_entry(control_block);
      control_block = nullptr;
    }
  }

  HP alloc_hazard_pointer() {
    ensure_has_control_block();
    return control_block->alloc_hazard_pointer(hint);
  }

  void release_hazard_pointer(HP& hp) { control_block->release_hazard_pointer(hp, hint); }

  std::size_t add_retired_node(detail::deletable_object* p) {
    p->next = retire_list;
    retire_list = p;
    return ++number_of_retired_nodes;
  }

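  // The reclamation pass: the seq_cst fence (8) orders the scan against the fences in
  // set_object (4); the hazard pointers published by all active threads are gathered and
  // sorted, and every retired node (including adopted abandoned nodes) that is not found via
  // binary search is deleted. Nodes that are still protected are re-added to the retire list.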
  void scan() {
    std::vector<const detail::deletable_object*> protected_pointers;
    protected_pointers.reserve(allocation_strategy::number_of_active_hazard_pointers());

    // (8) - this seq_cst-fence enforces a total order with the seq_cst-fence (4)
    std::atomic_thread_fence(std::memory_order_seq_cst);

    auto adopted_nodes = global_thread_block_list.adopt_abandoned_retired_nodes();

    std::for_each(
      global_thread_block_list.begin(), global_thread_block_list.end(), [&protected_pointers](const auto& entry) {
        // TSan does not support explicit fences, so we cannot rely on the acquire-fence (9)
        // but have to perform an acquire-load here to avoid false positives.
        constexpr auto memory_order = TSAN_MEMORY_ORDER(std::memory_order_acquire, std::memory_order_relaxed);
        if (entry.is_active(memory_order)) {
          entry.gather_protected_pointers(protected_pointers);
        }
      });

    // (9) - this acquire-fence synchronizes-with the release-stores (3, 5)
    std::atomic_thread_fence(std::memory_order_acquire);

    std::sort(protected_pointers.begin(), protected_pointers.end());

    auto* list = retire_list;
    retire_list = nullptr;
    number_of_retired_nodes = 0;
    reclaim_nodes(list, protected_pointers);
    reclaim_nodes(adopted_nodes, protected_pointers);
  }

private:
  void ensure_has_control_block() {
    if (control_block == nullptr) {
      control_block = global_thread_block_list.acquire_entry();
      control_block->initialize(hint);
    }
  }

  void reclaim_nodes(detail::deletable_object* list,
                     const std::vector<const detail::deletable_object*>& protected_pointers) {
    while (list != nullptr) {
      auto* cur = list;
      list = list->next;

      if (std::binary_search(protected_pointers.begin(), protected_pointers.end(), cur)) {
        add_retired_node(cur);
      } else {
        cur->delete_self();
      }
    }
  }

  detail::deletable_object* retire_list = nullptr;
  std::size_t number_of_retired_nodes = 0;
  typename thread_control_block::hint hint{};

  thread_control_block* control_block = nullptr;

  friend class hazard_pointer;
  ALLOCATION_COUNTER(hazard_pointer);
};

#ifdef TRACK_ALLOCATIONS
template <class Traits>
inline void hazard_pointer<Traits>::count_allocation() {
  local_thread_data.allocation_counter.count_allocation();
}

template <class Traits>
inline void hazard_pointer<Traits>::count_reclamation() {
  local_thread_data.allocation_counter.count_reclamation();
}
#endif
} // namespace xenium::reclamation

#ifdef _MSC_VER
  #pragma warning(pop)
#endif