6 #ifndef HAZARD_ERAS_IMPL
7 #error "This is an impl file and must not be included directly!"
10 #include <xenium/aligned_object.hpp>
11 #include <xenium/detail/port.hpp>
19 #pragma warning(disable : 4324) // structure was padded due to alignment specifier
20 #pragma warning(disable : 26495) // uninitialized member variable
23 namespace xenium::reclamation {
// Constructs a guard protecting `p`.
// If `p` is non-null, the current global era is read and a hazard-era entry is
// allocated from the thread-local data so the pointee cannot be reclaimed
// while this guard is alive.
template <class Traits>
template <class T, class MarkedPtr>
hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::guard_ptr(const MarkedPtr& p) : base(p), he() {
  if (this->ptr.get() != nullptr) {
    // Relaxed load of the era clock; presumably the required ordering comes
    // from the seq-cst fences used elsewhere in the scheme (see scan) —
    // TODO(review): confirm against the full file.
    auto era = era_clock.load(std::memory_order_relaxed);
    he = local_thread_data().alloc_hazard_era(era);
// Copy constructor: shares the source's protected pointer and its hazard-era
// entry (`he` is copied, not re-allocated).
// NOTE(review): the body (which presumably bumps the entry's guard count) is
// not visible in this excerpt — confirm against the full file.
template <class Traits>
template <class T, class MarkedPtr>
hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::guard_ptr(const guard_ptr& p) : base(p.ptr), he(p.he) {
// Move constructor: takes over the source's pointer and hazard-era entry
// (ownership is transferred; the source is presumably cleared in the body,
// which is not visible in this excerpt).
template <class Traits>
template <class T, class MarkedPtr>
hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::guard_ptr(guard_ptr&& p) noexcept : base(p.ptr), he(p.he) {
// Copy assignment: mirrors the copy constructor — releases this guard's
// current protection and then shares the source's pointer and hazard-era
// entry. (Body not visible in this excerpt.)
template <class Traits>
template <class T, class MarkedPtr>
auto hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::operator=(const guard_ptr& p) noexcept -> guard_ptr& {
// Move assignment: releases this guard's current protection (elided lines)
// and steals the source's pointer and hazard-era entry.
template <class Traits>
template <class T, class MarkedPtr>
auto hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::operator=(guard_ptr&& p) noexcept -> guard_ptr& {
  this->ptr = std::move(p.ptr);
// Acquires a protected snapshot of `p`.
// Protection works by publishing the era in which the pointer was read; the
// surrounding retry loop (its scaffolding is elided in this excerpt)
// re-reads until the era observed before and after the load of `p` matches.
template <class Traits>
template <class T, class MarkedPtr>
void hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::acquire(const concurrent_ptr<T>& p, std::memory_order order) {
  if (order == std::memory_order_relaxed || order == std::memory_order_consume) {
    // Upgrade to acquire: the load of `p` must synchronize with the writer's
    // release store for the protection protocol to be sound.
    order = std::memory_order_acquire;
  // Era currently published by our hazard-era entry (0 if we have none yet).
  era_t prev_era = he == nullptr ? 0 : he->get_era();
  auto value = p.load(order);
  auto era = era_clock.load(std::memory_order_relaxed);
  if (era == prev_era) {
  // Our entry publishes a stale era; it must differ from the freshly read one.
  assert(he->get_era() != era);
  // Sole guard on this entry -> it can be reused by republishing the new era.
  if (he->guards() == 1) {
  // No usable entry at this point -> allocate one publishing the current era.
  assert(he == nullptr);
  he = local_thread_data().alloc_hazard_era(era);
// Like acquire(), but only takes protection if `p` currently equals
// `expected`. Returns true iff the loaded value equals `expected` (in which
// case a protected guard is held); returns false without acquiring otherwise.
template <class Traits>
template <class T, class MarkedPtr>
bool hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::acquire_if_equal(const concurrent_ptr<T>& p,
                                                                    const MarkedPtr& expected,
                                                                    std::memory_order order) {
  if (order == std::memory_order_relaxed || order == std::memory_order_consume) {
    // Upgrade to acquire for the same reason as in acquire().
    order = std::memory_order_acquire;
  auto p1 = p.load(order);
  if (p1 == nullptr || p1 != expected) {
    // Null needs no protection; a mismatch means we must not acquire at all.
    return p1 == expected;
  const auto era = era_clock.load(std::memory_order_relaxed);
  // Reuse our hazard-era entry if we are its only guard ...
  if (he != nullptr && he->guards() == 1) {
  // ... otherwise allocate a fresh entry publishing the current era.
  he = local_thread_data().alloc_hazard_era(era);
  // Re-read and verify the value did not change while we published the era.
  this->ptr = p.load(std::memory_order_relaxed);
  if (this->ptr != p1) {
// Drops this guard's protection: returns the hazard-era entry to the
// thread-local pool. (Pointer reset is handled in elided lines.)
template <class Traits>
template <class T, class MarkedPtr>
void hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::reset() noexcept {
  // release_hazard_era takes `he` by reference and nulls it out,
  // as the assert below relies on.
  local_thread_data().release_hazard_era(he);
  assert(this->he == nullptr);
// Swaps the guard-specific state (the hazard-era entry) with `g`; the base
// class is responsible for the protected pointer itself. (Body not visible in
// this excerpt.)
template <class Traits>
template <class T, class MarkedPtr>
void hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::do_swap(guard_ptr& g) noexcept {
// Retires the protected object: installs the deleter, stamps the object with
// its retirement era, and hands it to the thread-local retire list. The
// object is actually deleted once no hazard-era entry publishes an era inside
// its [construction_era, retirement_era] range (see thread_data::scan).
template <class Traits>
template <class T, class MarkedPtr>
void hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::reclaim(Deleter d) noexcept {
  auto* p = this->ptr.get();
  p->set_deleter(std::move(d));
  // Bump the global era clock; release ordering makes prior accesses to the
  // object visible to threads that observe the new era.
  p->retirement_era = era_clock.fetch_add(1, std::memory_order_release);
  // Trigger a scan once enough retired nodes have accumulated.
  if (local_thread_data().add_retired_node(p) >= allocation_strategy::retired_nodes_threshold()) {
    local_thread_data().scan();
// Per-thread control block holding this thread's hazard-era entries.
// Cache-line aligned (alignas(64)) to limit false sharing between threads.
template <class Strategy, class Derived>
struct alignas(64) basic_he_thread_control_block :
  detail::thread_block_list<Derived, detail::deletable_object_with_eras>::entry,
  aligned_object<basic_he_thread_control_block<Strategy, Derived>> {
  using era_t = uint64_t;

  // NOTE(review): asserting last_hazard_era != nullptr at destruction looks
  // inverted (one would expect it cleared by then) — verify against the full
  // file; this excerpt is truncated.
  ~basic_he_thread_control_block() { assert(last_hazard_era != nullptr); }

  // --- The members below belong to the nested hazard-era entry record; its
  // declaration line is elided in this excerpt. `value` encodes either a
  // published era (shifted left by one, mark bit clear) or a free-list link
  // (mark bit set).

  // Publishes `era` in this entry.
  void set_era(era_t era) {
    value.store(marked_ptr(reinterpret_cast<void**>(era << 1)), std::memory_order_release);
    // Seq-cst fence so the published era is globally visible before the
    // caller re-reads the pointer it wants to protect.
    std::atomic_thread_fence(std::memory_order_seq_cst);

  // Returns the era currently published by this entry.
  [[nodiscard]] era_t get_era() const {
    bool success = try_get_era(result);

  // Number of guard_ptrs currently sharing this entry (owner-thread only).
  [[nodiscard]] uint64_t guards() const { return guard_cnt; }
  uint64_t add_guard() { return ++guard_cnt; }
  uint64_t release_guard() { return --guard_cnt; }

  // Extracts the published era into `result`; the shift undoes the encoding
  // used by set_era. (The link/era discrimination happens in elided lines.)
  bool try_get_era(era_t& result) const {
    // Under TSAN a stronger order is used so the race detector can see the
    // synchronization that is otherwise established through fences.
    constexpr auto memory_order = TSAN_MEMORY_ORDER(std::memory_order_acquire, std::memory_order_relaxed);
    auto v = value.load(memory_order);
    result = reinterpret_cast<era_t>(v.get()) >> 1;

  // Turns this (unguarded) entry into a free-list node pointing at `link`;
  // the mark bit distinguishes links from published eras.
  void set_link(hazard_era* link) {
    assert(guard_cnt == 0);
    value.store(marked_ptr(reinterpret_cast<void**>(link), 1), std::memory_order_release);

  [[nodiscard]] hazard_era* get_link() const {
    return reinterpret_cast<hazard_era*>(value.load(std::memory_order_relaxed).get());

  [[nodiscard]] bool is_link() const { return value.load(std::memory_order_relaxed).mark() != 0; }

  // Era-or-link word; nullptr initially (nothing published).
  std::atomic<marked_ptr> value{nullptr};
  // Plain integer: only ever touched by the owning thread.
  uint64_t guard_cnt = 0;

  // --- control-block scope resumes here ---

  // A `hint` is the head of the thread's free list of hazard-era entries.
  using hint = hazard_era*;

  // Called when a thread takes ownership of this control block: account for
  // our entries and build the initial free list.
  void initialize(hint& hint) {
    Strategy::number_of_active_hes.fetch_add(self().number_of_hes(), std::memory_order_relaxed);
    hint = initialize_block(self());

    // (abandon path, opening elided) withdraw our entries from the active
    // count and hand this control block back to the global list.
    Strategy::number_of_active_hes.fetch_sub(self().number_of_hes(), std::memory_order_relaxed);
    detail::thread_block_list<Derived, detail::deletable_object_with_eras>::entry::abandon();

  // Hands out an entry publishing `era`. Fast path: the most recently used
  // entry already publishes this era, so only its guard count is bumped.
  // Otherwise an entry is popped from the free list, growing the pool via
  // need_more_hes() when it is exhausted.
  hazard_era* alloc_hazard_era(hint& hint, era_t era) {
    if (last_hazard_era && last_era == era) {
      last_hazard_era->add_guard();
      return last_hazard_era;
    if (result == nullptr) {
      result = self().need_more_hes();
    hint = result->get_link();
    result->set_era(era);
    last_hazard_era = result;

  // Returns an entry once its last guard is gone; clears `last_hazard_era`
  // so the fast path above never hands out a freed entry.
  void release_hazard_era(hazard_era*& he, hint& hint) {
    assert(he != nullptr);
    if (he->release_guard() == 0) {
      if (he == last_hazard_era) {
        last_hazard_era = nullptr;

  // CRTP helper to reach the concrete (static/dynamic) control block.
  Derived& self() { return static_cast<Derived&>(*this); }

  hazard_era* begin() { return &eras[0]; }
  hazard_era* end() { return &eras[Strategy::K]; }
  [[nodiscard]] const hazard_era* begin() const { return &eras[0]; }
  [[nodiscard]] const hazard_era* end() const { return &eras[Strategy::K]; }

  // Links the entries of `block` into a free list; the last entry is chained
  // to the next block (if any).
  template <typename T>
  static hazard_era* initialize_block(T& block) {
    auto begin = block.begin();
    auto end = block.end() - 1;
    for (auto it = begin; it != end;) {
    end->set_link(block.initialize_next_block());

  // Collects the eras published by entries in [begin, end) into
  // `protected_eras`; entries holding free-list links contribute nothing
  // (try_get_era's filtering happens in elided lines).
  gather_protected_eras(std::vector<era_t>& protected_eras, const hazard_era* begin, const hazard_era* end) {
    for (auto it = begin; it != end; ++it) {
      if (it->try_get_era(era)) {
        protected_eras.push_back(era);

  // Cache of the most recently used entry (fast path in alloc_hazard_era).
  hazard_era* last_hazard_era;
  // Statically sized first block of entries.
  hazard_era eras[Strategy::K];
// Control block with a fixed pool of Strategy::K hazard-era entries.
// Requesting more than K entries fails with bad_hazard_era_alloc.
template <class Strategy>
struct static_he_thread_control_block :
  basic_he_thread_control_block<Strategy, static_he_thread_control_block<Strategy>> {
  using base = basic_he_thread_control_block<Strategy, static_he_thread_control_block>;
  using hazard_era = typename base::hazard_era;
  using era_t = typename base::era_t;

  // Gathers the eras published by this thread's (single, static) entry block.
  void gather_protected_eras(std::vector<era_t>& protected_eras) const {
    base::gather_protected_eras(protected_eras, this->begin(), this->end());

  // The static pool cannot grow — exhausting it is a hard error.
  hazard_era* need_more_hes() { throw bad_hazard_era_alloc("hazard era pool exceeded"); }
  [[nodiscard]] constexpr size_t number_of_hes() const { return Strategy::K; }
  // There is never a follow-up block in the static variant.
  [[nodiscard]] constexpr hazard_era* initialize_next_block() const { return nullptr; }
// Control block that grows its pool on demand: extra hazard-era entries are
// allocated in geometrically growing blocks chained through `next`.
template <class Strategy>
struct dynamic_he_thread_control_block :
  basic_he_thread_control_block<Strategy, dynamic_he_thread_control_block<Strategy>> {
  using base = basic_he_thread_control_block<Strategy, dynamic_he_thread_control_block>;
  using hazard_era = typename base::hazard_era;
  using era_t = typename base::era_t;

  // Gathers eras from the static entries and every dynamic block.
  void gather_protected_eras(std::vector<era_t>& protected_eras) const {
    gather_protected_eras(*this, protected_eras);

  // Header of a dynamically allocated block of `size` entries; the entries
  // live in the same allocation, directly behind this header (see begin()).
  struct alignas(64) hazard_eras_block : aligned_object<hazard_eras_block> {
    explicit hazard_eras_block(size_t size) : size(size) {
      // Construct the entries placed after this header.
      for (auto it = begin(); it != end(); ++it) {

    // Entries start immediately after the header object.
    hazard_era* begin() { return reinterpret_cast<hazard_era*>(this + 1); }
    hazard_era* end() { return begin() + size; }
    [[nodiscard]] const hazard_era* begin() const { return reinterpret_cast<const hazard_era*>(this + 1); }
    [[nodiscard]] const hazard_era* end() const { return begin() + size; }

    [[nodiscard]] const hazard_eras_block* next_block() const { return next; }
    hazard_era* initialize_next_block() { return next ? base::initialize_block(*next) : nullptr; }

    hazard_eras_block* next = nullptr;

  // Acquire-load pairs with the release-store in allocate_new_hazard_eras_block
  // so scanning threads only ever see fully initialized blocks.
  [[nodiscard]] const hazard_eras_block* next_block() const {
    return he_block.load(std::memory_order_acquire);
  [[nodiscard]] size_t number_of_hes() const { return total_number_of_hes; }
  hazard_era* need_more_hes() { return allocate_new_hazard_eras_block(); }
  hazard_era* initialize_next_block() {
    // Only the owning thread writes he_block -> relaxed is sufficient here.
    auto block = he_block.load(std::memory_order_relaxed);
    return block ? base::initialize_block(*block) : nullptr;

  // Walks the chain of blocks recursively, collecting published eras.
  template <typename T>
  static void gather_protected_eras(const T& block, std::vector<era_t>& protected_eras) {
    base::gather_protected_eras(protected_eras, block.begin(), block.end());
    const auto* next = block.next_block();
    gather_protected_eras(*next, protected_eras);

  // Allocates a block of max(K, total/2) entries (geometric growth), links
  // its entries into the free list and publishes it at the chain head.
  hazard_era* allocate_new_hazard_eras_block() {
    size_t hes = std::max(static_cast<size_t>(Strategy::K), total_number_of_hes / 2);
    total_number_of_hes += hes;
    Strategy::number_of_active_hes.fetch_add(hes, std::memory_order_relaxed);
    // One raw allocation: header followed by `hes` entries (matches begin()).
    size_t buffer_size = sizeof(hazard_eras_block) + hes * sizeof(hazard_era);
    void* buffer = hazard_eras_block::operator new(buffer_size);
    auto block = ::new (buffer) hazard_eras_block(hes);
    auto result = this->initialize_block(*block);
    block->next = he_block.load(std::memory_order_relaxed);
    // Release-store publishes the fully initialized block (see next_block()).
    he_block.store(block, std::memory_order_release);

  size_t total_number_of_hes = Strategy::K;
  std::atomic<hazard_eras_block*> he_block;
// Thread-local bookkeeping: owns this thread's control block, its free-list
// hint, and the list of retired-but-not-yet-reclaimed nodes.
template <class Traits>
struct alignas(64) hazard_eras<Traits>::thread_data : aligned_object<thread_data> {
  using HE = typename thread_control_block::hazard_era*;

  // (destructor; its opening lines are elided in this excerpt) On thread
  // exit: anything still on the retire list is abandoned to the global list,
  // and the control block is returned for reuse by other threads.
  if (retire_list != nullptr) {
    if (retire_list != nullptr) {
      global_thread_block_list.abandon_retired_nodes(retire_list);
    retire_list = nullptr;
    if (control_block != nullptr) {
      global_thread_block_list.release_entry(control_block);
      control_block = nullptr;

  // Allocates a hazard-era entry publishing `era`, lazily acquiring a control
  // block on first use.
  HE alloc_hazard_era(era_t era) {
    ensure_has_control_block();
    return control_block->alloc_hazard_era(hint, era);

  void release_hazard_era(HE& he) {
    // Guards are released by the thread that acquired them, so the control
    // block must already exist by now.
    assert(control_block != nullptr);
    control_block->release_hazard_era(he, hint);

  // Pushes `p` onto the retire list; the returned count lets the caller
  // decide whether to trigger a scan.
  std::size_t add_retired_node(detail::deletable_object_with_eras* p) {
    p->next = retire_list;
    return ++number_of_retired_nodes;

  // (scan(); its opening lines are elided) Collects every era currently
  // protected by any active thread, then reclaims each retired node whose
  // [construction_era, retirement_era] range contains none of them.
    std::vector<era_t> protected_eras;
    protected_eras.reserve(allocation_strategy::number_of_active_hazard_eras());
    // Seq-cst fence orders our scan against era publication in other threads
    // (pairs with the fence in the hazard-era entry's set_era).
    std::atomic_thread_fence(std::memory_order_seq_cst);
    auto adopted_nodes = global_thread_block_list.adopt_abandoned_retired_nodes();
    // Visit every thread's control block and gather its published eras.
    global_thread_block_list.begin(), global_thread_block_list.end(), [&protected_eras](const auto& entry) {
      constexpr auto memory_order = TSAN_MEMORY_ORDER(std::memory_order_acquire, std::memory_order_relaxed);
      if (entry.is_active(memory_order)) {
        entry.gather_protected_eras(protected_eras);
    std::atomic_thread_fence(std::memory_order_acquire);
    // Sort + dedupe so reclaim_nodes can use binary search below.
    std::sort(protected_eras.begin(), protected_eras.end());
    auto last = std::unique(protected_eras.begin(), protected_eras.end());
    protected_eras.erase(last, protected_eras.end());
    // Detach the retire list before reclaiming; survivors get re-added.
    auto* list = retire_list;
    retire_list = nullptr;
    number_of_retired_nodes = 0;
    reclaim_nodes(list, protected_eras);
    reclaim_nodes(adopted_nodes, protected_eras);

  // Lazily grabs an inactive control block from the global list, initializes
  // it for this thread and marks it active.
  void ensure_has_control_block() {
    if (control_block == nullptr) {
      control_block = global_thread_block_list.acquire_inactive_entry();
      control_block->initialize(hint);
      control_block->activate();

  // Deletes every node in `list` whose lifetime range contains no protected
  // era; nodes still protected go back onto the retire list.
  void reclaim_nodes(detail::deletable_object_with_eras* list, const std::vector<era_t>& protected_eras) {
    while (list != nullptr) {
      // protected_eras is sorted: the first era >= construction_era either
      // lies inside [construction_era, retirement_era] (node still
      // protected) or past retirement_era (node safe to delete).
      auto era_it = std::lower_bound(protected_eras.begin(), protected_eras.end(), cur->construction_era);
      if (era_it == protected_eras.end() || *era_it > cur->retirement_era) {
      // Still protected — keep it around for a later scan.
      add_retired_node(cur);

  detail::deletable_object_with_eras* retire_list = nullptr;
  std::size_t number_of_retired_nodes = 0;
  // Free-list head for the control block's hazard-era entries.
  typename thread_control_block::hint hint;
  thread_control_block* control_block = nullptr;

  friend class hazard_eras;
  ALLOCATION_COUNTER(hazard_eras);
// Returns the calling thread's thread_data instance.
template <class Traits>
inline typename hazard_eras<Traits>::thread_data& hazard_eras<Traits>::local_thread_data() {
  // Function-local thread_local: constructed lazily on first use per thread,
  // destroyed at thread exit.
  static thread_local thread_data local_thread_data;
  return local_thread_data;
539 #ifdef TRACK_ALLOCATIONS
// Allocation-tracking hook (compiled only under TRACK_ALLOCATIONS).
template <class Traits>
inline void hazard_eras<Traits>::count_allocation() {
  local_thread_data().allocation_counter.count_allocation();
// Reclamation-tracking hook (compiled only under TRACK_ALLOCATIONS).
template <class Traits>
inline void hazard_eras<Traits>::count_reclamation() {
  local_thread_data().allocation_counter.count_reclamation();