6 #ifndef QUIESCENT_STATE_BASED_IMPL
7 #error "This is an impl file and must not be included directly!"
10 #include <xenium/detail/port.hpp>
11 #include <xenium/reclamation/detail/orphan.hpp>
15 namespace xenium::reclamation {
// Per-thread entry in the global thread list. The `entry` base class
// (from detail::thread_block_list) presumably supplies the is_active()
// state queried in try_update_epoch() below — confirm against the header.
struct quiescent_state_based::thread_control_block : detail::thread_block_list<thread_control_block>::entry {
  // Epoch this thread last announced; written by the owning thread and
  // read by other threads when they attempt to advance the global epoch.
  std::atomic<unsigned> local_epoch;
// Per-thread reclamation state: region nesting counter, lazily acquired
// control block, and one retire list per epoch slot.
// NOTE(review): this excerpt appears truncated — several function headers
// and closing braces are not visible; comments below hedge accordingly.
struct quiescent_state_based::thread_data {
  // [destructor fragment] NOTE(review): the enclosing function header is not
  // visible here; presumably ~thread_data(). Cleanup: nothing to do if this
  // thread never acquired a control block.
  if (control_block == nullptr) {

  // Only create an orphan if at least one retire list still holds nodes.
  if (std::any_of(retire_lists.begin(), retire_lists.end(), [](auto p) { return p != nullptr; })) {
    // Target epoch is global_epoch - 1 (mod number_epochs): the nodes become
    // reclaimable once the global epoch has cycled back around past it.
    auto target_epoch = (global_epoch.load(std::memory_order_relaxed) + number_epochs - 1) % number_epochs;
    assert(target_epoch < number_epochs);
    // Hand the remaining retired nodes to the global list so another
    // thread can adopt (see adopt_orphans) and eventually delete them.
    global_thread_block_list.abandon_retired_nodes(new detail::orphan<number_epochs>(target_epoch, retire_lists));

  // Return the control block for reuse by future threads.
  global_thread_block_list.release_entry(control_block);
  control_block = nullptr;

  // [enter_region fragment] Lazily set up the control block before the
  // (not visible here) region-entry bookkeeping.
  ensure_has_control_block();

  // [leave_region fragment] Only the outermost region exit triggers a
  // quiescent state.
  if (--region_entries == 0) {

  // Retire p under the epoch this thread last announced.
  void add_retired_node(detail::deletable_object* p) {
    assert(control_block != nullptr);
    add_retired_node(p, control_block->local_epoch.load(std::memory_order_relaxed));

  // On first use: acquire a control block from the global list and bring the
  // local epoch in sync with the global epoch.
  void ensure_has_control_block() {
    if (control_block == nullptr) {
      control_block = global_thread_block_list.acquire_entry();
      auto epoch = global_epoch.load(std::memory_order_relaxed);
        // Publish our local epoch, then re-check the global epoch via the
        // CAS below; retry until both agree.
        control_block->local_epoch.store(epoch, std::memory_order_relaxed);
      // NOTE(review): the opening `do {` of this loop is not visible in this
      // excerpt. The value-preserving CAS (epoch -> epoch) is used for its
      // acq_rel ordering, not to change the epoch.
      }
      while (!global_epoch.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel, std::memory_order_relaxed));

  // Called on the outermost region exit: try to advance the global epoch,
  // then announce the (possibly new) epoch and reclaim this thread's nodes
  // from the reused epoch slot.
  void quiescent_state() {
    auto epoch = global_epoch.load(std::memory_order_acquire);
    if (control_block->local_epoch.load(std::memory_order_relaxed) == epoch) {
      const auto new_epoch = (epoch + 1) % number_epochs;
      if (!try_update_epoch(epoch, new_epoch)) {

    control_block->local_epoch.store(epoch, std::memory_order_release);
    // Nodes in this slot were retired a full epoch cycle ago (presumably —
    // depends on the not-visible control flow above) and are deleted here.
    detail::delete_objects(retire_lists[epoch]);

  // Prepend p to the retire list for the given epoch slot (intrusive
  // singly-linked list through deletable_object::next).
  void add_retired_node(detail::deletable_object* p, size_t epoch) {
    assert(epoch < number_epochs);
    p->next = retire_lists[epoch];
    retire_lists[epoch] = p;

  // Attempt to advance the global epoch from curr_epoch to new_epoch.
  bool try_update_epoch(unsigned curr_epoch, unsigned new_epoch) {
    const auto old_epoch = (curr_epoch + number_epochs - 1) % number_epochs;
    // A thread blocks the update if it is still active while announcing the
    // previous epoch.
    auto prevents_update = [old_epoch](const thread_control_block& data) {
      // TSAN_MEMORY_ORDER picks one ordering for ThreadSanitizer builds and
      // the other otherwise — see xenium/detail/port.hpp for which is which.
      constexpr auto memory_order = TSAN_MEMORY_ORDER(std::memory_order_acquire, std::memory_order_relaxed);
      return data.local_epoch.load(memory_order) == old_epoch && data.is_active(memory_order);

    bool cannot_update = std::any_of(global_thread_block_list.begin(), global_thread_block_list.end(), prevents_update);

    if (global_epoch.load(std::memory_order_relaxed) == curr_epoch) {
      std::atomic_thread_fence(std::memory_order_acquire);
      // CAS ensures that concurrent updaters advance the epoch exactly once.
      bool success = global_epoch.compare_exchange_strong(curr_epoch, new_epoch, std::memory_order_acq_rel, std::memory_order_relaxed);

  // Take over retired nodes abandoned by exited threads and file each orphan
  // under its recorded target epoch.
  void adopt_orphans() {
    auto* cur = global_thread_block_list.adopt_abandoned_retired_nodes();
    for (detail::deletable_object* next = nullptr; cur != nullptr; cur = next) {
      add_retired_node(cur, static_cast<detail::orphan<number_epochs>*>(cur)->target_epoch);

  // Nesting depth of enter_region()/leave_region() calls.
  unsigned region_entries = 0;
  // Lazily acquired via ensure_has_control_block(); nullptr until then and
  // again after the destructor releases it.
  thread_control_block* control_block = nullptr;
  // One intrusive retire list head per epoch slot; zero-initialized.
  std::array<detail::deletable_object*, number_epochs> retire_lists = {};

  friend class quiescent_state_based;
  ALLOCATION_COUNTER(quiescent_state_based);
// Entering a region marks this thread as active in the current epoch so
// retired nodes cannot be reclaimed while the guard is alive.
inline quiescent_state_based::region_guard::region_guard() noexcept {
  local_thread_data().enter_region();
// Leaving the region may trigger a quiescent state (epoch advance plus
// reclamation) when the outermost region is exited — see
// thread_data::leave_region.
inline quiescent_state_based::region_guard::~region_guard() noexcept {
  local_thread_data().leave_region();
// Guarding a non-trivial pointer enters the critical region for the
// lifetime of the guard_ptr.
// NOTE(review): excerpt truncated — any null-check surrounding the
// enter_region() call is not visible here; confirm against the full file.
template <class T, class MarkedPtr>
quiescent_state_based::guard_ptr<T, MarkedPtr>::guard_ptr(const MarkedPtr& p) noexcept : base(p) {
  local_thread_data().enter_region();
162 template <
class T,
class MarkedPtr>
163 quiescent_state_based::guard_ptr<T, MarkedPtr>::guard_ptr(
const guard_ptr& p) noexcept : guard_ptr(MarkedPtr(p)) {}
// Move constructor: takes over the source guard's pointer via the base
// class; no new region entry is performed here.
// NOTE(review): the remainder of the body (presumably clearing p.ptr so the
// source guard releases nothing) is not visible in this excerpt.
template <class T, class MarkedPtr>
quiescent_state_based::guard_ptr<T, MarkedPtr>::guard_ptr(guard_ptr&& p) noexcept : base(p.ptr) {
// Copy assignment.
// NOTE(review): excerpt truncated — the self-assignment/reset logic that
// normally precedes this region entry is not visible here.
template <class T, class MarkedPtr>
typename quiescent_state_based::template guard_ptr<T, MarkedPtr>&
quiescent_state_based::guard_ptr<T, MarkedPtr>::operator=(const guard_ptr& p) noexcept {
  // Account for the additional guard now referencing the object.
  local_thread_data().enter_region();
// Move assignment: take over the source guard's pointer.
// NOTE(review): excerpt truncated — the preceding reset of this guard and
// the clearing of p.ptr / return statement are not visible here.
template <class T, class MarkedPtr>
typename quiescent_state_based::template guard_ptr<T, MarkedPtr>&
quiescent_state_based::guard_ptr<T, MarkedPtr>::operator=(guard_ptr&& p) noexcept {
  this->ptr = std::move(p.ptr);
// Acquire a guarded snapshot of p.
// NOTE(review): excerpt truncated — the conditionals around the
// enter_region()/leave_region() calls are only partially visible; the calls
// below are presumably conditional in the full file.
template <class T, class MarkedPtr>
void quiescent_state_based::guard_ptr<T, MarkedPtr>::acquire(const concurrent_ptr<T>& p, std::memory_order order) noexcept {
  // Cheap relaxed pre-check: nothing to guard if p is currently null.
  if (p.load(std::memory_order_relaxed) == nullptr) {
  local_thread_data().enter_region();
  // Re-load with the caller-requested memory ordering.
  this->ptr = p.load(order);
  local_thread_data().leave_region();
// Acquire a guarded snapshot of p only if it currently equals `expected`;
// returns whether the guard now holds `expected`.
// NOTE(review): excerpt truncated — closing braces and any intermediate
// statements between the visible lines are not shown.
template <class T, class MarkedPtr>
bool quiescent_state_based::guard_ptr<T, MarkedPtr>::acquire_if_equal(const concurrent_ptr<T>& p, const MarkedPtr& expected, std::memory_order order) noexcept {
  auto actual = p.load(std::memory_order_relaxed);
  // Fast path: null or mismatch — report without entering the region.
  if (actual == nullptr || actual != expected) {
    return actual == expected;

  local_thread_data().enter_region();
  this->ptr = p.load(order);
  // The value may have changed between the two loads; undo the region entry
  // if we did not actually capture `expected`.
  if (!this->ptr || this->ptr != expected) {
    local_thread_data().leave_region();

  return this->ptr == expected;
// Release the guard.
// NOTE(review): excerpt truncated — the guard condition around this call
// and the clearing of this->ptr are not visible here.
template <class T, class MarkedPtr>
void quiescent_state_based::guard_ptr<T, MarkedPtr>::reset() noexcept {
  local_thread_data().leave_region();
// Retire the guarded object: attach the deleter and hand the node to the
// thread-local retire list; actual deletion is deferred until a safe epoch.
// NOTE(review): excerpt truncated — the trailing reset of this->ptr is not
// visible here.
template <class T, class MarkedPtr>
void quiescent_state_based::guard_ptr<T, MarkedPtr>::reclaim(Deleter d) noexcept {
  this->ptr->set_deleter(std::move(d));
  local_thread_data().add_retired_node(this->ptr.get());
// Accessor for the per-thread reclamation state (function-local
// thread_local singleton; constructed on first use, destroyed — abandoning
// leftover retired nodes — on thread exit via ~thread_data).
inline quiescent_state_based::thread_data& quiescent_state_based::local_thread_data() {
  static thread_local thread_data local_thread_data;
  return local_thread_data;
262 #ifdef TRACK_ALLOCATIONS
// Allocation-tracking hook (only compiled under TRACK_ALLOCATIONS):
// forwards to the thread-local allocation counter.
inline void quiescent_state_based::count_allocation() {
  local_thread_data().allocation_counter.count_allocation();
// Reclamation-tracking hook (only compiled under TRACK_ALLOCATIONS):
// forwards to the thread-local allocation counter.
inline void quiescent_state_based::count_reclamation() {
  local_thread_data().allocation_counter.count_reclamation();