#ifndef GENERIC_EPOCH_BASED_IMPL
  #error "This is an impl file and must not be included directly!"
#endif

#include <xenium/detail/port.hpp>

#ifdef _MSC_VER
  #pragma warning(push)
  #pragma warning(disable : 4127) // conditional expression is constant
#endif

namespace xenium::reclamation {
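// Epoch-based reclamation in a nutshell: each thread announces via its
// thread_control_block whether it is currently inside a critical region and
// which epoch it has observed. The global epoch may only be advanced once every
// thread inside a critical region has caught up to the current epoch (verified
// by one of the scan strategies below); retired nodes are freed once their
// epoch slot is recycled, at which point no thread can still hold a protected
// reference to them.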
namespace scan {
  struct all_threads {
    template <class Reclaimer>
    struct type {
      bool scan(typename Reclaimer::epoch_t epoch) {
        auto prevents_update = [epoch](const typename Reclaimer::thread_control_block& data) -> bool {
          // TSan does not support explicit fences, so use acquire-loads here to avoid false positives.
          constexpr auto memory_order = TSAN_MEMORY_ORDER(std::memory_order_acquire, std::memory_order_relaxed);
          return data.is_in_critical_region.load(memory_order) && data.local_epoch.load(memory_order) != epoch;
        };

        // The epoch may only be advanced if no thread is still in a critical region with an older epoch.
        return !std::any_of(
          Reclaimer::global_thread_block_list.begin(), Reclaimer::global_thread_block_list.end(), prevents_update);
      }

      void reset() {}
    };
  };
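  // all_threads inspects every thread on each scan call, so a single successful
  // scan suffices to allow an epoch update; the price is an O(#threads) check
  // on every scan.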
  template <unsigned N>
  struct n_threads {
    template <class Reclaimer>
    struct type {
      bool scan(typename Reclaimer::epoch_t epoch) {
        for (unsigned i = 0; i < N; ++i) {
          // TSan does not support explicit fences, so use acquire-loads here to avoid false positives.
          constexpr auto memory_order = TSAN_MEMORY_ORDER(std::memory_order_acquire, std::memory_order_relaxed);
          if (!thread_iterator->is_in_critical_region.load(memory_order) ||
              thread_iterator->local_epoch.load(memory_order) == epoch) {
            if (++thread_iterator == Reclaimer::global_thread_block_list.end()) {
              return true;
            }
          }
        }
        return false;
      }

      void reset() { thread_iterator = Reclaimer::global_thread_block_list.begin(); }

    private:
      typename detail::thread_block_list<typename Reclaimer::thread_control_block>::iterator thread_iterator;
    };
  };
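  // n_threads checks at most N threads per scan call and remembers its position
  // in thread_iterator; it only reports success once the iterator has traversed
  // the whole list, amortizing the scan cost over several critical entries.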
  // one_thread checks a single thread per scan call - it behaves like n_threads<1>.
  struct one_thread {
    template <class Reclaimer>
    struct type : n_threads<1>::template type<Reclaimer> {};
  };
} // namespace scan

namespace abandon {
  struct never {
    using retire_list = detail::retire_list<>;
    static void apply(retire_list&, detail::orphan_list<>&) {}
  };

  struct always {
    using retire_list = detail::retire_list<>;
    static void apply(retire_list& retire_list, detail::orphan_list<>& orphans) {
      if (!retire_list.empty()) {
        orphans.add(retire_list.steal());
      }
    }
  };

  template <size_t Threshold>
  struct when_exceeds_threshold {
    using retire_list = detail::counting_retire_list<>;
    static void apply(retire_list& retire_list, detail::orphan_list<>& orphans) {
      if (retire_list.size() >= Threshold) {
        orphans.add(retire_list.steal());
      }
    }
  };
} // namespace abandon
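// The scan and abandon strategies above are plugged in via the reclaimer's
// Traits. Illustrative combination (policy names as declared in the
// accompanying generic_epoch_based.hpp header):
//
//   using Reclaimer = generic_epoch_based<>::with<
//     xenium::policy::scan<scan::n_threads<4>>,
//     xenium::policy::abandon<abandon::when_exceeds_threshold<20>>>;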
template <class Traits>
generic_epoch_based<Traits>::region_guard::region_guard() noexcept {
  local_thread_data.enter_region();
}

template <class Traits>
generic_epoch_based<Traits>::region_guard::~region_guard() noexcept {
  local_thread_data.leave_region();
}
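// Usage sketch (illustrative): a region_guard keeps the thread inside a region
// across multiple operations, so that - depending on
// Traits::region_extension_type - repeated guard_ptr acquisitions can avoid
// toggling the critical-region flag each time:
//
//   generic_epoch_based<>::region_guard rg;
//   // ... several operations on data structures using this reclaimer ...
//   // the region is left automatically when rg goes out of scope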
template <class Traits>
template <class T, class MarkedPtr>
generic_epoch_based<Traits>::guard_ptr<T, MarkedPtr>::guard_ptr(const MarkedPtr& p) noexcept : base(p) {
  if (this->ptr) {
    local_thread_data.enter_critical();
  }
}
template <class Traits>
template <class T, class MarkedPtr>
generic_epoch_based<Traits>::guard_ptr<T, MarkedPtr>::guard_ptr(const guard_ptr& p) noexcept :
    guard_ptr(MarkedPtr(p)) {}
template <class Traits>
template <class T, class MarkedPtr>
generic_epoch_based<Traits>::guard_ptr<T, MarkedPtr>::guard_ptr(guard_ptr&& p) noexcept : base(p.ptr) {
  // Take over ownership - the source must not trigger a leave_critical on reset.
  p.ptr.reset();
}
template <class Traits>
template <class T, class MarkedPtr>
auto generic_epoch_based<Traits>::guard_ptr<T, MarkedPtr>::operator=(const guard_ptr& p) noexcept -> guard_ptr& {
  if (&p == this) {
    return *this;
  }

  reset();
  this->ptr = p.ptr;
  if (this->ptr) {
    local_thread_data.enter_critical();
  }

  return *this;
}
template <class Traits>
template <class T, class MarkedPtr>
auto generic_epoch_based<Traits>::guard_ptr<T, MarkedPtr>::operator=(guard_ptr&& p) noexcept -> guard_ptr& {
  if (&p == this) {
    return *this;
  }

  reset();
  this->ptr = std::move(p.ptr);
  p.ptr.reset();

  return *this;
}
template <class Traits>
template <class T, class MarkedPtr>
void generic_epoch_based<Traits>::guard_ptr<T, MarkedPtr>::acquire(const concurrent_ptr<T>& p,
                                                                   std::memory_order order) noexcept {
  if (p.load(std::memory_order_relaxed) == nullptr) {
    reset();
    return;
  }

  if (!this->ptr) {
    local_thread_data.enter_critical();
  }
  // (1) - this load operation potentially synchronizes-with any release operation on p.
  this->ptr = p.load(order);
  if (!this->ptr) {
    local_thread_data.leave_critical();
  }
}
template <class Traits>
template <class T, class MarkedPtr>
bool generic_epoch_based<Traits>::guard_ptr<T, MarkedPtr>::acquire_if_equal(const concurrent_ptr<T>& p,
                                                                            const MarkedPtr& expected,
                                                                            std::memory_order order) noexcept {
  auto actual = p.load(std::memory_order_relaxed);
  if (actual == nullptr || actual != expected) {
    reset();
    return actual == expected;
  }

  if (!this->ptr) {
    local_thread_data.enter_critical();
  }
  // (2) - this load operation potentially synchronizes-with any release operation on p.
  this->ptr = p.load(order);
  if (!this->ptr || this->ptr != expected) {
    local_thread_data.leave_critical();
    this->ptr.reset();
  }

  return this->ptr == expected;
}
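// Both acquire operations follow the same protocol: enter the critical region
// first, then (re)load the pointer. Only a value loaded while this thread's
// critical-region flag is visible is protected from reclamation; the initial
// relaxed load merely avoids entering a critical region for a pointer that is
// already null (or unequal to the expected value).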
template <class Traits>
template <class T, class MarkedPtr>
void generic_epoch_based<Traits>::guard_ptr<T, MarkedPtr>::reset() noexcept {
  if (this->ptr) {
    local_thread_data.leave_critical();
  }
  this->ptr.reset();
}
template <class Traits>
template <class T, class MarkedPtr>
void generic_epoch_based<Traits>::guard_ptr<T, MarkedPtr>::reclaim(Deleter d) noexcept {
  this->ptr->set_deleter(std::move(d));
  local_thread_data.add_retired_node(this->ptr.get());
  reset();
}
template <class Traits>
struct generic_epoch_based<Traits>::thread_control_block : detail::thread_block_list<thread_control_block>::entry {
  thread_control_block() : is_in_critical_region(false), local_epoch(number_epochs) {}

  std::atomic<bool> is_in_critical_region;
  std::atomic<epoch_t> local_epoch;
};
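// Each thread owns exactly one thread_control_block, linked into the global
// thread_block_list. is_in_critical_region and local_epoch are the only fields
// other threads read when deciding whether the global epoch may be advanced.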
template <class Traits>
struct generic_epoch_based<Traits>::thread_data {
  ~thread_data() {
    if (control_block == nullptr) {
      return; // no control_block -> nothing to do
    }

    for (unsigned i = 0; i < number_epochs; ++i) {
      if (!retire_lists[i].empty()) {
        orphans[i].add(retire_lists[i].steal());
      }
    }

    assert(control_block->is_in_critical_region.load(std::memory_order_relaxed) == false);
    global_thread_block_list.release_entry(control_block);
    control_block = nullptr;
  }
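  // A terminating thread may still hold unreclaimed retired nodes; the
  // destructor above hands them over to the global orphan lists (one per
  // epoch) so another thread can reclaim them once the epoch is recycled.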
  void enter_region() {
    ensure_has_control_block();
    if (Traits::region_extension_type != region_extension::none && ++region_entries == 1) {
      if (Traits::region_extension_type == region_extension::eager) {
        set_critical_region_flag();
      }
    }
  }

  void leave_region() {
    if (Traits::region_extension_type != region_extension::none && --region_entries == 0) {
      clear_critical_region_flag();
    }
  }
  void enter_critical() {
    enter_region();
    if (++nested_critical_entries == 1) {
      do_enter_critical();
    }
  }

  void leave_critical() {
    if (--nested_critical_entries == 0 && Traits::region_extension_type == region_extension::none) {
      clear_critical_region_flag();
    }
    leave_region();
  }
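  // region_entries and nested_critical_entries make regions and critical
  // sections re-entrant: only the outermost enter/leave pair actually toggles
  // the critical-region flag or performs the epoch bookkeeping.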
private:
  void ensure_has_control_block() {
    if (XENIUM_UNLIKELY(control_block == nullptr)) {
      acquire_control_block();
    }
  }

  XENIUM_NOINLINE void acquire_control_block() {
    assert(control_block == nullptr);
    control_block = global_thread_block_list.acquire_entry();
    assert(control_block->is_in_critical_region.load() == false);
    auto epoch = global_epoch.load(std::memory_order_relaxed);
    control_block->local_epoch.store(epoch, std::memory_order_relaxed);
    local_epoch_idx = epoch % number_epochs;
    scan_strategy.reset();
  }
  void set_critical_region_flag() {
    assert(!control_block->is_in_critical_region.load(std::memory_order_relaxed));
    control_block->is_in_critical_region.store(true, std::memory_order_relaxed);

    // (3) - this seq_cst fence ensures the store to is_in_critical_region is
    // visible before the subsequent load of global_epoch (store-load order).
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }
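  // The seq_cst fence in set_critical_region_flag gives store-load ordering:
  // the critical-region flag must become visible before this thread reads
  // global_epoch, so that a concurrent epoch update either observes the flag
  // or this thread observes the new epoch. Plain release/acquire ordering
  // cannot provide this guarantee.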
  void clear_critical_region_flag() {
    assert(control_block != nullptr);
    assert(control_block->is_in_critical_region.load(std::memory_order_relaxed));

    // (4) - this release-store publishes all critical-region accesses before
    // other threads may treat this thread as quiescent.
    control_block->is_in_critical_region.store(false, std::memory_order_release);
    for (unsigned i = 0; i < number_epochs; ++i) {
      Traits::abandon_strategy::apply(retire_lists[i], orphans[i]);
    }
  }
  void do_enter_critical() {
    if constexpr (Traits::region_extension_type == region_extension::none) {
      set_critical_region_flag();
    } else if constexpr (Traits::region_extension_type == region_extension::lazy) {
      if (!control_block->is_in_critical_region.load(std::memory_order_relaxed)) {
        set_critical_region_flag();
      }
    }
    assert(control_block->is_in_critical_region.load(std::memory_order_relaxed));

    // (5) - this acquire-load potentially synchronizes-with the release-CAS (7).
    auto epoch = global_epoch.load(std::memory_order_acquire);
    if (control_block->local_epoch.load(std::memory_order_relaxed) != epoch) {
      critical_entries_since_update = 0;
      update_local_epoch(epoch);
    } else if (critical_entries_since_update++ == Traits::scan_frequency) {
      critical_entries_since_update = 0;
      if (scan_strategy.scan(epoch)) {
        epoch = update_global_epoch(epoch, epoch + 1);
        update_local_epoch(epoch);
      }
    }
  }
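  // Epoch advancement is driven entirely by critical-section entries: every
  // scan_frequency-th entry triggers a scan, and only if the scan confirms
  // that all checked threads have caught up is a global epoch update attempted.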
  void update_local_epoch(epoch_t new_epoch) {
    // We either just updated the global epoch ourselves or we are observing a
    // new epoch published by some other thread. Either way we can reclaim all
    // objects from the old "incarnation" of this epoch, as well as from any
    // epochs we missed while not in a critical region.
    const auto old_epoch = control_block->local_epoch.load(std::memory_order_relaxed);
    assert(new_epoch > old_epoch);
    // TSan does not support explicit fences, so use a release-store here to avoid false positives.
    constexpr auto memory_order = TSAN_MEMORY_ORDER(std::memory_order_release, std::memory_order_relaxed);
    control_block->local_epoch.store(new_epoch, memory_order);

    auto diff = std::min<int>(static_cast<int>(number_epochs), static_cast<int>(new_epoch - old_epoch));
    epoch_t epoch_idx = local_epoch_idx;
    for (int i = diff - 1; i >= 0; --i) {
      epoch_idx = (new_epoch - i) % number_epochs;
      auto nodes = retire_lists[epoch_idx].steal();
      detail::delete_objects(nodes.first);
    }
    local_epoch_idx = epoch_idx;

    scan_strategy.reset();
  }
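  // There are only number_epochs retire lists, so at most number_epochs lists
  // have to be flushed here regardless of how many epochs passed while this
  // thread was inactive; the min() above caps the loop accordingly.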
  epoch_t update_global_epoch(epoch_t curr_epoch, epoch_t new_epoch) {
    if (global_epoch.load(std::memory_order_relaxed) == curr_epoch) {
      // (6) - this acquire-fence synchronizes-with the release-store (4).
      std::atomic_thread_fence(std::memory_order_acquire);

      // (7) - this release-CAS potentially synchronizes-with the acquire-load (5).
      bool success = global_epoch.compare_exchange_strong(
        curr_epoch, new_epoch, std::memory_order_release, std::memory_order_relaxed);
      if (XENIUM_LIKELY(success)) {
        reclaim_orphans(new_epoch);
      }
    }
    return new_epoch;
  }
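  // A failed CAS is benign: the global epoch only ever advances in steps of
  // one, so a competing thread must have installed the very same new_epoch,
  // and reclaiming the orphans of that epoch is the winner's responsibility.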
  void add_retired_node(detail::deletable_object* p) { retire_lists[local_epoch_idx].push(p); }
  void reclaim_orphans(epoch_t epoch) {
    auto idx = epoch % number_epochs;
    auto* nodes = orphans[idx].adopt();
    detail::delete_objects(nodes);
  }
  unsigned critical_entries_since_update = 0;
  unsigned nested_critical_entries = 0;
  unsigned region_entries = 0;
  typename Traits::scan_strategy::template type<generic_epoch_based> scan_strategy;
  thread_control_block* control_block = nullptr;
  epoch_t local_epoch_idx = 0;
  std::array<typename Traits::abandon_strategy::retire_list, number_epochs> retire_lists = {};

  friend class generic_epoch_based;
  ALLOCATION_COUNTER(generic_epoch_based);
};
#ifdef TRACK_ALLOCATIONS
template <class Traits>
inline void generic_epoch_based<Traits>::count_allocation() {
  local_thread_data.allocation_counter.count_allocation();
}

template <class Traits>
inline void generic_epoch_based<Traits>::count_reclamation() {
  local_thread_data.allocation_counter.count_reclamation();
}
#endif
} // namespace xenium::reclamation

#ifdef _MSC_VER
  #pragma warning(pop)
#endif