xenium
lock_free_ref_count.hpp
1 //
2 // Copyright (c) 2018-2020 Manuel Pöter.
3 // Licensed under the MIT License. See LICENSE file in the project root for full license information.
4 //
5 
6 #ifndef XENIUM_LOCK_FREE_REF_COUNT_HPP
7 #define XENIUM_LOCK_FREE_REF_COUNT_HPP
8 
9 #include <xenium/reclamation/detail/allocation_tracker.hpp>
10 #include <xenium/reclamation/detail/concurrent_ptr.hpp>
11 #include <xenium/reclamation/detail/guard_ptr.hpp>
12 
13 #include <xenium/acquire_guard.hpp>
14 #include <xenium/parameter.hpp>
15 
16 #include <memory>
17 
18 namespace xenium {
19 
20 namespace policy {
32  template <bool Value>
34 
46  template <std::size_t Value>
48 } // namespace policy
49 
50 namespace reclamation {
/// Compile-time configuration for the lock_free_ref_count reclamation scheme.
///
/// @tparam InsertPadding            if true, the per-object header is padded
///                                  (see enable_concurrent_ptr::padded_header).
/// @tparam ThreadLocalFreeListSize  capacity of the thread-local free list;
///                                  presumably 0 disables it — confirm in impl.
template <bool InsertPadding = false, std::size_t ThreadLocalFreeListSize = 0>
struct lock_free_ref_count_traits {
  static constexpr bool insert_padding = InsertPadding;
  static constexpr std::size_t thread_local_free_list_size = ThreadLocalFreeListSize;

  /// Rebind these traits, overriding individual values via policy classes
  /// (policy::insert_padding / policy::thread_local_free_list_size).
  /// Values not specified in Policies... keep their current setting.
  template <class... Policies>
  using with = lock_free_ref_count_traits<
    parameter::value_param_t<bool, policy::insert_padding, InsertPadding, Policies...>::value,
    parameter::value_param_t<std::size_t, policy::thread_local_free_list_size, ThreadLocalFreeListSize, Policies...>::
      value>;
};
62 
80  template <class Traits = lock_free_ref_count_traits<>>
82  template <class T, class MarkedPtr>
83  class guard_ptr;
84 
85  public:
86  template <class... Policies>
87  using with = lock_free_ref_count<typename Traits::template with<Policies...>>;
88 
89  template <class T, std::size_t N = T::number_of_mark_bits>
91 
92  template <class T, std::size_t N = 0, class DeleterT = std::default_delete<T>>
93  class enable_concurrent_ptr;
94 
95  class region_guard {};
96 
97  ALLOCATION_TRACKER
98  private:
99  static constexpr unsigned RefCountInc = 2;
100  static constexpr unsigned RefCountClaimBit = 1;
101 
102  ALLOCATION_TRACKING_FUNCTIONS;
103 #ifdef TRACK_ALLOCATIONS
104  inline static thread_local detail::registered_allocation_counter<lock_free_ref_count> allocation_counter_;
105  static detail::allocation_counter& allocation_counter();
106 #endif
107  };
108 
// Base class that objects managed by lock_free_ref_count must derive from.
// The reference-counting control data lives in a `header` allocated directly
// in front of the object by the custom operator new (defined in the impl file).
template <class Traits>
template <class T, std::size_t N, class DeleterT>
class lock_free_ref_count<Traits>::enable_concurrent_ptr : private detail::tracked_object<lock_free_ref_count> {
public:
  // Not copyable or movable: the control data is tied to the allocation in
  // front of `this`, so object identity matters.
  enable_concurrent_ptr(const enable_concurrent_ptr&) noexcept = delete;
  enable_concurrent_ptr(enable_concurrent_ptr&&) noexcept = delete;
  enable_concurrent_ptr& operator=(const enable_concurrent_ptr&) noexcept = delete;
  enable_concurrent_ptr& operator=(enable_concurrent_ptr&&) noexcept = delete;

protected:
  // Clears the destroyed flag on (re)construction; objects can be recycled
  // through the free list, so the flag must be reset each time.
  enable_concurrent_ptr() noexcept { destroyed().store(false, std::memory_order_relaxed); }
  virtual ~enable_concurrent_ptr() noexcept {
    assert(!is_destroyed()); // catch double destruction
    destroyed().store(true, std::memory_order_relaxed);
  }

public:
  using Deleter = DeleterT;
  // Allocation is managed by the custom operator new/delete below, so only
  // the default deleter can be supported.
  static_assert(std::is_same<Deleter, std::default_delete<T>>::value,
                "lock_free_ref_count reclamation can only be used with std::default_delete as Deleter.");

  static constexpr std::size_t number_of_mark_bits = N;
  // Current number of references; >> 1 drops the low bit, which is reserved
  // as the claim bit (references are counted in steps of RefCountInc == 2).
  [[nodiscard]] unsigned refs() const { return getHeader()->ref_count.load(std::memory_order_relaxed) >> 1; }

  // Defined in the impl file; allocates room for a `header` in front of the
  // object (see getHeader()).
  void* operator new(size_t sz);
  void operator delete(void* p);

private:
  bool decrement_refcnt();
  [[nodiscard]] bool is_destroyed() const { return getHeader()->destroyed.load(std::memory_order_relaxed); }
  void push_to_free_list() { global_free_list.push(static_cast<T*>(this)); }

  // Control block stored immediately in front of the object.
  struct unpadded_header {
    std::atomic<unsigned> ref_count; // shifted left by 1; LSB is the claim bit
    std::atomic<bool> destroyed;     // set by the destructor, cleared on construction
    concurrent_ptr<T, N> next_free;  // link while the object sits on a free list
  };
  // Pads the header to 64 bytes (presumably one cache line — confirm).
  // NOTE(review): the padding size underflows if sizeof(unpadded_header) > 64.
  struct padded_header : unpadded_header {
    char padding[64 - sizeof(unpadded_header)];
  };
  using header = std::conditional_t<Traits::insert_padding, padded_header, unpadded_header>;
  // The header is assumed to be placed directly in front of `this` by
  // operator new, hence the `- 1` pointer arithmetic.
  header* getHeader() { return static_cast<header*>(static_cast<void*>(this)) - 1; }
  [[nodiscard]] const header* getHeader() const {
    return static_cast<const header*>(static_cast<const void*>(this)) - 1;
  }

  // Accessors for the header fields.
  std::atomic<unsigned>& ref_count() { return getHeader()->ref_count; }
  std::atomic<bool>& destroyed() { return getHeader()->destroyed; }
  concurrent_ptr<T, N>& next_free() { return getHeader()->next_free; }

  friend class lock_free_ref_count;

  using guard_ptr = typename concurrent_ptr<T, N>::guard_ptr;
  using marked_ptr = typename concurrent_ptr<T, N>::marked_ptr;

  // Global free list of recycled objects; defined in the impl file.
  class free_list;
  static free_list global_free_list;
};
167 
// Guard that holds shared (reference-counted) ownership of a managed object.
// All member functions are declared here and defined in the impl file.
template <class Traits>
template <class T, class MarkedPtr>
class lock_free_ref_count<Traits>::guard_ptr : public detail::guard_ptr<T, MarkedPtr, guard_ptr<T, MarkedPtr>> {
  using base = detail::guard_ptr<T, MarkedPtr, guard_ptr>;
  using Deleter = typename T::Deleter;

public:
  template <class, std::size_t, class>
  friend class enable_concurrent_ptr;

  // Guard a marked ptr.
  explicit guard_ptr(const MarkedPtr& p = MarkedPtr()) noexcept;
  guard_ptr(const guard_ptr& p) noexcept;
  guard_ptr(guard_ptr&& p) noexcept;

  guard_ptr& operator=(const guard_ptr& p);
  guard_ptr& operator=(guard_ptr&& p) noexcept;

  // Atomically take snapshot of p, and *if* it points to unreclaimed object, acquire shared ownership of it.
  void acquire(const concurrent_ptr<T>& p, std::memory_order order = std::memory_order_seq_cst) noexcept;

  // Like acquire, but quit early if a snapshot != expected.
  bool acquire_if_equal(const concurrent_ptr<T>& p,
                        const MarkedPtr& expected,
                        std::memory_order order = std::memory_order_seq_cst) noexcept;

  // Release ownership. Postcondition: get() == nullptr.
  void reset() noexcept;

  // Reset. Deleter d will be applied some time after all owners release their ownership.
  void reclaim(Deleter d = Deleter()) noexcept;
};
200 } // namespace reclamation
201 } // namespace xenium
202 
203 #define LOCK_FREE_REF_COUNT_IMPL
204 #include <xenium/reclamation/impl/lock_free_ref_count.hpp>
205 #undef LOCK_FREE_REF_COUNT_IMPL
206 
207 #endif
xenium::reclamation::detail::concurrent_ptr
T must be derived from enable_concurrent_ptr<T>. D is a deleter.
Definition: concurrent_ptr.hpp:17
xenium::policy::insert_padding
Policy to configure whether to insert padding after the internal header for lock_free_ref_count recla...
Definition: lock_free_ref_count.hpp:33
xenium::reclamation::lock_free_ref_count
An implementation of the lock-free reference counting (LFRC) scheme as proposed by Valois [Val95,...
Definition: lock_free_ref_count.hpp:81
xenium::policy::thread_local_free_list_size
Policy to configure the size of thread-local free-lists for lock_free_ref_count reclamation.
Definition: lock_free_ref_count.hpp:47