xenium
hazard_eras.hpp
1 //
2 // Copyright (c) 2018-2020 Manuel Pöter.
3 // Licensed under the MIT License. See LICENSE file in the project root for full license information.
4 //
5 
6 #ifndef XENIUM_HAZARD_ERAS_HPP
7 #define XENIUM_HAZARD_ERAS_HPP
8 
9 #include <xenium/reclamation/detail/allocation_tracker.hpp>
10 #include <xenium/reclamation/detail/concurrent_ptr.hpp>
11 #include <xenium/reclamation/detail/deletable_object.hpp>
12 #include <xenium/reclamation/detail/guard_ptr.hpp>
13 #include <xenium/reclamation/detail/thread_block_list.hpp>
14 
15 #include <xenium/acquire_guard.hpp>
16 #include <xenium/parameter.hpp>
17 #include <xenium/policy.hpp>
18 
19 #include <memory>
20 #include <stdexcept>
21 
22 namespace xenium::reclamation {
23 
29 class bad_hazard_era_alloc : public std::runtime_error {
30  using std::runtime_error::runtime_error;
31 };
32 
33 namespace detail {
34  struct deletable_object_with_eras;
35 
36  template <class Strategy, class Derived>
37  struct basic_he_thread_control_block;
38 
39  template <size_t K_, size_t A, size_t B, template <class> class ThreadControlBlock>
40  struct generic_hazard_era_allocation_strategy {
41  static constexpr size_t K = K_;
42 
43  static size_t retired_nodes_threshold() { return A * number_of_active_hazard_eras() + B; }
44 
45  static size_t number_of_active_hazard_eras() { return number_of_active_hes.load(std::memory_order_relaxed); }
46 
47  using thread_control_block = ThreadControlBlock<generic_hazard_era_allocation_strategy>;
48 
49  private:
50  friend thread_control_block;
51  friend basic_he_thread_control_block<generic_hazard_era_allocation_strategy, thread_control_block>;
52 
53  inline static std::atomic<size_t> number_of_active_hes{0};
54  };
55 
56  template <class Strategy>
57  struct static_he_thread_control_block;
58 
59  template <class Strategy>
60  struct dynamic_he_thread_control_block;
61 } // namespace detail
62 
63 namespace he_allocation {
79  template <size_t K = 2, size_t A = 2, size_t B = 100>
80  struct static_strategy :
81  detail::generic_hazard_era_allocation_strategy<K, A, B, detail::static_he_thread_control_block> {};
82 
104  template <size_t K = 2, size_t A = 2, size_t B = 100>
105  struct dynamic_strategy :
106  detail::generic_hazard_era_allocation_strategy<K, A, B, detail::dynamic_he_thread_control_block> {};
107 } // namespace he_allocation
108 
109 template <class AllocationStrategy = he_allocation::static_strategy<3>>
110 struct hazard_era_traits {
111  using allocation_strategy = AllocationStrategy;
112 
113  template <class... Policies>
114  using with = hazard_era_traits<parameter::type_param_t<policy::allocation_strategy, AllocationStrategy, Policies...>>;
115 };
116 
135 template <class Traits = hazard_era_traits<>>
136 class hazard_eras {
137  using allocation_strategy = typename Traits::allocation_strategy;
138  using thread_control_block = typename allocation_strategy::thread_control_block;
139  friend detail::basic_he_thread_control_block<allocation_strategy, thread_control_block>;
140 
141  template <class T, class MarkedPtr>
142  class guard_ptr;
143 
144 public:
155  template <class... Policies>
156  using with = hazard_eras<typename Traits::template with<Policies...>>;
157 
158  template <class T, std::size_t N = 0, class Deleter = std::default_delete<T>>
159  class enable_concurrent_ptr;
160 
161  class region_guard {};
162 
163  template <class T, std::size_t N = T::number_of_mark_bits>
164  using concurrent_ptr = detail::concurrent_ptr<T, N, guard_ptr>;
165 
166  ALLOCATION_TRACKER;
167 
168 private:
169  struct thread_data;
170 
171  friend struct detail::deletable_object_with_eras;
172 
173  using era_t = uint64_t;
174  inline static std::atomic<era_t> era_clock{1};
175  inline static detail::thread_block_list<thread_control_block, detail::deletable_object_with_eras>
176  global_thread_block_list{};
177  static thread_data& local_thread_data();
178 
179  ALLOCATION_TRACKING_FUNCTIONS;
180 };
181 
182 template <class Traits>
183 template <class T, std::size_t N, class Deleter>
184 class hazard_eras<Traits>::enable_concurrent_ptr :
185  public detail::deletable_object_impl<T, Deleter, detail::deletable_object_with_eras>,
186  private detail::tracked_object<hazard_eras> {
187 public:
188  static constexpr std::size_t number_of_mark_bits = N;
189 
190 protected:
191  enable_concurrent_ptr() noexcept { this->construction_era = era_clock.load(std::memory_order_relaxed); }
192  enable_concurrent_ptr(const enable_concurrent_ptr&) noexcept = default;
193  enable_concurrent_ptr(enable_concurrent_ptr&&) noexcept = default;
194  enable_concurrent_ptr& operator=(const enable_concurrent_ptr&) noexcept = default;
195  enable_concurrent_ptr& operator=(enable_concurrent_ptr&&) noexcept = default;
196  ~enable_concurrent_ptr() noexcept override = default;
197 
198 private:
199  friend detail::deletable_object_impl<T, Deleter>;
200 
201  template <class, class>
202  friend class guard_ptr;
203 };
204 
205 template <class Traits>
206 template <class T, class MarkedPtr>
207 class hazard_eras<Traits>::guard_ptr : public detail::guard_ptr<T, MarkedPtr, guard_ptr<T, MarkedPtr>> {
208  using base = detail::guard_ptr<T, MarkedPtr, guard_ptr>;
209  using Deleter = typename T::Deleter;
210 
211 public:
212  guard_ptr() noexcept = default;
213 
214  // Guard a marked ptr.
215  explicit guard_ptr(const MarkedPtr& p);
216  guard_ptr(const guard_ptr& p);
217  guard_ptr(guard_ptr&& p) noexcept;
218 
219  guard_ptr& operator=(const guard_ptr& p) noexcept;
220  guard_ptr& operator=(guard_ptr&& p) noexcept;
221 
222  // Atomically take a snapshot of p and, *if* it points to an unreclaimed object, acquire shared ownership of it.
223  void acquire(const concurrent_ptr<T>& p, std::memory_order order = std::memory_order_seq_cst);
224 
225  // Like acquire, but quit early if a snapshot != expected.
226  bool acquire_if_equal(const concurrent_ptr<T>& p,
227  const MarkedPtr& expected,
228  std::memory_order order = std::memory_order_seq_cst);
229 
230  // Release ownership. Postcondition: get() == nullptr.
231  void reset() noexcept;
232 
233  // Reset. Deleter d will be applied some time after all owners release their ownership.
234  void reclaim(Deleter d = Deleter()) noexcept;
235 
236 private:
237  using enable_concurrent_ptr = hazard_eras::enable_concurrent_ptr<T, MarkedPtr::number_of_mark_bits, Deleter>;
238 
239  friend base;
240  void do_swap(guard_ptr& g) noexcept;
241 
242  typename thread_control_block::hazard_era* he = nullptr;
243 };
244 
245 namespace detail {
246  struct deletable_object_with_eras {
247  virtual void delete_self() = 0;
248  deletable_object_with_eras* next = nullptr;
249 
250  protected:
251  virtual ~deletable_object_with_eras() = default;
252  using era_t = size_t;
253  era_t construction_era{};
254  era_t retirement_era{};
255  template <class>
256  friend class hazard_eras;
257 
258 #ifdef __clang__
259  #pragma clang diagnostic push
260  // clang does not support dependent nested types for friend class declaration
261  #pragma clang diagnostic ignored "-Wunsupported-friend"
262 #endif
263  template <class T>
264  friend struct hazard_eras<T>::thread_data;
265 #ifdef __clang__
266  #pragma clang diagnostic pop
267 #endif
268  };
269 } // namespace detail
270 } // namespace xenium::reclamation
271 
272 #define HAZARD_ERAS_IMPL
273 #include <xenium/reclamation/impl/hazard_eras.hpp>
274 #undef HAZARD_ERAS_IMPL
275 
276 #endif
xenium::reclamation::bad_hazard_era_alloc
This exception is thrown if a thread tries to allocate a new hazard era, but the number of available ...
Definition: hazard_eras.hpp:29
xenium::reclamation::hazard_eras
An implementation of the hazard eras scheme proposed by Ramalhete and Correia [RC17].
Definition: hazard_eras.hpp:136
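The listing above only declares the reclaimer's interface. As a quick orientation, the following minimal sketch shows the usual wiring: a node type derives from enable_concurrent_ptr, is published through a concurrent_ptr, and readers protect it with a guard_ptr before dereferencing. This is an illustrative sketch, not code from the xenium sources; the names node, head, read_head_or and replace_head are made up, and replace_head assumes a single writer so that a plain store suffices instead of a compare_exchange loop.

#include <xenium/reclamation/hazard_eras.hpp>
#include <atomic>

using reclaimer = xenium::reclamation::hazard_eras<>;

// A type that shall be reclaimed via hazard eras derives from
// enable_concurrent_ptr; the deleter defaults to std::default_delete<node>.
struct node : reclaimer::enable_concurrent_ptr<node> {
  explicit node(int v) : value(v) {}
  int value;
};

// A shared, atomically updated pointer to such a node (illustrative global).
reclaimer::concurrent_ptr<node> head;

int read_head_or(int fallback) {
  // Acquire shared ownership of the current head before dereferencing it;
  // the guard keeps the object alive even if a writer retires it concurrently.
  reclaimer::concurrent_ptr<node>::guard_ptr guard;
  guard.acquire(head, std::memory_order_acquire);
  return guard.get() ? guard->value : fallback;
}

void replace_head(int v) {
  reclaimer::concurrent_ptr<node>::guard_ptr old;
  old.acquire(head);
  head.store(new node(v), std::memory_order_release); // assumed single writer
  if (old.get()) {
    old.reclaim(); // deleted once no thread holds an era protecting the old node
  }
}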
xenium::reclamation::detail::concurrent_ptr
T must be derived from enable_concurrent_ptr<T>. D is a deleter.
Definition: concurrent_ptr.hpp:17
xenium::reclamation::he_allocation::dynamic_strategy
Hazard era allocation strategy for a dynamic number of hazard eras per thread.
Definition: hazard_eras.hpp:105
xenium::reclamation::he_allocation::static_strategy
Hazard era allocation strategy for a static number of hazard eras per thread.
Definition: hazard_eras.hpp:80
xenium::policy::allocation_strategy
Policy to configure the allocation strategy.
Definition: policy.hpp:96
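The allocation strategy, and with it the parameters K, A and B, is selected at compile time through the with<> alias from the listing. A small hedged sketch of both variants, assuming nothing beyond the declarations shown above:

#include <xenium/reclamation/hazard_eras.hpp>

// Default traits: he_allocation::static_strategy<3>, i.e. a fixed budget of
// K = 3 hazard eras per thread; retired nodes are scanned for reclamation once
// roughly retired_nodes_threshold() = A * active_eras + B = 2 * active_eras + 100
// of them have accumulated (see retired_nodes_threshold() in the listing).
using static_he = xenium::reclamation::hazard_eras<>;

// Swap in the dynamic strategy via the allocation_strategy policy; here K = 3
// eras are kept per thread up front and further eras can be allocated on demand
// (an assumption about the dynamic strategy, based on its description above).
using dynamic_he = xenium::reclamation::hazard_eras<>::with<
    xenium::policy::allocation_strategy<
        xenium::reclamation::he_allocation::dynamic_strategy<3>>>;

Either alias can then be used wherever a reclaimer type is expected, e.g. as the reclaimer policy of one of xenium's concurrent data structures.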