xenium/seqlock.hpp
//
// Copyright (c) 2018-2020 Manuel Pöter.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
//

#ifndef XENIUM_SEQLOCK
#define XENIUM_SEQLOCK

#include <xenium/detail/port.hpp>
#include <xenium/parameter.hpp>

#include <atomic>
#include <cassert>
#include <cstdint>
#include <memory>
#include <new>         // for placement new
#include <type_traits> // for the type trait static_asserts below

namespace xenium {

namespace policy {
  /// Policy to configure the number of slots used in `seqlock`.
  template <unsigned Value>
  struct slots;
} // namespace policy

/// An implementation of the sequence lock (also often referred to as "sequential lock").
template <class T, class... Policies>
struct seqlock {
  using value_type = T;

  static_assert(std::is_default_constructible_v<T>, "T must be default constructible");
  static_assert(std::is_trivially_destructible_v<T>, "T must be trivially destructible");
  static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable");
  static_assert(
    sizeof(T) > sizeof(std::uintptr_t),
    "For types T with a size less than or equal to that of a pointer, use std::atomic<T> with a compare_exchange update loop.");
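
  // A minimal sketch of that suggested alternative (illustrative only, not part
  // of this header; `make_new_value` is a hypothetical update function):
  //
  //   std::atomic<T> value;
  //   T old = value.load(std::memory_order_relaxed);
  //   T desired = make_new_value(old);
  //   while (!value.compare_exchange_weak(
  //     old, desired, std::memory_order_release, std::memory_order_relaxed)) {
  //     desired = make_new_value(old); // `old` was refreshed by the failed CAS
  //   }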

  static constexpr unsigned slots = parameter::value_param_t<unsigned, policy::slots, 1, Policies...>::value;
  static_assert(slots >= 1, "slots must be >= 1");

  /// Constructs an empty object.
  seqlock() = default;

  /// Constructs an object of type T via copy construction.
  explicit seqlock(const T& data) { new (&_data[0]) T(data); }

  /// Constructs an object of type T using args as the parameter list for the constructor of T.
  template <class... Args>
  explicit seqlock(Args&&... args) {
    new (&_data[0]) T(std::forward<Args>(args)...);
  }

  seqlock(const seqlock&) = delete;
  seqlock(seqlock&&) = delete;

  seqlock& operator=(const seqlock&) = delete;
  seqlock& operator=(seqlock&&) = delete;

  /// Reads the current value.
  T load() const;

  /// Stores the given value.
  void store(const T& value);

  /// Updates the stored value with the given functor.
  template <class Func>
  void update(Func func);

private:
  using storage_t = typename std::aligned_storage<sizeof(T), alignof(T)>::type;
  using sequence_t = uintptr_t;
  using copy_t = uintptr_t;

  [[nodiscard]] bool is_write_pending(sequence_t seq) const { return (seq & 1) != 0; }

  sequence_t acquire_lock();
  void release_lock(sequence_t seq);

  void read_data(T& dest, const storage_t& src) const;
  void store_data(const T& src, storage_t& dest);

  std::atomic<sequence_t> _seq{0};
  storage_t _data[slots];
};

template <class T, class... Policies>
T seqlock<T, Policies...>::load() const {
  T result;
  // (1) - this acquire-load synchronizes-with the release-store (5)
  sequence_t seq = _seq.load(std::memory_order_acquire);
  for (;;) {
    unsigned idx;
    if constexpr (slots == 1) {
      // wait while update is in progress...
      // this is only necessary if we have a single slot, since otherwise we let
      // reader and writer operate on separate slots.
      while (is_write_pending(seq)) {
        // (2) - this acquire-load synchronizes-with the release-store (5)
        seq = _seq.load(std::memory_order_acquire);
      }
      idx = 0;
    } else {
      seq >>= 1;
      idx = seq % slots;
      seq <<= 1; // we have to ignore a potential write flag here
    }

    read_data(result, _data[idx]);

    // (3) - this acquire-load synchronizes-with the release-store (5)
    auto seq2 = _seq.load(std::memory_order_acquire);
    if (seq2 - seq < (2 * slots - 1)) {
      break;
    }
    seq = seq2;
  }
  return result;
}
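
// Worked example of the retry condition, with slots == 2: a reader sees
// _seq == 4 and reads slot (4 >> 1) % 2 == 0. A single concurrent update
// advances _seq to 6 and writes slot 1, so seq2 - seq == 2 < 2 * slots - 1 == 3
// and the snapshot taken from slot 0 is still consistent. A second update
// would overwrite slot 0, making seq2 - seq >= 3, so the reader retries.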

template <class T, class... Policies>
template <class Func>
void seqlock<T, Policies...>::update(Func func) {
  auto seq = acquire_lock();
  T data;
  auto idx = (seq >> 1) % slots;
  read_data(data, _data[idx]);
  func(data);
  store_data(data, _data[(idx + 1) % slots]);
  release_lock(seq);
}
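
// For example (illustrative only; `stats` is a hypothetical user-defined,
// trivially copyable type):
//
//   xenium::seqlock<stats> s;
//   s.update([](stats& v) { ++v.counter; });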
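
// store() publishes the new value into the slot *after* the one readers are
// currently reading from, so with slots > 1 concurrent readers can finish
// reading the previous slot while the write is in progress.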
template <class T, class... Policies>
void seqlock<T, Policies...>::store(const T& value) {
  auto seq = acquire_lock();
  auto idx = ((seq >> 1) + 1) % slots;
  store_data(value, _data[idx]);
  release_lock(seq);
}
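
// The low bit of _seq acts as the writer lock: _seq is odd while an update is
// in progress and even otherwise. acquire_lock() spins until the value is even
// and then tries to set the low bit via CAS; release_lock() clears it again by
// incrementing once more.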
template <class T, class... Policies>
auto seqlock<T, Policies...>::acquire_lock() -> sequence_t {
  auto seq = _seq.load(std::memory_order_relaxed);
  for (;;) {
    while (is_write_pending(seq)) {
      seq = _seq.load(std::memory_order_relaxed);
    }

    assert(is_write_pending(seq) == false);
    // (4) - this acquire-CAS synchronizes-with the release-store (5)
    if (_seq.compare_exchange_weak(seq, seq + 1, std::memory_order_acquire, std::memory_order_relaxed)) {
      return seq + 1;
    }
  }
}

template <class T, class... Policies>
void seqlock<T, Policies...>::release_lock(sequence_t seq) {
  assert(seq == _seq.load(std::memory_order_relaxed));
  assert(is_write_pending(seq));
  // (5) - this release-store synchronizes-with the acquire-CAS (4)
  // and the acquire-loads (1, 2, 3)
  _seq.store(seq + 1, std::memory_order_release);
}
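
// read_data/store_data copy the value word by word through relaxed atomic
// operations on uintptr_t-sized chunks; the fences (6) and (7) order these
// relaxed accesses relative to the loads/stores of _seq. Note that the loops
// copy sizeof(T) / sizeof(copy_t) chunks, i.e., they assume sizeof(T) is a
// multiple of sizeof(copy_t).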
template <class T, class... Policies>
void seqlock<T, Policies...>::read_data(T& dest, const storage_t& src) const {
  auto* pdest = reinterpret_cast<copy_t*>(&dest);
  auto* pend = pdest + (sizeof(T) / sizeof(copy_t));
  const auto* psrc = reinterpret_cast<const std::atomic<copy_t>*>(&src);
  for (; pdest != pend; ++psrc, ++pdest) {
    *pdest = psrc->load(std::memory_order_relaxed);
  }
  // (6) - this acquire-fence synchronizes-with the release-fence (7)
  std::atomic_thread_fence(std::memory_order_acquire);

  // Effectively this fence transforms the previous relaxed-loads into acquire-loads. This
  // is necessary to enforce an order with the subsequent load of _seq, so that these
  // relaxed-loads cannot be reordered with the load on _seq. This also implies that if
  // one of these relaxed-loads returns a new value written by a concurrent update operation,
  // the fences synchronize with each other, so it is guaranteed that the subsequent load on
  // _seq "sees" the new sequence value and the load operation will perform a retry.
}

template <class T, class... Policies>
void seqlock<T, Policies...>::store_data(const T& src, storage_t& dest) {
  // (7) - this release-fence synchronizes-with the acquire-fence (6)
  std::atomic_thread_fence(std::memory_order_release);

  const auto* psrc = reinterpret_cast<const copy_t*>(&src);
  const auto* pend = psrc + (sizeof(T) / sizeof(copy_t));
  auto* pdest = reinterpret_cast<std::atomic<copy_t>*>(&dest);
  for (; psrc != pend; ++psrc, ++pdest) {
    pdest->store(*psrc, std::memory_order_relaxed);
  }
}

} // namespace xenium

#endif
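
A minimal usage sketch (illustrative only; `config` is a hypothetical
trivially copyable type that is larger than a pointer):

  #include <xenium/seqlock.hpp>

  struct config {
    int a[4];
  };

  int main() {
    xenium::seqlock<config, xenium::policy::slots<2>> cfg(config{{1, 2, 3, 4}});
    config c = cfg.load();                     // consistent snapshot
    c.a[0] = 42;
    cfg.store(c);                              // replace the whole value
    cfg.update([](config& v) { ++v.a[1]; });   // read-modify-write
  }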