#include <xenium/detail/port.hpp>
#include <xenium/parameter.hpp>

// standard headers required by the code below
#include <atomic>
#include <cassert>
#include <cstdint>
#include <new>
#include <type_traits>
#include <utility>
namespace policy {
  /// Policy to configure the number of data slots used by `seqlock`.
  template <unsigned Value>
  struct slots;
} // namespace policy
/// A sequence lock (seqlock) for values of trivially copyable type `T`.
/// Readers are lock-free and validate their copy against the sequence
/// counter; writers are serialized via the counter's low bit.
template <class T, class... Policies>
struct seqlock {
  static_assert(std::is_default_constructible_v<T>, "T must be default constructible");
  static_assert(std::is_trivially_destructible_v<T>, "T must be trivially destructible");
  static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable");
  static_assert(sizeof(T) > sizeof(std::uintptr_t),
                "For types T with a size less than or equal to a pointer, use an atomic<T> with a compare_exchange update loop.");
  static constexpr unsigned slots = parameter::value_param_t<unsigned, policy::slots, 1, Policies...>::value;
  static_assert(slots >= 1, "slots must be >= 1");
  /// Initializes the first slot via copy construction.
  explicit seqlock(const T& data) { new (&_data[0]) T(data); }
  /// Initializes the first slot by constructing T from the given arguments.
  template <class... Args>
  explicit seqlock(Args&&... args) {
    new (&_data[0]) T(std::forward<Args>(args)...);
  }
  /// Reads the current value. Readers never block, but may have to retry.
  T load() const;

  /// Replaces the current value. Writers are serialized.
  void store(const T& value);
  /// Applies `func` to a copy of the current value and stores the result
  /// as the new value.
  template <class Func>
  void update(Func func);
private:
  using storage_t = typename std::aligned_storage<sizeof(T), alignof(T)>::type;
  using sequence_t = uintptr_t;
  using copy_t = uintptr_t;
  [[nodiscard]] bool is_write_pending(sequence_t seq) const { return (seq & 1) != 0; }
  sequence_t acquire_lock();
  void release_lock(sequence_t seq);

  void read_data(T& dest, const storage_t& src) const;
  void store_data(const T& src, storage_t& dest);
  // The lowest bit of _seq flags a write in progress; each completed write
  // advances the counter by two, and the upper bits select the active slot.
  std::atomic<sequence_t> _seq{0};
  storage_t _data[slots];
};
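// A minimal usage sketch (added for illustration; `config` is a hypothetical
// trivially copyable type, not part of the original source):
//
//   struct config { std::uint64_t a, b, c; };
//   seqlock<config> lock(config{1, 2, 3});
//
//   config c = lock.load();                    // lock-free read
//   lock.store(config{4, 5, 6});               // serialized write
//   lock.update([](config& v) { ++v.a; });     // read-modify-write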
template <class T, class... Policies>
T seqlock<T, Policies...>::load() const {
  T result;
  // this acquire-load synchronizes-with the release-store in release_lock
  sequence_t seq = _seq.load(std::memory_order_acquire);
  for (;;) {
    unsigned idx;
    if constexpr (slots == 1) {
      idx = 0;
      // with a single slot we have to wait for a pending write to finish,
      // since the writer updates the very slot we want to read
      while (is_write_pending(seq)) {
        seq = _seq.load(std::memory_order_acquire);
      }
    } else {
      // with multiple slots we copy the currently stable slot and let the
      // copy race with concurrent writes; validation happens below
      idx = (seq >> 1) % slots;
    }

    read_data(result, _data[idx]);
    // reload the sequence counter to validate the copy we just made
    auto seq2 = _seq.load(std::memory_order_acquire);
    if (seq2 - seq < (2 * slots - 1)) {
      break; // no write could have touched the slot we read
    }
    seq = seq2; // otherwise retry with the new sequence value
  }
  return result;
}
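// Added note on the validation condition: every write increments _seq twice
// (once in acquire_lock, once in release_lock) and successive writes cycle
// round-robin through the slots. For slots == 1 the condition degenerates to
// seq2 == seq, i.e. no write happened at all between the two loads (seq is
// even here because of the wait loop above). For slots > 1 a small difference
// means the few writes that did run targeted other slots, so the slot we
// copied stayed intact; only when the difference is large enough for writers
// to wrap back around to our slot must the read be retried.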
template <class T, class... Policies>
template <class Func>
void seqlock<T, Policies...>::update(Func func) {
  auto seq = acquire_lock();
  T data;
  // read the current value, apply the functor, and publish the result
  // into the next slot
  auto idx = (seq >> 1) % slots;
  read_data(data, _data[idx]);
  func(data);
  store_data(data, _data[(idx + 1) % slots]);
  release_lock(seq);
}
template <class T, class... Policies>
void seqlock<T, Policies...>::store(const T& value) {
  auto seq = acquire_lock();
  // write to the next slot so concurrent readers of the current slot
  // are not disturbed (relevant only for slots > 1)
  auto idx = ((seq >> 1) + 1) % slots;
  store_data(value, _data[idx]);
  release_lock(seq);
}
template <class T, class... Policies>
auto seqlock<T, Policies...>::acquire_lock() -> sequence_t {
  auto seq = _seq.load(std::memory_order_relaxed);
  for (;;) {
    // wait until no other write is in progress
    while (is_write_pending(seq)) {
      seq = _seq.load(std::memory_order_relaxed);
    }

    assert(!is_write_pending(seq));
    // this acquire-CAS synchronizes-with the release-store in release_lock
    if (_seq.compare_exchange_weak(seq, seq + 1, std::memory_order_acquire, std::memory_order_relaxed)) {
      // the odd value signals readers that a write is now in progress
      return seq + 1;
    }
  }
}
template <class T, class... Policies>
void seqlock<T, Policies...>::release_lock(sequence_t seq) {
  assert(seq == _seq.load(std::memory_order_relaxed));
  assert(is_write_pending(seq));
  // this release-store synchronizes-with the acquire-loads in load and the
  // acquire-CAS in acquire_lock; it makes the counter even again
  _seq.store(seq + 1, std::memory_order_release);
}
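// Added note: acquire_lock/release_lock implement a tiny spin lock embedded
// in the sequence counter itself. Writers spin until the counter is even,
// claim it with a CAS to an odd value, and release it by advancing to the
// next even value, so each completed write moves _seq forward by exactly two.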
template <class T, class... Policies>
void seqlock<T, Policies...>::read_data(T& dest, const storage_t& src) const {
  // copy the slot word by word via relaxed atomic loads; the copy may be
  // torn by a concurrent write, which load() detects and retries
  auto* pdest = reinterpret_cast<copy_t*>(&dest);
  auto* pend = pdest + (sizeof(T) / sizeof(copy_t));
  const auto* psrc = reinterpret_cast<const std::atomic<copy_t>*>(&src);
  for (; pdest != pend; ++psrc, ++pdest) {
    *pdest = psrc->load(std::memory_order_relaxed);
  }
  // this acquire-fence synchronizes-with the release-fence in store_data
  std::atomic_thread_fence(std::memory_order_acquire);
}
template <class T, class... Policies>
void seqlock<T, Policies...>::store_data(const T& src, storage_t& dest) {
  // this release-fence synchronizes-with the acquire-fence in read_data
  std::atomic_thread_fence(std::memory_order_release);
  const auto* psrc = reinterpret_cast<const copy_t*>(&src);
  const auto* pend = psrc + (sizeof(T) / sizeof(copy_t));
  auto* pdest = reinterpret_cast<std::atomic<copy_t>*>(&dest);
  for (; psrc != pend; ++psrc, ++pdest) {
    pdest->store(*psrc, std::memory_order_relaxed);
  }
}
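// Added note: the slot data is copied through word-sized relaxed atomics, so
// individual words cannot be torn and the deliberate data race happens on
// atomic objects (the reinterpret_cast to std::atomic<copy_t> relies on the
// usual assumption that it is layout-compatible with copy_t). Ordering comes
// from the fence pair: the release-fence in store_data before the writer's
// stores pairs with the acquire-fence in read_data after the reader's loads,
// so a reader whose sequence validation succeeds observed a fully written
// value. The loops copy sizeof(T) / sizeof(copy_t) full words, i.e. T is
// effectively assumed to occupy a whole number of machine words.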