6 #ifndef XENIUM_VYUKOV_HASH_MAP_IMPL
7 #error "This is an impl file and must not be included directly!"
10 #include <xenium/acquire_guard.hpp>
11 #include <xenium/backoff.hpp>
12 #include <xenium/parameter.hpp>
13 #include <xenium/policy.hpp>
21 #pragma warning(disable : 26495) // uninitialized member variable
22 #pragma warning(disable : 4324) // structure was padded due to alignment specifier
// bucket_state: a 32-bit word packing a bucket's lock flag, item count,
// delete marker and version counter. All "mutators" return a NEW value
// instead of modifying in place, so the whole word can be published with
// a single atomic store.
// Bit layout (low -> high), defined by the shift constants at the bottom:
//   bit 0: lock | item count | delete marker | version (remaining bits).
// NOTE(review): several closing braces are elided in this excerpt.
27 template <
class Key,
class Value,
class... Policies>
28 struct vyukov_hash_map<Key, Value, Policies...>::bucket_state {
29 bucket_state() =
default;
// Copy of this state with the lock bit set.
31 [[nodiscard]] bucket_state locked() const noexcept {
return bucket_state(value | lock); }
// Copy with the lock bit cleared (XOR - assumes the bit is currently set).
32 [[nodiscard]] bucket_state clear_lock()
const {
34 return bucket_state(value ^ lock);
// Copy with the version counter bumped by one.
36 [[nodiscard]] bucket_state new_version() const noexcept {
return bucket_state(value + version_inc); }
// Copy with the item count incremented; caller must ensure there is room.
37 [[nodiscard]] bucket_state inc_item_count()
const {
38 assert(item_count() < bucket_item_count);
39 bucket_state result(value + item_count_inc);
40 assert(result.item_count() == item_count() + 1);
// Copy with the item count decremented; count must be non-zero.
43 [[nodiscard]] bucket_state dec_item_count()
const {
44 assert(item_count() > 0);
45 bucket_state result(value - item_count_inc);
46 assert(result.item_count() == item_count() - 1);
// Copy with the delete marker set to `marker` (callers pass slot index + 1);
// the marker field must currently be zero.
49 [[nodiscard]] bucket_state set_delete_marker(std::uint32_t marker)
const {
50 assert(delete_marker() == 0);
51 bucket_state result(value | (marker << delete_marker_shift));
52 assert(result.delete_marker() == marker);
// Two states are equal iff the packed words are bit-identical.
56 bool operator==(bucket_state r)
const noexcept {
return this->value == r.value; }
57 bool operator!=(bucket_state r)
const noexcept {
return this->value != r.value; }
// Field extractors: shift the packed word and mask out the field.
59 [[nodiscard]] std::uint32_t item_count() const noexcept {
return (value >> 1) & item_count_mask; }
60 [[nodiscard]] std::uint32_t delete_marker() const noexcept {
61 return (value >> delete_marker_shift) & item_count_mask;
63 [[nodiscard]] std::uint32_t version() const noexcept {
return value >> version_shift; }
64 [[nodiscard]]
bool is_locked() const noexcept {
return (value & lock) != 0; }
67 explicit bucket_state(std::uint32_t value) noexcept : value(value) {}
// The packed state word; zero-initialized => unlocked, empty, version 0.
69 std::uint32_t value{};
// Bits needed for the per-bucket item counter (also sized to hold the
// delete marker, which stores slot index + 1).
88 static constexpr std::size_t item_counter_bits = utils::find_last_bit_set(bucket_item_count);
90 static constexpr std::size_t item_count_shift = 1;
91 static constexpr std::size_t delete_marker_shift = item_count_shift + item_counter_bits;
92 static constexpr std::size_t version_shift = delete_marker_shift + item_counter_bits;
94 static constexpr std::uint32_t lock = 1;
95 static constexpr std::uint32_t version_inc = 1 << version_shift;
96 static constexpr std::uint32_t item_count_inc = 1 << item_count_shift;
98 static constexpr std::uint32_t item_count_mask = (1 << item_counter_bits) - 1;
// A hash bucket: atomic packed state word, head of the overflow
// (extension) chain, and bucket_item_count inline key/value slots.
// Keys and values are stored in separate parallel arrays.
101 template <
class Key,
class Value,
class... Policies>
102 struct vyukov_hash_map<Key, Value, Policies...>::bucket {
103 std::atomic<bucket_state> state;
104 std::atomic<extension_item*> head;
105 typename traits::storage_key_type key[bucket_item_count];
106 typename traits::storage_value_type value[bucket_item_count];
// Overflow entry used once a bucket's inline slots are full; entries are
// singly linked through `next` (also used for the free list inside an
// extension_bucket).
109 template <
class Key,
class Value,
class... Policies>
110 struct vyukov_hash_map<Key, Value, Policies...>::extension_item {
111 typename traits::storage_key_type key;
112 typename traits::storage_value_type value;
113 std::atomic<extension_item*> next;
// Pool of extension_items guarded by a simple spin lock; `head` is the
// free list of unused items in this pool.
116 template <
class Key,
class Value,
class... Policies>
117 struct vyukov_hash_map<Key, Value, Policies...>::extension_bucket {
// 0 = unlocked, non-zero = locked.
118 std::atomic<std::uint32_t> lock;
119 std::atomic<extension_item*> head;
120 extension_item items[extension_item_count];
// Spin until the lock is observed free (relaxed), then attempt to take it
// with an acquire-exchange; loop elided lines retry on failure.
122 void acquire_lock() {
125 while (lock.load(std::memory_order_relaxed) != 0) {
129 if (lock.exchange(1, std::memory_order_acquire) == 0) {
// Release store pairs with the acquire-exchange in acquire_lock.
135 void release_lock() {
137 lock.store(0, std::memory_order_release);
// A block is the header of one table generation (managed via the
// reclaimer's concurrent_ptr). The bucket array is placed directly
// behind this header (see buckets()); extension buckets follow it
// (see allocate_block). alignas(64) keeps the header on its own
// cache line(s).
141 template <
class Key,
class Value,
class... Policies>
142 struct alignas(64) vyukov_hash_map<Key, Value, Policies...>::block : reclaimer::template enable_concurrent_ptr<block> {
144 std::uint32_t bucket_count;
145 std::uint32_t extension_bucket_count;
146 extension_bucket* extension_buckets;
// Maps a key to its bucket index; mask is presumably bucket_count - 1
// (bucket counts are powers of two - see the constructor).
149 [[nodiscard]] std::uint32_t index(
const key_type& key)
const {
return static_cast<std::uint32_t
>(key & mask); }
// The bucket array lives immediately after this header.
150 bucket* buckets() {
return reinterpret_cast<bucket*
>(
this + 1); }
// Blocks come from the aligned operator new (see allocate_block), so they
// must be released with the matching aligned operator delete.
152 void operator delete(
void* p) { ::operator
delete(p, cacheline_size); }
// RAII guard that restores a locked bucket's state word on scope exit
// (i.e. unlocks it), unless unlock() was called explicitly or disable()
// disarmed it.
155 template <
class Key,
class Value,
class... Policies>
156 struct vyukov_hash_map<Key, Value, Policies...>::unlocker {
157 unlocker(bucket& locked_bucket, bucket_state state) : state(state), locked_bucket(locked_bucket) {}
// (destructor body, partially elided) - restore the captured state.
160 assert(locked_bucket.state.load().is_locked());
161 locked_bucket.state.store(state, std::memory_order_relaxed);
// Explicit unlock with a caller-chosen new state and memory order.
164 void unlock(bucket_state new_state, std::memory_order order) {
166 assert(locked_bucket.state.load().is_locked());
167 locked_bucket.state.store(new_state, order);
// Disarm the destructor - the caller takes over unlocking.
170 void disable() { enabled =
false; }
175 bucket& locked_bucket;
// Constructor: allocate the initial block, rounding the requested
// capacity up to the next power of two; throws std::bad_alloc if the
// allocation fails. The relaxed store is fine - no other thread can see
// the map before construction finishes.
178 template <
class Key,
class Value,
class... Policies>
179 vyukov_hash_map<Key, Value, Policies...>::vyukov_hash_map(std::size_t initial_capacity) : resize_lock(0) {
180 auto b = allocate_block(
static_cast<std::uint32_t
>(utils::next_power_of_two(initial_capacity)));
182 throw std::bad_alloc();
184 data_block.store(b, std::memory_order_relaxed);
// Destructor: iterates the map erasing every remaining entry (loop body
// elided in this excerpt), then frees the current block directly.
// Not safe to run concurrently with any other operation on the map.
187 template <
class Key,
class Value,
class... Policies>
188 vyukov_hash_map<Key, Value, Policies...>::~vyukov_hash_map() {
194 while (it != end()) {
202 delete data_block.load().get();
// emplace(key, value) (signature line not visible in this excerpt):
// delegates to do_get_or_emplace<false> with a factory that hands out the
// moved-in value and a callback that discards the accessor - the caller
// only learns whether the insertion happened.
205 template <
class Key,
class Value,
class... Policies>
207 return do_get_or_emplace<false>(
208 std::move(key), [v = std::move(value)]()
mutable {
return std::move(v); }, [](accessor&&,
auto&) {});
// get_or_emplace(key, args...) -> {accessor, inserted}: constructs the
// value from `args` only via the lazy factory below; the callback stores
// the accessor of the (found or newly inserted) entry into the result.
211 template <
class Key,
class Value,
class... Policies>
212 template <
class... Args>
214 -> std::pair<accessor, bool> {
215 std::pair<accessor, bool> result;
216 result.second = do_get_or_emplace<true>(
218 [&] {
return value_type(std::forward<Args>(args)...); },
219 [&result](accessor&& acc,
auto&) { result.first = std::move(acc); });
// get_or_emplace_lazy(key, factory) -> {accessor, inserted}: like
// get_or_emplace, but the caller supplies the value factory directly;
// it is forwarded to do_get_or_emplace, which only invokes it on the
// insertion paths.
223 template <
class Key,
class Value,
class... Policies>
224 template <
class Factory>
226 -> std::pair<accessor, bool> {
227 std::pair<accessor, bool> result;
228 result.second = do_get_or_emplace<true>(std::move(key),
229 std::forward<Factory>(factory),
230 [&result](accessor&& acc,
auto&) { result.first = std::move(acc); });
// Core insert/lookup. Locks the key's bucket, then:
//   1) scans the inline slots - if found, invoke callback and return;
//   2) else, if an inline slot is free, store the new item there and
//      publish it by unlocking with an incremented item count (release);
//   3) else, scan the extension chain - if found, invoke callback;
//   4) else, allocate an extension item, fill it, and push it onto the
//      bucket's chain head with a release store.
// Returns true on insertion, false if the key already existed (return
// statements are elided in this excerpt). The unlocker guard restores
// the original state on the early-exit paths.
234 template <
class Key,
class Value,
class... Policies>
235 template <
bool AcquireAccessor,
class Factory,
class Callback>
236 bool vyukov_hash_map<Key, Value, Policies...>::do_get_or_emplace(Key&& key, Factory&& factory, Callback&& callback) {
237 const hash_t h = hash{}(key);
// lock_bucket fills in the guarded block `b` and the pre-lock `state`.
243 bucket& bucket = lock_bucket(h, b, state);
245 unlocker unlocker(bucket, state);
247 std::uint32_t item_count = state.item_count();
// (1) inline-slot scan.
249 for (std::uint32_t i = 0; i != item_count; ++i) {
250 if (traits::template compare_key<AcquireAccessor>(bucket.key[i], bucket.value[i], key, h, acc)) {
251 callback(std::move(acc), bucket.value[i]);
// Key already present - unlock with the unchanged state.
252 unlocker.unlock(state, std::memory_order_relaxed);
// (2) insert into a free inline slot.
257 if (item_count < bucket_item_count) {
258 traits::template store_item<AcquireAccessor>(
259 bucket.key[item_count], bucket.value[item_count], h, std::move(key), factory(), std::memory_order_relaxed, acc);
260 callback(std::move(acc), bucket.value[item_count]);
// Publish the new item: release-store the state with count + 1.
263 unlocker.unlock(state.inc_item_count(), std::memory_order_release);
// (3) extension-chain scan.
268 for (extension_item* extension = bucket.head.load(std::memory_order_relaxed); extension !=
nullptr;
269 extension = extension->next.load(std::memory_order_relaxed)) {
270 if (traits::template compare_key<AcquireAccessor>(extension->key, extension->value, key, h, acc)) {
271 callback(std::move(acc), extension->value);
272 unlocker.unlock(state, std::memory_order_relaxed);
// (4) insert into a freshly allocated extension item.
277 extension_item* extension = allocate_extension_item(b.get(), h);
// nullptr => extension pool exhausted; elided code presumably grows the
// table and retries - TODO confirm against the full source.
278 if (extension ==
nullptr) {
284 traits::template store_item<AcquireAccessor>(
285 extension->key, extension->value, h, std::move(key), factory(), std::memory_order_relaxed, acc);
// (elided failure path) return the item to its pool.
287 free_extension_item(extension);
290 callback(std::move(acc), extension->value);
291 auto old_head = bucket.head.load(std::memory_order_relaxed);
292 extension->next.store(old_head, std::memory_order_relaxed);
// Release-publish the new chain head so readers see a fully built item.
294 bucket.head.store(extension, std::memory_order_release);
297 unlocker.unlock(state, std::memory_order_release);
// (signature line not visible in this excerpt) Removes the entry for
// `key` via do_extract, then hands the extracted value to the traits'
// reclamation scheme. Returns whether an entry was removed.
302 template <
class Key,
class Value,
class... Policies>
305 bool result = do_extract(key, acc);
306 traits::reclaim(acc);
// (signature line not visible in this excerpt) Same extraction path as
// above, but uses the traits' internal reclamation variant - presumably
// because the caller keeps an accessor to the value; TODO confirm
// against the full source.
310 template <
class Key,
class Value,
class... Policies>
312 bool result = do_extract(key, acc);
313 traits::reclaim_internal(acc);
// do_extract(key, result): removes the entry for `key`, storing its
// value in `result`. Overall protocol (retry/return lines elided):
// lock the bucket via CAS, find the item, then close the hole the
// removal leaves - either by pulling the first extension item into the
// inline slot, or by moving the last inline item down. While a slot is
// being overwritten, a "delete marker" (slot index + 1) is published in
// the state word so concurrent lock-free readers (try_get_value) can
// detect they may have read a half-replaced slot.
317 template <
class Key,
class Value,
class... Policies>
319 const hash_t h =
hash{}(key);
// Guarded acquire of the current block (protects against concurrent grow).
325 b.acquire(data_block, std::memory_order_acquire);
326 const std::size_t bucket_idx = h & b->mask;
327 bucket& bucket = b->buckets()[bucket_idx];
328 bucket_state state = bucket.state.load(std::memory_order_relaxed);
329 std::uint32_t item_count = state.item_count();
// Empty bucket: nothing to remove (return elided).
331 if (item_count == 0) {
// Bucket currently locked by someone else: back off and retry (elided).
335 if (state.is_locked()) {
339 auto locked_state = state.locked();
// Try to take the bucket lock; on failure, loop back and retry (elided).
341 if (!bucket.state.compare_exchange_strong(
342 state, locked_state, std::memory_order_acquire, std::memory_order_relaxed)) {
347 unlocker unlocker(bucket, state);
// Scan the inline slots.
351 for (std::uint32_t i = 0; i != item_count; ++i) {
352 if (traits::template compare_key<true>(bucket.key[i], bucket.value[i], key, h, result)) {
353 extension_item* extension = bucket.head.load(std::memory_order_relaxed);
// Case A (extension chain non-empty): refill slot i from the chain head.
// Publish the delete marker before overwriting the slot...
356 bucket.state.store(locked_state.set_delete_marker(i + 1), std::memory_order_relaxed);
358 auto k = extension->key.load(std::memory_order_relaxed);
359 auto v = extension->value.load(std::memory_order_relaxed);
360 bucket.key[i].store(k, std::memory_order_relaxed);
362 bucket.value[i].store(v, std::memory_order_release);
// ...then clear the marker by publishing a new version (still locked).
365 locked_state = locked_state.new_version();
367 bucket.state.store(locked_state, std::memory_order_release);
// Unlink the consumed extension item from the chain.
369 extension_item* extension_next = extension->next.load(std::memory_order_relaxed);
371 bucket.head.store(extension_next, std::memory_order_release);
// Unlock with yet another version bump; then recycle the item.
375 unlocker.unlock(locked_state.new_version().clear_lock(), std::memory_order_release);
377 free_extension_item(extension);
// Case B (no extension items): move the last inline item into slot i.
379 if (i != item_count - 1) {
381 bucket.state.store(locked_state.set_delete_marker(i + 1), std::memory_order_relaxed);
383 auto k = bucket.key[item_count - 1].load(std::memory_order_relaxed);
384 auto v = bucket.value[item_count - 1].load(std::memory_order_relaxed);
385 bucket.key[i].store(k, std::memory_order_relaxed);
387 bucket.value[i].store(v, std::memory_order_release);
// Unlock with new version and decremented count (also clears the marker).
393 unlocker.unlock(state.new_version().dec_item_count(), std::memory_order_release);
// Not in the inline slots: walk the extension chain, remembering the
// previous link so the match can be unlinked in place.
399 auto extension_prev = &bucket.head;
400 extension_item* extension = extension_prev->load(std::memory_order_relaxed);
402 if (traits::template compare_key<true>(extension->key, extension->value, key, h, result)) {
403 extension_item* extension_next = extension->next.load(std::memory_order_relaxed);
404 extension_prev->store(extension_next, std::memory_order_relaxed);
408 unlocker.unlock(state.new_version(), std::memory_order_release);
410 free_extension_item(extension);
413 extension_prev = &extension->next;
414 extension = extension_prev->load(std::memory_order_relaxed);
// Key not found anywhere: unlock with the unchanged state.
420 unlocker.unlock(state, std::memory_order_relaxed);
// (signature line not visible in this excerpt) Erases the entry the
// iterator `pos` points at, while its bucket is already locked by the
// iterator. Mirrors do_extract's hole-filling protocol:
//  - if pos is an extension item: unlink it and bump the version;
//  - if pos is an inline slot and the chain is non-empty: refill the slot
//    from the chain head under a published delete marker;
//  - otherwise: move the last inline item down, decrement the count.
// Afterwards the iterator is advanced past the removed entry.
425 template <
class Key,
class Value,
class... Policies>
// Branch 1: erasing an extension item - unlink via the saved prev link.
429 auto next = pos.extension->next.load(std::memory_order_relaxed);
430 pos.prev->store(next, std::memory_order_relaxed);
431 auto new_state = pos.current_bucket_state.locked().new_version();
433 pos.current_bucket->state.store(new_state, std::memory_order_release);
435 free_extension_item(pos.extension);
436 pos.extension = next;
437 if (next ==
nullptr) {
438 pos.move_to_next_bucket();
// Branch 2: erasing inline slot pos.index with a non-empty chain -
// refill the slot from the chain head.
445 auto extension = pos.current_bucket->head.load(std::memory_order_relaxed);
447 auto locked_state = pos.current_bucket_state.locked();
// Publish the delete marker before overwriting the slot.
448 auto marked_state = locked_state.set_delete_marker(pos.index + 1);
449 pos.current_bucket->state.store(marked_state, std::memory_order_relaxed);
450 assert(pos.current_bucket->state.load().is_locked());
452 auto k = extension->key.load(std::memory_order_relaxed);
453 auto v = extension->value.load(std::memory_order_relaxed);
454 pos.current_bucket->key[pos.index].store(k, std::memory_order_relaxed);
456 pos.current_bucket->value[pos.index].store(v, std::memory_order_release);
// Clear the marker with a version bump (bucket stays locked).
459 locked_state = locked_state.new_version();
461 pos.current_bucket->state.store(locked_state, std::memory_order_release);
462 assert(pos.current_bucket->state.load().is_locked());
// Unlink the consumed chain head and publish another version.
464 auto next = extension->next.load(std::memory_order_relaxed);
466 pos.current_bucket->head.store(next, std::memory_order_release);
470 pos.current_bucket->state.store(locked_state.new_version(), std::memory_order_release);
471 assert(pos.current_bucket->state.load().is_locked());
472 free_extension_item(extension);
// Branch 3: no chain - move the last inline item into the erased slot.
474 auto max_index = pos.current_bucket_state.item_count() - 1;
475 if (pos.index != max_index) {
477 auto locked_state = pos.current_bucket_state.locked();
478 auto marked_state = locked_state.set_delete_marker(pos.index + 1);
479 pos.current_bucket->state.store(marked_state, std::memory_order_relaxed);
480 assert(pos.current_bucket->state.load().is_locked());
482 auto k = pos.current_bucket->key[max_index].load(std::memory_order_relaxed);
483 auto v = pos.current_bucket->value[max_index].load(std::memory_order_relaxed);
484 pos.current_bucket->key[pos.index].store(k, std::memory_order_relaxed);
486 pos.current_bucket->value[pos.index].store(v, std::memory_order_release);
// Publish new version with decremented count; keep the lock bit for the
// iterator, which still owns this bucket.
489 auto new_state = pos.current_bucket_state.new_version().dec_item_count();
490 pos.current_bucket_state = new_state;
493 pos.current_bucket->state.store(new_state.locked(), std::memory_order_release);
494 assert(pos.current_bucket->state.load().is_locked());
// If we erased the (now) last inline item, advance into the chain or on
// to the next bucket.
495 if (pos.index == new_state.item_count()) {
496 pos.prev = &pos.current_bucket->head;
497 pos.extension = pos.current_bucket->head.load(std::memory_order_relaxed);
498 if (pos.extension ==
nullptr) {
499 pos.move_to_next_bucket();
// (signature line not visible in this excerpt) Lock-free lookup:
// reads the bucket without taking its lock and validates every read by
// re-checking the state word's version (and delete marker) afterwards;
// elided lines restart the operation when validation fails.
505 template <
class Key,
class Value,
class... Policies>
507 const hash_t h = hash{}(key);
// Guarded acquire of the current block.
510 guarded_block b = acquire_guard(data_block, std::memory_order_acquire);
511 const std::size_t bucket_idx = h & b->mask;
512 bucket& bucket = b->buckets()[bucket_idx];
// Snapshot the state; the acquire pairs with writers' release stores.
520 bucket_state state = bucket.state.load(std::memory_order_acquire);
522 std::uint32_t item_count = state.item_count();
523 for (std::uint32_t i = 0; i != item_count; ++i) {
// Cheap trivial-key comparison first (hash / trivially comparable part).
524 if (traits::compare_trivial_key(bucket.key[i], key, h)) {
529 accessor acc = traits::acquire(bucket.value[i], std::memory_order_acquire);
// Validate: a version change means a writer touched the bucket (retry
// path elided).
532 const auto state2 = bucket.state.load(std::memory_order_relaxed);
533 if (state.version() != state2.version()) {
// A matching delete marker means exactly this slot is being replaced;
// the read may be torn (handling elided).
539 const auto delete_marker = i + 1;
540 if (state2.delete_marker() == delete_marker) {
// Full (non-trivial) key comparison only after validation.
553 if (!traits::compare_nontrivial_key(acc, key)) {
557 result = std::move(acc);
// Inline slots exhausted - walk the extension chain with the same
// version-validation after every dereference.
563 extension_item* extension = bucket.head.load(std::memory_order_acquire);
565 if (traits::compare_trivial_key(extension->key, key, h)) {
570 accessor acc = traits::acquire(extension->value, std::memory_order_acquire);
572 auto state2 = bucket.state.load(std::memory_order_relaxed);
573 if (state.version() != state2.version()) {
579 if (!traits::compare_nontrivial_key(acc, key)) {
583 result = std::move(acc);
588 extension = extension->next.load(std::memory_order_acquire);
589 auto state2 = bucket.state.load(std::memory_order_relaxed);
590 if (state.version() != state2.version()) {
// Final validation before reporting "not found" (return elided).
597 auto state2 = bucket.state.load(std::memory_order_relaxed);
598 if (state.version() != state2.version()) {
// (signature line not visible in this excerpt) Triggers a table resize.
// Takes the global resize_lock flag; the caller's bucket lock is dropped
// first (state restored) so the grower can lock all buckets itself. If
// another thread is already resizing, just wait for it to finish.
608 template <
class Key,
class Value,
class... Policies>
611 const int already_resizing = resize_lock.exchange(1, std::memory_order_relaxed);
// Release our bucket lock before growing/waiting.
614 bucket.state.store(state, std::memory_order_relaxed);
620 if (already_resizing != 0) {
// Spin until the concurrent resize completes (backoff lines elided).
624 while (resize_lock.load(std::memory_order_acquire) != 0) {
// Performs the actual resize: allocates a block with twice the buckets,
// locks every old bucket, rehashes all items (inline slots and extension
// chains) into the new block, publishes it, releases resize_lock, and
// hands the old block to the reclaimer via a guarded_block.
// While resize_lock is held, only this thread modifies the table, so the
// bulk of the copying can use relaxed operations.
634 template <
class Key,
class Value,
class... Policies>
635 void vyukov_hash_map<Key, Value, Policies...>::do_grow() {
638 auto old_block = data_block.load(std::memory_order_acquire);
639 const auto bucket_count = old_block->bucket_count;
640 block* new_block = allocate_block(bucket_count * 2);
641 if (new_block ==
nullptr) {
// Allocation failed: release the resize lock before throwing.
642 resize_lock.store(0, std::memory_order_relaxed);
643 throw std::bad_alloc();
// Phase 1: lock every bucket of the old block (retry/backoff elided).
647 auto old_buckets = old_block->buckets();
648 for (std::uint32_t i = 0; i != bucket_count; ++i) {
649 auto& bucket = old_buckets[i];
652 auto st = bucket.state.load(std::memory_order_relaxed);
653 if (st.is_locked()) {
658 if (bucket.state.compare_exchange_strong(st, st.locked(), std::memory_order_acquire, std::memory_order_relaxed)) {
// Phase 2: rehash every item into the new block.
666 auto new_buckets = new_block->buckets();
667 for (std::uint32_t bucket_idx = 0; bucket_idx != bucket_count; ++bucket_idx) {
668 auto& old_bucket = old_buckets[bucket_idx];
669 const std::uint32_t item_count = old_bucket.state.load(std::memory_order_relaxed).item_count();
670 for (std::uint32_t i = 0; i != item_count; ++i) {
671 auto k = old_bucket.key[i].load(std::memory_order_relaxed);
// Keys are rehashed (storage_key_type may differ from key_type).
672 hash_t h = traits::template rehash<hash>(k);
673 auto& new_bucket = new_buckets[h & new_block->mask];
674 auto new_bucket_state = new_bucket.state.load(std::memory_order_relaxed);
675 auto new_bucket_count = new_bucket_state.item_count();
676 auto v = old_bucket.value[i].load(std::memory_order_relaxed);
// Inline slots of the new bucket are filled first (overflow handling for
// this loop is elided in this excerpt).
677 new_bucket.key[new_bucket_count].store(k, std::memory_order_relaxed);
678 new_bucket.value[new_bucket_count].store(v, std::memory_order_relaxed);
679 new_bucket.state.store(new_bucket_state.inc_item_count(), std::memory_order_relaxed);
// Migrate the old bucket's extension chain the same way.
683 for (extension_item* extension = old_bucket.head; extension !=
nullptr;
684 extension = extension->next.load(std::memory_order_relaxed)) {
685 auto k = extension->key.load(std::memory_order_relaxed);
686 hash_t h = traits::template rehash<hash>(k);
687 auto& new_bucket = new_buckets[h & new_block->mask];
688 auto new_bucket_state = new_bucket.state.load(std::memory_order_relaxed);
689 auto new_bucket_count = new_bucket_state.item_count();
690 auto v = extension->value.load(std::memory_order_relaxed);
691 if (new_bucket_count < bucket_item_count) {
692 new_bucket.key[new_bucket_count].store(k, std::memory_order_relaxed);
693 new_bucket.value[new_bucket_count].store(v, std::memory_order_relaxed);
694 new_bucket.state.store(new_bucket_state.inc_item_count(), std::memory_order_relaxed);
// New bucket's inline slots full: spill into a fresh extension item.
696 extension_item* new_extension = allocate_extension_item(new_block, h);
697 assert(new_extension);
698 new_extension->key.store(k, std::memory_order_relaxed);
699 new_extension->value.store(v, std::memory_order_relaxed);
700 new_extension->next.store(new_bucket.head, std::memory_order_relaxed);
701 new_bucket.head = new_extension;
// Phase 3: publish the new block, then let resize-waiters proceed.
706 data_block.store(new_block, std::memory_order_release);
708 resize_lock.store(0, std::memory_order_release);
// Hand the old block to the reclamation scheme (freed once no reader
// still holds a guard on it).
711 guarded_block g(old_block);
// Allocates one contiguous, cache-line-aligned chunk holding the block
// header, the bucket array, and the extension buckets (plus one extra
// extension_bucket worth of space so the array can be re-aligned to
// sizeof(extension_bucket)). Memory is zeroed, so all bucket states
// start unlocked/empty. Returns nullptr if the nothrow allocation fails.
715 template <
class Key,
class Value,
class... Policies>
716 auto vyukov_hash_map<Key, Value, Policies...>::allocate_block(std::uint32_t bucket_count) -> block* {
717 std::uint32_t extension_bucket_count = bucket_count / bucket_to_extension_ratio;
718 std::size_t size =
sizeof(block) +
sizeof(bucket) * bucket_count +
719 sizeof(extension_bucket) * (
static_cast<size_t>(extension_bucket_count) + 1);
// Aligned nothrow allocation; matches block::operator delete.
721 void* mem = ::operator
new(size, cacheline_size, std::nothrow);
722 if (mem ==
nullptr) {
726 std::memset(mem, 0, size);
727 auto* b =
new (mem) block;
// bucket_count is a power of two, so mask works for `hash & mask`.
728 b->mask = bucket_count - 1;
729 b->bucket_count = bucket_count;
730 b->extension_bucket_count = extension_bucket_count;
// Place the extension buckets behind the bucket array, rounded up to a
// sizeof(extension_bucket) boundary (free_extension_item relies on this
// alignment to recover the owning pool from an item's address).
732 std::size_t extension_bucket_addr =
reinterpret_cast<std::size_t
>(b) +
sizeof(block) +
sizeof(bucket) * bucket_count;
733 if (extension_bucket_addr %
sizeof(extension_bucket) != 0) {
734 extension_bucket_addr +=
sizeof(extension_bucket) - (extension_bucket_addr %
sizeof(extension_bucket));
736 b->extension_buckets =
reinterpret_cast<extension_bucket*
>(extension_bucket_addr);
// Thread each pool's items into its free list.
738 for (std::uint32_t i = 0; i != extension_bucket_count; ++i) {
739 auto& bucket = b->extension_buckets[i];
740 extension_item* head =
nullptr;
741 for (std::size_t j = 0; j != extension_item_count; ++j) {
742 bucket.items[j].next.store(head, std::memory_order_relaxed);
743 head = &bucket.items[j];
745 bucket.head.store(head, std::memory_order_relaxed);
// (signature line not visible in this excerpt) Iterator-based find:
// locks the key's bucket (the iterator keeps it locked), then scans the
// inline slots and the extension chain; the elided lines fill in the
// iterator's position on a match / produce end() on a miss.
751 template <
class Key,
class Value,
class... Policies>
753 const auto h = hash{}(key);
756 result.current_bucket = &lock_bucket(h, result.block, result.current_bucket_state);
757 auto& bucket = *result.current_bucket;
760 auto item_count = result.current_bucket_state.item_count();
761 for (std::uint32_t i = 0; i != item_count; ++i) {
762 if (traits::template compare_key<false>(bucket.key[i], bucket.value[i], key, h, acc)) {
768 auto extension = bucket.head.load(std::memory_order_relaxed);
770 if (traits::template compare_key<false>(extension->key, extension->value, key, h, acc)) {
771 result.extension = extension;
774 extension = extension->next.load(std::memory_order_relaxed);
// (signature line not visible in this excerpt) begin(): locks bucket 0
// and, if it is empty, advances to the first non-empty bucket.
780 template <
class Key,
class Value,
class... Policies>
783 result.current_bucket = &lock_bucket(0, result.block, result.current_bucket_state);
784 if (result.current_bucket_state.item_count() == 0) {
785 result.move_to_next_bucket();
// (signature line not visible in this excerpt) Acquires a guard on the
// current block, locates the bucket for `hash`, and acquires its lock by
// CAS-ing the lock bit in; on contention it retries (backoff lines
// elided). Outputs the guarded block and the pre-lock state, and returns
// a reference to the locked bucket.
790 template <
class Key,
class Value,
class... Policies>
796 block.acquire(data_block, std::memory_order_acquire);
797 const std::size_t bucket_idx =
hash & block->mask;
798 auto& bucket = block->buckets()[bucket_idx];
799 bucket_state st = bucket.state.load(std::memory_order_relaxed);
// Already locked by someone else: retry (elided).
800 if (st.is_locked()) {
805 if (bucket.state.compare_exchange_strong(st, st.locked(), std::memory_order_acquire, std::memory_order_relaxed)) {
// Takes an extension_item from one of the block's pools. Probes the
// pools starting at the hash position (linear probing with wrap-around);
// two passes are made before giving up and returning nullptr (returns
// elided in this excerpt). A pool whose free list looks empty is skipped
// without locking it.
813 template <
class Key,
class Value,
class... Policies>
814 auto vyukov_hash_map<Key, Value, Policies...>::allocate_extension_item(block* b, hash_t hash) -> extension_item* {
815 const std::size_t extension_bucket_count = b->extension_bucket_count;
// Pool counts are derived from power-of-two bucket counts, so a mask
// works as the modulus.
816 const std::size_t mod_mask = extension_bucket_count - 1;
817 for (std::size_t iter = 0; iter != 2; ++iter) {
818 for (std::size_t idx = 0; idx != extension_bucket_count; ++idx) {
819 const std::size_t extension_bucket_idx = (hash + idx) & mod_mask;
820 extension_bucket& extension_bucket = b->extension_buckets[extension_bucket_idx];
// Cheap unlocked check before taking the pool's spin lock.
822 if (extension_bucket.head.load(std::memory_order_relaxed) ==
nullptr) {
826 extension_bucket.acquire_lock();
827 auto item = extension_bucket.head.load(std::memory_order_relaxed);
// Pop the free-list head under the lock.
829 extension_bucket.head.store(item->next, std::memory_order_relaxed);
830 extension_bucket.release_lock();
// Free list raced to empty after the unlocked check: move on.
833 extension_bucket.release_lock();
// Returns an extension_item to its owning pool. The pool is recovered
// purely from the item's address: pools are sizeof(extension_bucket)-
// aligned (see allocate_block), so rounding the address down recovers
// the extension_bucket header. The item is then pushed onto the pool's
// free list under its spin lock.
839 template <
class Key,
class Value,
class... Policies>
840 void vyukov_hash_map<Key, Value, Policies...>::free_extension_item(extension_item* item) {
841 auto item_addr =
reinterpret_cast<std::uintptr_t
>(item);
842 auto bucket =
reinterpret_cast<extension_bucket*
>(item_addr - item_addr %
sizeof(extension_bucket));
844 bucket->acquire_lock();
845 auto head = bucket->head.load(std::memory_order_relaxed);
847 item->next.store(head, std::memory_order_release);
850 bucket->head.store(item, std::memory_order_relaxed);
851 bucket->release_lock();
// Iterator move constructor: steals the other iterator's position (and
// its bucket lock ownership), then nulls the source so its destructor
// will not unlock the bucket.
854 template <
class Key,
class Value,
class... Policies>
855 vyukov_hash_map<Key, Value, Policies...>::iterator::iterator(iterator&& other) :
856 block(std::move(other.block)),
857 current_bucket(std::move(other.current_bucket)),
858 current_bucket_state(std::move(other.current_bucket_state)),
859 index(std::move(other.index)),
860 extension(std::move(other.extension)),
861 prev(std::move(other.prev)) {
// Reset the moved-from iterator to a harmless end-like state.
863 other.current_bucket =
nullptr;
864 other.current_bucket_state = bucket_state();
866 other.extension =
nullptr;
867 other.prev =
nullptr;
// Iterator move assignment: same transfer as the move constructor -
// take over the source's position and lock ownership, then null out the
// source. NOTE(review): any release of *this's previous state happens in
// lines elided from this excerpt - confirm against the full source.
870 template <
class Key,
class Value,
class... Policies>
871 auto vyukov_hash_map<Key, Value, Policies...>::iterator::operator=(iterator&& other) -> iterator& {
872 block = std::move(other.block);
873 current_bucket = std::move(other.current_bucket);
874 current_bucket_state = std::move(other.current_bucket_state);
875 index = std::move(other.index);
876 extension = std::move(other.extension);
877 prev = std::move(other.prev);
// Neutralize the moved-from iterator.
880 other.current_bucket =
nullptr;
881 other.current_bucket_state = bucket_state();
883 other.extension =
nullptr;
884 other.prev =
nullptr;
// Iterator destructor (body elided in this excerpt) - presumably calls
// reset() to release the held bucket lock; confirm against full source.
889 template <
class Key,
class Value,
class... Policies>
890 vyukov_hash_map<Key, Value, Policies...>::iterator::~iterator() {
// (signature line not visible in this excerpt) iterator::reset():
// if a bucket is held, release its lock by restoring the saved state
// word, then clear the iterator's position.
894 template <
class Key,
class Value,
class... Policies>
897 if (current_bucket) {
898 assert(current_bucket->state.load().is_locked());
// Restoring the pre-lock state clears the lock bit; release publishes
// any updates made while the bucket was held.
900 current_bucket->state.store(current_bucket_state, std::memory_order_release);
904 current_bucket =
nullptr;
905 current_bucket_state = bucket_state();
// (signature line not visible in this excerpt) Iterators are equal when
// every position component matches (prev is intentionally not compared).
911 template <
class Key,
class Value,
class... Policies>
913 return block == r.block && current_bucket == r.current_bucket && current_bucket_state == r.current_bucket_state &&
914 index == r.index && extension == r.extension;
// (signature line not visible in this excerpt) Defined as the negation
// of operator==.
917 template <
class Key,
class Value,
class... Policies>
919 return !(*
this == r);
// Pre-increment: when on the extension chain, follow `next`; when the
// inline slots are exhausted, switch to the chain head; either way,
// falling off the end moves on to the next non-empty bucket.
// (The inline-slot `++index` path is elided in this excerpt.)
922 template <
class Key,
class Value,
class... Policies>
923 auto vyukov_hash_map<Key, Value, Policies...>::iterator::operator++() -> iterator& {
// Currently on an extension item: advance along the chain.
925 prev = &extension->next;
926 extension = extension->next.load(std::memory_order_relaxed);
927 if (extension ==
nullptr) {
928 move_to_next_bucket();
// Inline slots done: continue with the bucket's extension chain.
934 if (index == current_bucket_state.item_count()) {
935 prev = &current_bucket->head;
936 extension = current_bucket->head.load(std::memory_order_relaxed);
937 if (extension ==
nullptr) {
938 move_to_next_bucket();
// operator->: only valid when dereferencing yields a real reference;
// otherwise operator* would return a temporary pair and '->' would
// dangle, which the static_assert rules out at compile time.
944 template <
class Key,
class Value,
class... Policies>
945 auto vyukov_hash_map<Key, Value, Policies...>::iterator::operator->() -> pointer {
946 static_assert(std::is_reference<reference>::value,
947 "operator-> is only available for instantiations with non-trivial key types. Use explicit "
948 "dereferenciation instead (operator*). The reason is that all other instantiations create "
949 "temporary std::pair<> instances since key and value are stored separately.");
950 return &this->operator*();
// operator*: dereference the current position - either the extension
// item we sit on, or inline slot `index` of the current bucket (the
// branch condition is elided in this excerpt).
953 template <
class Key,
class Value,
class... Policies>
954 auto vyukov_hash_map<Key, Value, Policies...>::iterator::operator*() -> reference {
956 return traits::deref_iterator(extension->key, extension->value);
958 return traits::deref_iterator(current_bucket->key[index], current_bucket->value[index]);
// Advances the iterator to the next bucket: lock the next bucket first
// (CAS on its lock bit, retry elided), and only then release the lock on
// the previous one. Recurses past empty buckets; reaching the last
// bucket ends the iteration (that branch's body is elided).
961 template <
class Key,
class Value,
class... Policies>
962 void vyukov_hash_map<Key, Value, Policies...>::iterator::move_to_next_bucket() {
963 assert(current_bucket !=
nullptr);
// Already at the last bucket -> become end() (elided).
964 if (current_bucket == block->buckets() + block->bucket_count - 1) {
// Remember the old bucket so it can be unlocked after the new one is
// safely locked (locks are taken in bucket order, avoiding deadlock).
970 auto old_bucket = current_bucket;
971 auto old_bucket_state = current_bucket_state;
978 auto st = current_bucket->state.load(std::memory_order_relaxed);
979 if (st.is_locked()) {
984 if (current_bucket->state.compare_exchange_strong(
985 st, st.locked(), std::memory_order_acquire, std::memory_order_relaxed)) {
986 current_bucket_state = st;
// Release the previous bucket by restoring its saved state.
993 old_bucket->state.store(old_bucket_state, std::memory_order_release);
// Skip empty buckets.
998 if (current_bucket_state.item_count() == 0) {
999 move_to_next_bucket();
1006 #pragma warning(pop)