/* concurrent_hash_map.hpp - a persistent version of a concurrent hash map.
 * Ref: https://arxiv.org/abs/1509.02235 */

#ifndef PMEMOBJ_CONCURRENT_HASH_MAP_HPP
#define PMEMOBJ_CONCURRENT_HASH_MAP_HPP

#include <libpmemobj++/detail/atomic_backoff.hpp>
#include <libpmemobj++/detail/persistent_pool_ptr.hpp>
/* ... remaining libpmemobj++ and standard headers dropped in extraction ... */
#include <initializer_list>
#include <type_traits>

namespace std
{
/**
 * Specialization of std::hash for p<T>.
 */
template <typename T>
struct hash<pmem::obj::p<T>> {
	size_t
	operator()(const pmem::obj::p<T> &x) const
	{
		return hash<T>()(x.get_ro());
	}
};
} /* namespace std */

namespace pmem
{
namespace obj
{
namespace concurrent_hash_map_internal
{
template <typename SharedMutexT>
class shared_mutex_scoped_lock {
	using rw_mutex_type = SharedMutexT;

public:
	shared_mutex_scoped_lock(const shared_mutex_scoped_lock &) = delete;
	shared_mutex_scoped_lock &
	operator=(const shared_mutex_scoped_lock &) = delete;

	/** Default constructor. Construct lock that has not acquired a
	 * mutex. */
	shared_mutex_scoped_lock() : mutex(nullptr), is_writer(false)
	{
	}

	/** Acquire lock on given mutex. */
	shared_mutex_scoped_lock(rw_mutex_type &m, bool write = true)
		: mutex(nullptr)
	{
		acquire(m, write);
	}

	/** Release lock (if lock is held). */
	~shared_mutex_scoped_lock()
	{
		if (mutex)
			release();
	}

	/** Acquire lock on given mutex. */
	void
	acquire(rw_mutex_type &m, bool write = true)
	{
		is_writer = write;
		mutex = &m;
		if (write)
			mutex->lock();
		else
			mutex->lock_shared();
	}

	/** Release lock. */
	void
	release()
	{
		assert(mutex);
		rw_mutex_type *m = mutex;
		mutex = nullptr;
		if (is_writer)
			m->unlock();
		else
			m->unlock_shared();
	}

	/** Try to acquire lock on given mutex without blocking. */
	bool
	try_acquire(rw_mutex_type &m, bool write = true)
	{
		assert(!mutex);
		bool result;
		is_writer = write;
		result = write ? m.try_lock() : m.try_lock_shared();
		if (result)
			mutex = &m;
		return result;
	}

protected:
	/** The pointer to the current mutex that is held, or NULL. */
	rw_mutex_type *mutex;

	/** If mutex != NULL, then is_writer is true if holding a writer lock,
	 * false if holding a reader lock. */
	bool is_writer;
}; /* class shared_mutex_scoped_lock */
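/*
 * Usage sketch (illustrative, not part of the original header): how
 * shared_mutex_scoped_lock guards a pmem::obj::shared_mutex. The function
 * and variable names are hypothetical.
 */
#if 0
inline void
shared_mutex_scoped_lock_example(pmem::obj::shared_mutex &mtx)
{
	/* Blocking write lock, held until release() or the destructor. */
	shared_mutex_scoped_lock<pmem::obj::shared_mutex> writer(mtx, true);
	writer.release();

	/* Non-blocking read lock; skips the critical section on contention. */
	shared_mutex_scoped_lock<pmem::obj::shared_mutex> reader;
	if (reader.try_acquire(mtx, false)) {
		/* ... read under the shared lock ... */
		reader.release();
	}
}
#endif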
template <typename ScopedLockType>
using scoped_lock_upgrade_to_writer =
	decltype(std::declval<ScopedLockType>().upgrade_to_writer());

template <typename ScopedLockType>
using scoped_lock_has_upgrade_to_writer =
	detail::supports<ScopedLockType, scoped_lock_upgrade_to_writer>;

template <typename ScopedLockType>
using scoped_lock_downgrade_to_reader =
	decltype(std::declval<ScopedLockType>().downgrade_to_reader());

template <typename ScopedLockType>
using scoped_lock_has_downgrade_to_reader =
	detail::supports<ScopedLockType, scoped_lock_downgrade_to_reader>;
template <typename ScopedLockType,
	  bool = scoped_lock_has_upgrade_to_writer<ScopedLockType>::value &&
		  scoped_lock_has_downgrade_to_reader<ScopedLockType>::value>
class scoped_lock_traits {
public:
	using scope_lock_type = ScopedLockType;

	static bool
	initial_rw_state(bool write)
	{
		/* For upgradeable locks, the initial state is always read. */
		return false;
	}

	static bool
	upgrade_to_writer(scope_lock_type &lock)
	{
		return lock.upgrade_to_writer();
	}

	static bool
	downgrade_to_reader(scope_lock_type &lock)
	{
		return lock.downgrade_to_reader();
	}
};

template <typename ScopedLockType>
class scoped_lock_traits<ScopedLockType, false> {
public:
	using scope_lock_type = ScopedLockType;

	static bool
	initial_rw_state(bool write)
	{
		/* Non-upgradeable locks are taken in the required mode
		 * immediately. */
		return write;
	}

	static bool
	upgrade_to_writer(scope_lock_type &lock)
	{
		/* Lock does not support upgrade; it must not hold a read lock
		 * when this is called, so the lock is already a writer. */
		return true;
	}

	static bool
	downgrade_to_reader(scope_lock_type &lock)
	{
		/* Lock does not support downgrade; should never be called. */
		assert(false);
		return false;
	}
};
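/*
 * Illustrative sketch: the detail::supports<> detection above picks the
 * traits specialization at compile time. shared_mutex_scoped_lock has
 * neither upgrade_to_writer() nor downgrade_to_reader(), so it selects the
 * <..., false> fallback and locks start in the requested mode; a TBB-style
 * lock providing both members would get the upgrade/downgrade path instead.
 */
#if 0
using pmem_lock = shared_mutex_scoped_lock<pmem::obj::shared_mutex>;

static_assert(!scoped_lock_has_upgrade_to_writer<pmem_lock>::value &&
		      !scoped_lock_has_downgrade_to_reader<pmem_lock>::value,
	      "pmem scoped lock selects the non-upgradeable traits");
/* Consequently scoped_lock_traits<pmem_lock>::initial_rw_state(true) is
 * true: bucket locks are taken as writers up front rather than upgraded. */
#endif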
} /* namespace concurrent_hash_map_internal */

/**
 * Persistent memory aware implementation of Intel TBB concurrent_hash_map.
 */
template <typename Key, typename T, typename Hash = std::hash<Key>,
	  typename KeyEqual = std::equal_to<Key>,
	  typename MutexType = pmem::obj::shared_mutex,
	  typename ScopedLockType = concurrent_hash_map_internal::
		  shared_mutex_scoped_lock<MutexType>>
class concurrent_hash_map;
namespace concurrent_hash_map_internal
{
/* Throw if the current thread is inside a pmemobj transaction. */
inline void
check_outside_tx()
{
	if (pmemobj_tx_stage() != TX_STAGE_NONE)
		throw pmem::transaction_scope_error(
			"Function called inside transaction scope.");
}

template <typename Hash>
using transparent_key_equal = typename Hash::transparent_key_equal;

template <typename Hash>
using has_transparent_key_equal =
	detail::supports<Hash, transparent_key_equal>;

template <typename Hash, typename Pred,
	  bool = has_transparent_key_equal<Hash>::value>
struct key_equal_type {
	using type = typename Hash::transparent_key_equal;
};

template <typename Hash, typename Pred>
struct key_equal_type<Hash, Pred, false> {
	using type = Pred;
};
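/*
 * Illustrative sketch: a hasher opting into heterogeneous lookup by
 * exposing a transparent_key_equal typedef (here C++14's transparent
 * std::equal_to<>); key_equal_type then resolves to it instead of the
 * KeyEqual template parameter. string_hasher is a hypothetical type.
 */
#if 0
struct string_hasher {
	using transparent_key_equal = std::equal_to<>;

	template <typename S>
	size_t
	operator()(const S &s) const
	{
		return std::hash<std::string>{}(std::string(s));
	}
};

static_assert(has_transparent_key_equal<string_hasher>::value,
	      "enables the template find()/count()/erase() overloads");
static_assert(std::is_same<key_equal_type<string_hasher,
					  std::equal_to<std::string>>::type,
			   std::equal_to<>>::value,
	      "Pred is ignored when the hasher is transparent");
#endif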
template <typename Mutex, typename ScopedLockType>
void
assert_not_locked(Mutex &mtx)
{
#ifndef NDEBUG
	ScopedLockType scoped_lock;
	assert(scoped_lock.try_acquire(mtx));
	scoped_lock.release();
#else
	(void) mtx;
#endif
}
template <typename Key, typename T, typename MutexType,
	  typename ScopedLockType>
struct hash_map_node {
	/** Mutex type. */
	using mutex_t = MutexType;

	/** Scoped lock type for the mutex. */
	using scoped_t = ScopedLockType;

	using value_type = std::pair<const Key, T>;

	/** Persistent pointer type for next. */
	using node_ptr_t = detail::persistent_pool_ptr<
		hash_map_node<Key, T, mutex_t, scoped_t>>;

	/** Next node in the bucket chain. */
	node_ptr_t next;

	/** Per-node mutex. */
	mutex_t mutex;

	/** Item stored in the node. */
	value_type item;

	hash_map_node() : next(OID_NULL)
	{
	}

	hash_map_node(const node_ptr_t &_next) : next(_next)
	{
	}

	hash_map_node(node_ptr_t &&_next) : next(std::move(_next))
	{
	}

	hash_map_node(const node_ptr_t &_next, const Key &key)
		: next(_next), item(key, T())
	{
	}

	hash_map_node(const node_ptr_t &_next, const Key &key, const T &t)
		: next(_next), item(key, t)
	{
	}

	hash_map_node(const node_ptr_t &_next, value_type &&i)
		: next(_next), item(std::move(i))
	{
	}

	template <typename... Args>
	hash_map_node(node_ptr_t &&_next, Args &&... args)
		: next(std::forward<node_ptr_t>(_next)),
		  item(std::forward<Args>(args)...)
	{
	}

	hash_map_node(const node_ptr_t &_next, const value_type &i)
		: next(_next), item(i)
	{
	}

	hash_map_node(const hash_map_node &) = delete;
	hash_map_node &operator=(const hash_map_node &) = delete;
}; /* struct hash_map_node */
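/*
 * Illustrative sketch: nodes form a per-bucket singly-linked list living in
 * persistent memory. next is a detail::persistent_pool_ptr, i.e. a pool
 * offset rather than an absolute pointer, so each dereference needs the
 * pool UUID (my_pool_uuid in the classes below). A hypothetical traversal:
 */
#if 0
template <typename Node>
size_t
chain_length(detail::persistent_pool_ptr<Node> head, uint64_t pool_uuid)
{
	size_t n = 0;

	for (auto p = head; p; p = p.get(pool_uuid)->next)
		++n;

	return n;
}
#endif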
/**
 * Provides access to the properties of the segments used by the hash map.
 */
template <typename Bucket>
class segment_traits {
public:
	/** Segment index type. */
	using segment_index_t = size_t;
	using size_type = size_t;
	using bucket_type = Bucket;

protected:
	/** PMDK has a limitation on the allocation size. */
	constexpr static size_type max_allocation_size =
		PMEMOBJ_MAX_ALLOC_SIZE;

	/** First big block that has fixed size. */
	constexpr static segment_index_t first_big_block = 27;

	/** Max number of buckets per (big) block. */
	constexpr static size_type big_block_size = size_type(1)
		<< first_big_block;

	/* Block size in bytes cannot exceed max_allocation_size. */
	static_assert((big_block_size * sizeof(bucket_type)) <
			      max_allocation_size,
		      "Block size exceeds max_allocation_size");

	/** @returns index of the first block in the segment @arg seg. */
	constexpr static segment_index_t
	first_block_in_segment(segment_index_t seg)
	{
		return seg < first_big_block
			? seg
			: (first_big_block +
			   (segment_index_t(1) << (seg - first_big_block)) -
			   1);
	}

	/** @returns number of blocks in the segment @arg seg. */
	constexpr static size_type
	blocks_in_segment(segment_index_t seg)
	{
		return seg < first_big_block
			? segment_index_t(1)
			: segment_index_t(1) << (seg - first_big_block);
	}

	/** @returns number of buckets in the block @arg b. */
	constexpr static size_type
	block_size(segment_index_t b)
	{
		return b < first_big_block ? segment_size(b ? b : 1)
					   : big_block_size;
	}

public:
	/** Number of embedded segments. */
	constexpr static segment_index_t embedded_segments = 1;

	/** Count of buckets in the embedded segments. */
	constexpr static size_type embedded_buckets = 1 << embedded_segments;

	/** Maximum number of segments. */
	constexpr static segment_index_t number_of_segments = 32;

	/** Count of segments in the first block. */
	static const size_type first_block = 8;

	/** @returns maximum number of blocks. */
	constexpr static segment_index_t
	number_of_blocks()
	{
		return first_block_in_segment(number_of_segments);
	}

	/** @returns segment index of the given bucket index. */
	static segment_index_t
	segment_index_of(size_type index)
	{
		return segment_index_t(detail::Log2(index | 1));
	}

	/** @returns the first bucket index of segment @arg k. */
	constexpr static segment_index_t
	segment_base(segment_index_t k)
	{
		return (segment_index_t(1) << k) & ~segment_index_t(1);
	}

	/** @returns segment size, except for @arg k == 0. */
	constexpr static size_type
	segment_size(segment_index_t k)
	{
		return size_type(1) << k;
	}

	static_assert(
		embedded_segments < first_big_block,
		"Number of embedded segments cannot exceed max_allocation_size");
}; /* class segment_traits */
/**
 * Implements the logic to work with segments in the hash map.
 */
template <typename BlockTable, typename SegmentTraits, bool is_const>
class segment_facade_impl : public SegmentTraits {
private:
	using traits_type = SegmentTraits;
	using traits_type::block_size;
	using traits_type::blocks_in_segment;
	using traits_type::embedded_buckets;
	using traits_type::embedded_segments;
	using traits_type::first_block;
	using traits_type::first_block_in_segment;
	using traits_type::segment_base;
	using traits_type::segment_size;

public:
	using table_reference =
		typename std::conditional<is_const, const BlockTable &,
					  BlockTable &>::type;

	using table_pointer =
		typename std::conditional<is_const, const BlockTable *,
					  BlockTable *>::type;

	using bucket_type = typename traits_type::bucket_type;
	using segment_index_t = typename traits_type::segment_index_t;
	using size_type = typename traits_type::size_type;

	/** Constructor. */
	segment_facade_impl(table_reference table, segment_index_t s)
		: my_table(&table), my_seg(s)
	{
		assert(my_seg < traits_type::number_of_segments);
	}

	/** Copy constructor. */
	segment_facade_impl(const segment_facade_impl &src)
		: my_table(src.my_table), my_seg(src.my_seg)
	{
	}

	segment_facade_impl(segment_facade_impl &&src) = default;

	/** Assignment operator. */
	segment_facade_impl &
	operator=(const segment_facade_impl &src)
	{
		my_table = src.my_table;
		my_seg = src.my_seg;
		return *this;
	}

	/** Move assignment operator. */
	segment_facade_impl &
	operator=(segment_facade_impl &&src)
	{
		my_table = src.my_table;
		my_seg = src.my_seg;
		return *this;
	}

	/** Access bucket @arg i in the segment. */
	bucket_type &operator[](size_type i) const
	{
		assert(i < size());

		segment_index_t table_block = first_block_in_segment(my_seg);
		size_type b_size = block_size(table_block);

		table_block += i / b_size;
		i = i % b_size;

		return (*my_table)[table_block][static_cast<std::ptrdiff_t>(
			i)];
	}

	/** Go to the next segment. */
	segment_facade_impl &
	operator++()
	{
		++my_seg;
		return *this;
	}

	/** Go to the next segment. Postfix form. */
	segment_facade_impl
	operator++(int)
	{
		segment_facade_impl tmp = *this;
		++(*this);
		return tmp;
	}

	/** Go to the previous segment. */
	segment_facade_impl &
	operator--()
	{
		--my_seg;
		return *this;
	}

	/** Go to the previous segment. Postfix form. */
	segment_facade_impl
	operator--(int)
	{
		segment_facade_impl tmp = *this;
		--(*this);
		return tmp;
	}

	/** Increment the segment index by @arg off. */
	segment_facade_impl &
	operator+=(segment_index_t off)
	{
		my_seg += off;
		return *this;
	}

	/** Decrement the segment index by @arg off. */
	segment_facade_impl &
	operator-=(segment_index_t off)
	{
		my_seg -= off;
		return *this;
	}

	/** @returns new segment which is the result of the addition. */
	segment_facade_impl
	operator+(segment_index_t off) const
	{
		return segment_facade_impl(*(this->my_table), my_seg + off);
	}

	/** @returns new segment which is the result of the subtraction. */
	segment_facade_impl
	operator-(segment_index_t off) const
	{
		return segment_facade_impl(*(this->my_table), my_seg - off);
	}

	/** Allocates the new segment. */
	void
	enable(pool_base &pop)
	{
		assert(my_seg >= embedded_segments);

		if (my_seg < first_block) {
			enable_first_block(pop);
		} else {
			enable_big_segment(pop);
		}
	}

	/** Deallocates the segment. */
	void
	disable()
	{
		assert(my_seg >= embedded_segments);

		if (my_seg < first_block) {
			if (my_seg == embedded_segments) {
				size_type sz = segment_size(first_block) -
					embedded_buckets;
				delete_persistent<bucket_type[]>(
					(*my_table)[my_seg], sz);
			}
			(*my_table)[my_seg] = nullptr;
		} else {
			block_range blocks = segment_blocks(my_seg);

			for (segment_index_t b = blocks.first;
			     b < blocks.second; ++b) {
				if ((*my_table)[b] != nullptr) {
					delete_persistent<bucket_type[]>(
						(*my_table)[b], block_size(b));
					(*my_table)[b] = nullptr;
				}
			}
		}
	}

	/** @returns size of the segment. */
	constexpr size_type
	size() const
	{
		return segment_size(my_seg ? my_seg : 1);
	}

	/** @returns true if the segment is enabled (all blocks allocated). */
	bool
	is_valid() const
	{
		block_range blocks = segment_blocks(my_seg);

		for (segment_index_t b = blocks.first; b < blocks.second; ++b) {
			if ((*my_table)[b] == nullptr)
				return false;
		}

		return true;
	}

private:
	using block_range = std::pair<segment_index_t, segment_index_t>;

	/** @returns blocks [begin, end) for the corresponding segment. */
	static block_range
	segment_blocks(segment_index_t seg)
	{
		segment_index_t begin = first_block_in_segment(seg);

		return block_range(begin, begin + blocks_in_segment(seg));
	}

	void
	enable_first_block(pool_base &pop)
	{
		assert(my_seg == embedded_segments);
		{
			transaction::manual tx(pop);

			size_type sz =
				segment_size(first_block) - embedded_buckets;
			(*my_table)[my_seg] =
				make_persistent<bucket_type[]>(sz);

			persistent_ptr<bucket_type> base =
				(*my_table)[embedded_segments].raw();

			for (segment_index_t s = my_seg + 1; s < first_block;
			     ++s) {
				std::ptrdiff_t off =
					static_cast<std::ptrdiff_t>(
						segment_base(s) -
						segment_base(my_seg));

				(*my_table)[s] = (base + off).raw();
			}

			transaction::commit();
		}
	}

	void
	enable_big_segment(pool_base &pop)
	{
		block_range blocks = segment_blocks(my_seg);
		{
			transaction::manual tx(pop);

			for (segment_index_t b = blocks.first;
			     b < blocks.second; ++b) {
				assert((*my_table)[b] == nullptr);
				(*my_table)[b] =
					make_persistent<bucket_type[]>(
						block_size(b));
			}

			transaction::commit();
		}
	}

	/** Pointer to the table of blocks. */
	table_pointer my_table;

	/** Segment index. */
	segment_index_t my_seg;
}; /* class segment_facade_impl */
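/*
 * Worked example (illustrative): operator[] above maps a segment-local
 * index onto the block table. For segment 30 (first_big_block == 27):
 *
 *   first_block_in_segment(30) == 27 + (1 << 3) - 1 == 34
 *   block_size(34)             == big_block_size   == 1 << 27
 *
 * so index i resolves to block 34 + i / 2^27 at offset i % 2^27, which
 * keeps every single allocation below PMEMOBJ_MAX_ALLOC_SIZE.
 */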
/**
 * Base class of concurrent_hash_map.
 * Implements logic not dependent on the Hash and KeyEqual parameters.
 */
template <typename Key, typename T, typename MutexType,
	  typename ScopedLockType>
class hash_map_base {
public:
	using mutex_t = MutexType;
	using scoped_t = ScopedLockType;

	/** Size type. */
	using size_type = size_t;

	/** Type of a hash code. */
	using hashcode_type = size_t;

	/** Node base type. */
	using node = hash_map_node<Key, T, mutex_t, scoped_t>;

	/** Node base pointer. */
	using node_ptr_t = detail::persistent_pool_ptr<node>;

	/** Bucket type. */
	struct bucket {
		using mutex_t = MutexType;
		using scoped_t = ScopedLockType;

		/** Bucket mutex. */
		mutex_t mutex;

		/** Rehashed flag: bucket contains valid data. */
		p<std::atomic<bool>> rehashed;

		/** List of the nodes stored in the bucket. */
		node_ptr_t node_list;

		/** Default constructor. */
		bucket() : node_list(nullptr)
		{
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
			VALGRIND_HG_DISABLE_CHECKING(&rehashed,
						     sizeof(rehashed));
#endif
			rehashed.get_rw() = false;
		}

		/** @returns true if bucket is rehashed and ready to use. */
		bool
		is_rehashed(std::memory_order order)
		{
			return rehashed.get_ro().load(order);
		}

		/** Mark the bucket as rehashed. */
		void
		set_rehashed(std::memory_order order)
		{
			rehashed.get_rw().store(true, order);
		}

		bucket(const bucket &) = delete;
		bucket &operator=(const bucket &) = delete;
	}; /* struct bucket */

	/** Segment traits. */
	using segment_traits_t = segment_traits<bucket>;

	/** Segment index type. */
	using segment_index_t = typename segment_traits_t::segment_index_t;

	/** Count of buckets in the embedded segments. */
	static const size_type embedded_buckets =
		segment_traits_t::embedded_buckets;

	/** Count of segments in the first block. */
	static const size_type first_block = segment_traits_t::first_block;

	/** Size of the block table. */
	constexpr static size_type block_table_size =
		segment_traits_t::number_of_blocks();

	/** Segment pointer. */
	using segment_ptr_t = persistent_ptr<bucket[]>;

	/** Block pointers table type. */
	using blocks_table_t = segment_ptr_t[block_table_size];

	/** Mutex type used to enable new segments. */
	using segment_enable_mutex_t = pmem::obj::mutex;

	/** Compat/incompat feature flags. */
	struct features {
		uint32_t compat;
		uint32_t incompat;
	};

	/** Features supported by this header. */
	static constexpr features header_features = {0, 0};

	/** ID of the persistent memory pool where the hash map resides. */
	p<uint64_t> my_pool_uuid;

	/** Features of the currently stored layout. */
	features layout_features = header_features;

	/** On-media placeholder (my_mask is recalculated on each restart). */
	std::aligned_storage<sizeof(size_t), sizeof(size_t)>::type
		my_mask_reserved;

	/** Hash mask = sum of allocated segment sizes - 1. */
	std::atomic<hashcode_type> my_mask;

	/** Padding to the end of the cacheline. */
	std::aligned_storage<32, 8>::type padding1;

	/**
	 * Segment pointers table. Also prevents false sharing between my_mask
	 * and my_size.
	 */
	blocks_table_t my_table;

	/** Size of the container in stored items. */
	p<std::atomic<size_type>> my_size;

	/** Padding to the end of the cacheline. */
	std::aligned_storage<24, 8>::type padding2;

	/** Reserved for future use. */
	std::aligned_storage<64, 8>::type reserved;

	/** Mutex used to allow only one thread to enable a new segment. */
	segment_enable_mutex_t my_segment_enable_mutex;

	/** Zero segment (the embedded buckets). */
	bucket my_embedded_segment[embedded_buckets];

	const std::atomic<hashcode_type> &
	mask() const noexcept
	{
		return my_mask;
	}

	std::atomic<hashcode_type> &
	mask() noexcept
	{
		return my_mask;
	}

	/** Const segment facade type. */
	using const_segment_facade_t =
		segment_facade_impl<blocks_table_t, segment_traits_t, true>;

	/** Non-const segment facade type. */
	using segment_facade_t =
		segment_facade_impl<blocks_table_t, segment_traits_t, false>;

	/** Default constructor. */
	hash_map_base()
	{
		static_assert(
			sizeof(size_type) == sizeof(std::atomic<size_type>),
			"std::atomic should have the same layout as underlying integral type");

#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
		VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
#endif

		PMEMoid oid = pmemobj_oid(this);

		assert(!OID_IS_NULL(oid));

		my_pool_uuid = oid.pool_uuid_lo;

		pool_base pop = get_pool_base();
		/* enable the embedded segments */
		for (size_type i = 0; i < segment_traits_t::embedded_segments;
		     ++i) {
			my_table[i] =
				pmemobj_oid(my_embedded_segment +
					    segment_traits_t::segment_base(i));
			segment_facade_t seg(my_table, i);
			mark_rehashed<false>(pop, seg);
		}
	}

	/** Re-calculate the mask value on each process restart. */
	void
	calculate_mask()
	{
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
		VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
#endif
#if LIBPMEMOBJ_CPP_VG_PMEMCHECK_ENABLED
		VALGRIND_PMC_REMOVE_PMEM_MAPPING(&my_mask, sizeof(my_mask));
#endif

		hashcode_type m = embedded_buckets - 1;

		const_segment_facade_t segment(
			my_table, segment_traits_t::embedded_segments);

		while (segment.is_valid()) {
			m += segment.size();
			++segment;
		}

		mask().store(m, std::memory_order_relaxed);
	}

	/** Restore the size after a non-graceful shutdown. */
	void
	restore_size(size_type actual_size)
	{
		my_size.get_rw().store(actual_size, std::memory_order_relaxed);
		pool_base pop = get_pool_base();
		pop.persist(my_size);
	}

	/** Initialize the buckets in a new segment. */
	template <bool Flush = true>
	void
	mark_rehashed(pool_base &pop, segment_facade_t &segment)
	{
		for (size_type i = 0; i < segment.size(); ++i) {
			bucket *b = &(segment[i]);

			assert_not_locked<mutex_t, scoped_t>(b->mutex);

			b->set_rehashed(std::memory_order_relaxed);
		}

		if (Flush) {
			/* Flush in a separate loop to avoid read-after-flush */
			for (size_type i = 0; i < segment.size(); ++i) {
				bucket *b = &(segment[i]);
				pop.flush(b->rehashed);
			}
		}
	}

	/** Enable a new segment in the hash map. */
	void
	enable_segment(segment_index_t k, bool is_initial = false)
	{
		assert(k);

		pool_base pop = get_pool_base();
		size_type sz;

		if (k >= first_block) {
			segment_facade_t new_segment(my_table, k);

			sz = new_segment.size();
			if (!new_segment.is_valid())
				new_segment.enable(pop);

			if (is_initial)
				mark_rehashed(pop, new_segment);

			/* double it to get entire capacity of the container */
			sz <<= 1;
		} else {
			/* the first block */
			assert(k == segment_traits_t::embedded_segments);

			for (segment_index_t i = k; i < first_block; ++i) {
				segment_facade_t new_segment(my_table, i);

				if (!new_segment.is_valid())
					new_segment.enable(pop);

				if (is_initial)
					mark_rehashed(pop, new_segment);
			}

			sz = segment_traits_t::segment_size(first_block);
		}
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_BEFORE(&my_mask);
#endif
		mask().store(sz - 1, std::memory_order_release);
	}

	/** Get bucket by (masked) hashcode. */
	bucket *
	get_bucket(hashcode_type h) const
	{
		segment_index_t s = segment_traits_t::segment_index_of(h);

		h -= segment_traits_t::segment_base(s);

		const_segment_facade_t segment(my_table, s);

		assert(segment.is_valid());

		return &(segment[h]);
	}

	/** Check for a mask race. */
	bool
	check_mask_race(hashcode_type h, hashcode_type &m) const
	{
		hashcode_type m_now, m_old = m;

		m_now = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

		if (m_old != m_now)
			return check_rehashing_collision(h, m_old, m = m_now);

		return false;
	}

	/** Process a mask race, check for a rehashing collision. */
	bool
	check_rehashing_collision(hashcode_type h, hashcode_type m_old,
				  hashcode_type m) const
	{
		assert(m_old != m);

		if ((h & m_old) != (h & m)) {
			/* mask changed for this hashcode; the condition above
			 * proves that h has bits set beside m_old, so find the
			 * next applicable mask after m_old */
			for (++m_old; !(h & m_old); m_old <<= 1)
				;

			m_old = (m_old << 1) - 1; /* get full mask from a bit */

			assert((m_old & (m_old + 1)) == 0 && m_old <= m);

			/* check whether it is rehashing or rehashed */
			bucket *b = get_bucket(h & m_old);
			return b->is_rehashed(std::memory_order_acquire);
		}

		return false;
	}

	/**
	 * Insert a node into the bucket (the bucket must be locked).
	 * @return new size of the container.
	 */
	template <typename Node, typename... Args>
	size_type
	insert_new_node(bucket *b, detail::persistent_pool_ptr<Node> &new_node,
			Args &&... args)
	{
		pool_base pop = get_pool_base();

		pmem::obj::transaction::run(pop, [&] {
			new_node = pmem::obj::make_persistent<Node>(
				b->node_list, std::forward<Args>(args)...);
			b->node_list = new_node; /* bucket is locked */
		});

		/* prefix form enforces the increment after the insert */
		size_t sz = ++(my_size.get_rw());
		pop.persist(&my_size, sizeof(my_size));

		return sz;
	}

	/**
	 * Check the load factor and decide if a new segment should be
	 * allocated.
	 * @return true if a new segment was allocated, false otherwise.
	 */
	bool
	check_growth(hashcode_type m, size_type sz)
	{
		if (sz >= m) {
			segment_index_t new_seg =
				static_cast<segment_index_t>(detail::Log2(
					m + 1)); /* optimized segment_index_of */

			assert(segment_facade_t(my_table, new_seg - 1)
				       .is_valid());

			std::unique_lock<segment_enable_mutex_t> lock(
				my_segment_enable_mutex, std::try_to_lock);

			if (lock) {
				if (mask().load(std::memory_order_relaxed) ==
				    m) {
					/* otherwise, another thread enabled
					 * this segment */
					enable_segment(new_seg);

					return true;
				}
			}
		}

		return false;
	}

	/** Prepare enough segments for the given number of buckets. */
	void
	reserve(size_type buckets)
	{
		if (buckets == 0)
			return;

		--buckets;

		bool is_initial = (my_size.get_ro() == 0);

		for (size_type m = mask(); buckets > m; m = mask())
			enable_segment(
				segment_traits_t::segment_index_of(m + 1),
				is_initial);
	}

	/**
	 * Swap with another hash_map_base.
	 * @throws pmem::transaction_error if the PMDK transaction failed.
	 */
	void
	internal_swap(hash_map_base<Key, T, mutex_t, scoped_t> &table)
	{
		pool_base p = get_pool_base();
		{
			transaction::manual tx(p);

			this->my_pool_uuid.swap(table.my_pool_uuid);

			this->mask() = table.mask().exchange(
				this->mask(), std::memory_order_relaxed);

			this->my_size.get_rw() =
				table.my_size.get_rw().exchange(
					this->my_size.get_rw(),
					std::memory_order_relaxed);

			for (size_type i = 0; i < embedded_buckets; ++i)
				this->my_embedded_segment[i].node_list.swap(
					table.my_embedded_segment[i].node_list);

			for (size_type i = segment_traits_t::embedded_segments;
			     i < block_table_size; ++i)
				this->my_table[i].swap(table.my_table[i]);

			transaction::commit();
		}
	}

	/** Get the pool base handle for the pool this map resides in. */
	pool_base
	get_pool_base()
	{
		PMEMobjpool *pop =
			pmemobj_pool_by_oid(PMEMoid{my_pool_uuid, 0});

		return pool_base(pop);
	}
}; /* class hash_map_base */
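/*
 * Illustrative note: my_mask is transient state. calculate_mask() derives
 * it on every pool open as (embedded_buckets - 1) plus the sizes of all
 * valid segments, so it always has the form 2^k - 1. A hypothetical
 * invariant check over any class exposing mask():
 */
#if 0
template <typename Base>
void
check_mask_invariant(const Base &base)
{
	auto m = base.mask().load(std::memory_order_relaxed);

	assert((m & (m + 1)) == 0); /* all-ones mask, i.e. 2^k - 1 */
}
#endif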
/**
 * Forward iterator over the hash map.
 */
template <typename Container, bool is_const>
class hash_map_iterator {
public:
	using iterator_category = std::forward_iterator_tag;
	using difference_type = ptrdiff_t;
	using map_type = Container;
	using value_type = typename map_type::value_type;
	using node = typename map_type::node;
	using bucket = typename map_type::bucket;
	using map_ptr = typename std::conditional<is_const, const map_type *,
						  map_type *>::type;
	using reference =
		typename std::conditional<is_const,
					  typename map_type::const_reference,
					  typename map_type::reference>::type;
	using pointer =
		typename std::conditional<is_const,
					  typename map_type::const_pointer,
					  typename map_type::pointer>::type;

	template <typename C, bool M, bool U>
	friend bool operator==(const hash_map_iterator<C, M> &i,
			       const hash_map_iterator<C, U> &j);

	template <typename C, bool M, bool U>
	friend bool operator!=(const hash_map_iterator<C, M> &i,
			       const hash_map_iterator<C, U> &j);

	friend class hash_map_iterator<map_type, true>;

#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
private:
	template <typename Key, typename T, typename Hash, typename KeyEqual,
		  typename MutexType, typename ScopedLockType>
	friend class ::pmem::obj::concurrent_hash_map;
#endif

	hash_map_iterator(map_ptr map, size_t index)
		: my_map(map),
		  my_index(index),
		  my_bucket(nullptr),
		  my_node(nullptr)
	{
		if (my_index <= my_map->mask()) {
			bucket_accessor acc(my_map, my_index);
			my_bucket = acc.get();
			my_node = static_cast<node *>(
				my_bucket->node_list.get(my_map->my_pool_uuid));

			if (!my_node)
				advance_to_next_bucket();
		}
	}

public:
	/** Construct an undefined iterator. */
	hash_map_iterator() = default;

	/** Copy constructor. */
	hash_map_iterator(const hash_map_iterator &other)
		: my_map(other.my_map),
		  my_index(other.my_index),
		  my_bucket(other.my_bucket),
		  my_node(other.my_node)
	{
	}

	/** Copy constructor for const iterator from non-const iterator. */
	template <typename U = void,
		  typename = typename std::enable_if<is_const, U>::type>
	hash_map_iterator(const hash_map_iterator<map_type, false> &other)
		: my_map(other.my_map),
		  my_index(other.my_index),
		  my_bucket(other.my_bucket),
		  my_node(other.my_node)
	{
	}

	hash_map_iterator &operator=(const hash_map_iterator &it) = default;

	/** Indirection (dereference). */
	reference operator*() const
	{
		assert(my_node);
		return my_node->item;
	}

	/** Member access. */
	pointer operator->() const
	{
		return &operator*();
	}

	/** Prefix increment. */
	hash_map_iterator &
	operator++()
	{
		my_node = static_cast<node *>(
			my_node->next.get((my_map->my_pool_uuid)));

		if (!my_node)
			advance_to_next_bucket();

		return *this;
	}

	/** Postfix increment. */
	hash_map_iterator
	operator++(int)
	{
		hash_map_iterator old(*this);
		operator++();
		return old;
	}

private:
	/** Map over which we are iterating. */
	map_ptr my_map = nullptr;

	/** Bucket index for the current item. */
	size_t my_index = 0;

	/** Pointer to the bucket. */
	bucket *my_bucket = nullptr;

	/** Pointer to the current item. */
	node *my_node = nullptr;

	class bucket_accessor {
	public:
		bucket_accessor(map_ptr m, size_t index)
		{
			my_bucket = m->get_bucket(index);
		}

		bucket *
		get() const
		{
			return my_bucket;
		}

	private:
		bucket *my_bucket;
	};

	void
	advance_to_next_bucket()
	{
		size_t k = my_index + 1;

		assert(my_bucket);

		while (k <= my_map->mask()) {
			bucket_accessor acc(my_map, k);
			my_bucket = acc.get();

			if (my_bucket->node_list) {
				my_node = static_cast<node *>(
					my_bucket->node_list.get(
						my_map->my_pool_uuid));
				my_index = k;
				return;
			}

			++k;
		}

		my_bucket = nullptr;
		my_node = nullptr;
		my_index = k;
	}
}; /* class hash_map_iterator */

template <typename Container, bool M, bool U>
bool
operator==(const hash_map_iterator<Container, M> &i,
	   const hash_map_iterator<Container, U> &j)
{
	return i.my_node == j.my_node && i.my_map == j.my_map;
}

template <typename Container, bool M, bool U>
bool
operator!=(const hash_map_iterator<Container, M> &i,
	   const hash_map_iterator<Container, U> &j)
{
	return i.my_node != j.my_node || i.my_map != j.my_map;
}
} /* namespace concurrent_hash_map_internal */
/**
 * Persistent memory aware implementation of Intel TBB concurrent_hash_map.
 * Ref: https://arxiv.org/abs/1509.02235
 */
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
class concurrent_hash_map
    : protected concurrent_hash_map_internal::hash_map_base<Key, T, MutexType,
							     ScopedLockType> {
	template <typename Container, bool is_const>
	friend class concurrent_hash_map_internal::hash_map_iterator;

public:
	using size_type = typename concurrent_hash_map_internal::hash_map_base<
		Key, T, MutexType, ScopedLockType>::size_type;
	using hashcode_type =
		typename concurrent_hash_map_internal::hash_map_base<
			Key, T, MutexType, ScopedLockType>::hashcode_type;
	using key_type = Key;
	using mapped_type = T;
	using value_type = typename concurrent_hash_map_internal::hash_map_base<
		Key, T, MutexType, ScopedLockType>::node::value_type;
	using difference_type = ptrdiff_t;
	using pointer = value_type *;
	using const_pointer = const value_type *;
	using reference = value_type &;
	using const_reference = const value_type &;
	using iterator = concurrent_hash_map_internal::hash_map_iterator<
		concurrent_hash_map, false>;
	using const_iterator = concurrent_hash_map_internal::hash_map_iterator<
		concurrent_hash_map, true>;
	using hasher = Hash;
	using key_equal = typename concurrent_hash_map_internal::
		key_equal_type<Hash, KeyEqual>::type;

protected:
	using mutex_t = MutexType;
	using scoped_t = ScopedLockType;

	/*
	 * Explicitly use methods and types from the template base class.
	 */
	using hash_map_base =
		concurrent_hash_map_internal::hash_map_base<Key, T, mutex_t,
							    scoped_t>;
	using hash_map_base::calculate_mask;
	using hash_map_base::check_growth;
	using hash_map_base::check_mask_race;
	using hash_map_base::embedded_buckets;
	using hash_map_base::get_bucket;
	using hash_map_base::get_pool_base;
	using hash_map_base::header_features;
	using hash_map_base::insert_new_node;
	using hash_map_base::internal_swap;
	using hash_map_base::layout_features;
	using hash_map_base::mask;
	using hash_map_base::reserve;
	using node = typename hash_map_base::node;
	using node_mutex_t = typename node::mutex_t;
	using node_ptr_t = typename hash_map_base::node_ptr_t;
	using bucket = typename hash_map_base::bucket;
	using bucket_lock_type = typename bucket::scoped_t;
	using segment_index_t = typename hash_map_base::segment_index_t;
	using segment_traits_t = typename hash_map_base::segment_traits_t;
	using segment_facade_t = typename hash_map_base::segment_facade_t;
	using scoped_lock_traits_type =
		concurrent_hash_map_internal::scoped_lock_traits<scoped_t>;

	friend class const_accessor;
	using persistent_node_ptr_t = detail::persistent_pool_ptr<node>;

	void
	delete_node(const node_ptr_t &n)
	{
		delete_persistent<node>(
			detail::static_persistent_pool_pointer_cast<node>(n)
				.get_persistent_ptr(this->my_pool_uuid));
	}
	template <typename K>
	persistent_node_ptr_t
	search_bucket(const K &key, bucket *b) const
	{
		assert(b->is_rehashed(std::memory_order_relaxed));

		persistent_node_ptr_t n =
			detail::static_persistent_pool_pointer_cast<node>(
				b->node_list);

		while (n &&
		       !key_equal{}(
			       key, n.get(this->my_pool_uuid)->item.first)) {
			n = detail::static_persistent_pool_pointer_cast<node>(
				n.get(this->my_pool_uuid)->next);
		}

		return n;
	}
	/**
	 * Bucket accessor: find, rehash if needed, acquire a lock, and access
	 * a bucket.
	 */
	class bucket_accessor : public bucket_lock_type {
		bucket *my_b;

	public:
		bucket_accessor(concurrent_hash_map *base,
				const hashcode_type h, bool writer = false)
		{
			acquire(base, h, writer);
		}

		/**
		 * Find a bucket by masked hashcode, optionally rehash, and
		 * acquire the lock.
		 */
		inline void
		acquire(concurrent_hash_map *base, const hashcode_type h,
			bool writer = false)
		{
			my_b = base->get_bucket(h);

			if (my_b->is_rehashed(std::memory_order_acquire) ==
				    false &&
			    bucket_lock_type::try_acquire(this->my_b->mutex,
							  /*write=*/true)) {
				if (my_b->is_rehashed(
					    std::memory_order_relaxed) ==
				    false) {
					/* recursive rehashing */
					base->rehash_bucket<false>(my_b, h);
				}
			} else {
				bucket_lock_type::acquire(my_b->mutex, writer);
			}

			assert(my_b->is_rehashed(std::memory_order_relaxed));
		}

		/** Check whether the bucket is locked for write. */
		bool
		is_writer() const
		{
			return bucket_lock_type::is_writer;
		}

		/** Get the bucket pointer. */
		bucket *
		get() const
		{
			return my_b;
		}

		/** Overloaded arrow operator. */
		bucket *operator->() const
		{
			return this->get();
		}
	};

	/**
	 * Serial bucket accessor used to access buckets in serial operations.
	 */
	class serial_bucket_accessor {
		bucket *my_b;

	public:
		serial_bucket_accessor(concurrent_hash_map *base,
				       const hashcode_type h,
				       bool writer = false)
		{
			acquire(base, h, writer);
		}

		/** Find a bucket by masked hashcode, optionally rehash. */
		inline void
		acquire(concurrent_hash_map *base, const hashcode_type h,
			bool writer = false)
		{
			my_b = base->get_bucket(h);

			if (my_b->is_rehashed(std::memory_order_relaxed) ==
			    false) {
				/* recursive rehashing */
				base->rehash_bucket<true>(my_b, h);
			}

			assert(my_b->is_rehashed(std::memory_order_relaxed));
		}

		/**
		 * Present for interface parity with bucket_accessor.
		 * @return always true
		 */
		bool
		is_writer() const
		{
			return true;
		}

		/** Get the bucket pointer. */
		bucket *
		get() const
		{
			return my_b;
		}

		/** Overloaded arrow operator. */
		bucket *operator->() const
		{
			return this->get();
		}
	};
	/** Get the hash code for a node. */
	hashcode_type
	get_hash_code(node_ptr_t &n)
	{
		return hasher{}(
			detail::static_persistent_pool_pointer_cast<node>(n)(
				this->my_pool_uuid)
				->item.first);
	}

	template <bool serial>
	void
	rehash_bucket(bucket *b_new, const hashcode_type h)
	{
		using accessor_type = typename std::conditional<
			serial, serial_bucket_accessor, bucket_accessor>::type;

		using scoped_lock_traits_type =
			concurrent_hash_map_internal::scoped_lock_traits<
				accessor_type>;

		/* the first two buckets are always rehashed */
		assert(h > 1);

		pool_base pop = get_pool_base();
		node_ptr_t *p_new = &(b_new->node_list);
		bool restore_after_crash = *p_new != nullptr;

		/* get the parent mask from the topmost bit */
		hashcode_type mask = (1u << detail::Log2(h)) - 1;
		assert((h & mask) < h);
		accessor_type b_old(
			this, h & mask,
			scoped_lock_traits_type::initial_rw_state(true));

		/* get the full mask for the new bucket */
		mask = (mask << 1) | 1;
		assert((mask & (mask + 1)) == 0 && (h & mask) == h);

	restart:
		for (node_ptr_t *p_old = &(b_old->node_list), n = *p_old; n;
		     n = *p_old) {
			hashcode_type c = get_hash_code(n);
#ifndef NDEBUG
			hashcode_type bmask = h & (mask >> 1);

			bmask = bmask == 0
				? 1 /* minimal mask of the parent bucket */
				: (1u << (detail::Log2(bmask) + 1)) - 1;

			assert((c & bmask) == (h & bmask));
#endif

			if ((c & mask) == h) {
				if (!b_old.is_writer() &&
				    !scoped_lock_traits_type::
					    upgrade_to_writer(b_old)) {
					goto restart;
					/* node pointer may be invalid due to
					 * a concurrent erase */
				}

				if (restore_after_crash) {
					while (*p_new != nullptr &&
					       (mask & get_hash_code(*p_new)) ==
						       h &&
					       *p_new != n) {
						p_new = &((*p_new)(
								  this->my_pool_uuid)
								  ->next);
					}

					restore_after_crash = false;
				}

				/* add to the new bucket */
				*p_new = n;
				pop.persist(p_new, sizeof(*p_new));

				/* exclude from the old bucket */
				*p_old = n(this->my_pool_uuid)->next;
				pop.persist(p_old, sizeof(*p_old));

				p_new = &(n(this->my_pool_uuid)->next);
			} else {
				/* iterate to the next item */
				p_old = &(n(this->my_pool_uuid)->next);
			}
		}

		if (restore_after_crash) {
			while (*p_new != nullptr &&
			       (mask & get_hash_code(*p_new)) == h)
				p_new = &((*p_new)(this->my_pool_uuid)->next);
		}

		*p_new = nullptr;
		pop.persist(p_new, sizeof(*p_new));

		/* mark the new bucket as rehashed */
		b_new->set_rehashed(std::memory_order_release);
		pop.persist(b_new->rehashed);
	}
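	/*
	 * Worked example (illustrative): rehash_bucket splits a parent bucket
	 * lazily. For a new bucket h = 6 (0b110):
	 *
	 *   parent mask = (1 << Log2(6)) - 1 = 3, so the parent bucket is
	 *   h & 3 = 2; the full mask becomes (3 << 1) | 1 = 7.
	 *
	 * Every node whose hash code c satisfies (c & 7) == 6 migrates from
	 * bucket 2 to bucket 6; the rest stay. This is also why a lookup that
	 * loses a mask race (see check_mask_race) only needs to re-check one
	 * parent chain.
	 */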
	void
	check_incompat_features()
	{
		if (layout_features.incompat != header_features.incompat)
			throw pmem::layout_error(
				"Incompat flags mismatch, for more details go to: https://pmem.io/pmdk/cpp_obj/ \n");
	}
public:
	/**
	 * Combines data access, locking, and garbage collection.
	 */
	class const_accessor : protected node::scoped_t {
		friend class concurrent_hash_map<Key, T, Hash, KeyEqual,
						 mutex_t, scoped_t>;
		friend class accessor;
		using node::scoped_t::try_acquire;

	public:
		/** Type of value. */
		using value_type =
			const typename concurrent_hash_map::value_type;

		/** Release the accessor. */
		void
		release()
		{
			concurrent_hash_map_internal::check_outside_tx();

			if (my_node) {
				node::scoped_t::release();
				my_node = nullptr;
			}
		}

		/** @return reference to the associated value. */
		const_reference operator*() const
		{
			assert(my_node);
			return my_node->item;
		}

		/** @return pointer to the associated value. */
		const_pointer operator->() const
		{
			return &operator*();
		}

		/** Create an empty result. */
		const_accessor() : my_node(nullptr)
		{
			concurrent_hash_map_internal::check_outside_tx();
		}

		/* destructor and empty() dropped in extraction */

	protected:
		pmem::obj::persistent_ptr<node> my_node;

		hashcode_type my_hash;
	};

	/**
	 * Allows write access to elements; combines data access, locking,
	 * and garbage collection.
	 */
	class accessor : public const_accessor {
	public:
		/** Type of value. */
		using value_type = typename concurrent_hash_map::value_type;

		/** Return reference to the associated value. */
		reference operator*() const
		{
			assert(this->my_node);
			return this->my_node->item;
		}

		/** Return pointer to the associated value. */
		pointer operator->() const
		{
			return &operator*();
		}
	};
	/**
	 * Construct an empty table.
	 */
	concurrent_hash_map() : hash_map_base()
	{
		runtime_initialize(true);
	}

	/**
	 * Construct an empty table with n preallocated buckets.
	 */
	concurrent_hash_map(size_type n) : hash_map_base()
	{
		runtime_initialize(true);

		reserve(n);
	}

	/**
	 * Copy constructor.
	 */
	concurrent_hash_map(const concurrent_hash_map &table) : hash_map_base()
	{
		runtime_initialize(true);

		reserve(table.size());

		internal_copy(table);
	}

	/**
	 * Move constructor.
	 */
	concurrent_hash_map(concurrent_hash_map &&table) : hash_map_base()
	{
		runtime_initialize(true);

		swap(table);
	}

	/**
	 * Construct a table copying the iteration range [first, last).
	 */
	template <typename I>
	concurrent_hash_map(I first, I last)
	{
		runtime_initialize(true);

		reserve(static_cast<size_type>(std::distance(first, last)));

		internal_copy(first, last);
	}

	/**
	 * Construct a table from an initializer list.
	 */
	concurrent_hash_map(std::initializer_list<value_type> il)
	{
		runtime_initialize(true);

		reserve(il.size());

		internal_copy(il.begin(), il.end());
	}

	/**
	 * Initialize the persistent concurrent hash map after process
	 * restart. MUST be called every time after process restart.
	 * Not thread safe.
	 */
	void
	runtime_initialize(bool graceful_shutdown = false)
	{
		check_incompat_features();

		calculate_mask();

		if (!graceful_shutdown) {
			auto actual_size =
				std::distance(this->begin(), this->end());
			assert(actual_size >= 0);
			this->restore_size(size_type(actual_size));
		} else {
			assert(this->size() ==
			       size_type(std::distance(this->begin(),
						       this->end())));
		}
	}
	/**
	 * Assignment. Not thread safe.
	 */
	concurrent_hash_map &
	operator=(const concurrent_hash_map &table)
	{
		if (this != &table) {
			clear();
			internal_copy(table);
		}

		return *this;
	}

	/**
	 * Assignment from an initializer list. Not thread safe.
	 */
	concurrent_hash_map &
	operator=(std::initializer_list<value_type> il)
	{
		clear();

		internal_copy(il.begin(), il.end());

		return *this;
	}
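	/*
	 * Usage sketch (illustrative; the pool layout, layout string and names
	 * are hypothetical): a map stored in a pmem::obj::pool must be
	 * re-initialized with runtime_initialize() on every process restart
	 * before any other member function is called.
	 */
#if 0
	using map_t = pmem::obj::concurrent_hash_map<pmem::obj::p<int>,
						     pmem::obj::p<int>>;

	struct root {
		pmem::obj::persistent_ptr<map_t> map;
	};

	void
	open_and_recover(const char *path)
	{
		auto pop = pmem::obj::pool<root>::open(path, "example_layout");
		auto r = pop.root();

		if (r->map == nullptr)
			/* first run: allocate the map transactionally */
			pmem::obj::transaction::run(pop, [&] {
				r->map = pmem::obj::make_persistent<map_t>();
			});
		else
			/* restart: recompute transient state (mask, size);
			 * false = do not assume a graceful shutdown */
			r->map->runtime_initialize(false);
	}
#endif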
	/**
	 * Rehash the table if necessary. Not thread safe.
	 */
	void rehash(size_type n = 0);

	/**
	 * Clear the hash map content. Not thread safe.
	 */
	void clear();

	/**
	 * Clear the table and destroy it. Not thread safe.
	 */
	~concurrent_hash_map()
	{
		clear();
	}

	//--------------------------------------------------------------------
	// STL support - not thread-safe methods
	//--------------------------------------------------------------------

	/** @returns iterator to the beginning. */
	iterator
	begin()
	{
		return iterator(this, 0);
	}

	/** @returns iterator to the end. */
	iterator
	end()
	{
		return iterator(this, mask() + 1);
	}

	/** @returns const iterator to the beginning. */
	const_iterator
	begin() const
	{
		return const_iterator(this, 0);
	}

	/** @returns const iterator to the end. */
	const_iterator
	end() const
	{
		return const_iterator(this, mask() + 1);
	}

	/** @returns number of items in the table. */
	size_type
	size() const
	{
		return this->my_size.get_ro();
	}

	/** @returns true if the container is empty. */
	bool
	empty() const
	{
		return this->my_size.get_ro() == 0;
	}

	/** Upper bound on the size. */
	size_type
	max_size() const
	{
		return (~size_type(0)) / sizeof(node);
	}

	/** @returns the current number of buckets. */
	size_type
	bucket_count() const
	{
		return mask() + 1;
	}

	/** Swap two instances. Not thread safe. */
	void swap(concurrent_hash_map &table);
	//--------------------------------------------------------------------
	// Concurrent map operations
	//--------------------------------------------------------------------

	/** @return count of items (0 or 1). */
	size_type
	count(const Key &key) const
	{
		concurrent_hash_map_internal::check_outside_tx();

		return const_cast<concurrent_hash_map *>(this)->internal_find(
			key, nullptr, false);
	}

	/**
	 * This overload only participates in overload resolution if the
	 * qualified-id Hash::transparent_key_equal is valid and denotes a
	 * type.
	 * @return count of items (0 or 1).
	 */
	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
	size_type
	count(const K &key) const
	{
		concurrent_hash_map_internal::check_outside_tx();

		return const_cast<concurrent_hash_map *>(this)->internal_find(
			key, nullptr, false);
	}

	/**
	 * Find item and acquire a read lock on the item.
	 * @return true if the item is found, false otherwise.
	 */
	bool
	find(const_accessor &result, const Key &key) const
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return const_cast<concurrent_hash_map *>(this)->internal_find(
			key, &result, false);
	}

	/** Heterogeneous-key overload of the above. */
	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
	bool
	find(const_accessor &result, const K &key) const
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return const_cast<concurrent_hash_map *>(this)->internal_find(
			key, &result, false);
	}

	/**
	 * Find item and acquire a write lock on the item.
	 */
	bool
	find(accessor &result, const Key &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_find(key, &result, true);
	}

	/** Heterogeneous-key overload of the above. */
	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
	bool
	find(accessor &result, const K &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_find(key, &result, true);
	}

	/**
	 * Insert item (if not already present) and acquire a read lock on
	 * the item.
	 */
	bool
	insert(const_accessor &result, const Key &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(key, &result, false, key);
	}

	/**
	 * Insert item (if not already present) and acquire a write lock on
	 * the item.
	 */
	bool
	insert(accessor &result, const Key &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(key, &result, true, key);
	}

	/**
	 * Insert item by copying if there is no such key present already and
	 * acquire a read lock on the item.
	 */
	bool
	insert(const_accessor &result, const value_type &value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(value.first, &result, false, value);
	}

	/**
	 * Insert item by copying if there is no such key present already and
	 * acquire a write lock on the item.
	 */
	bool
	insert(accessor &result, const value_type &value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(value.first, &result, true, value);
	}

	/**
	 * Insert item by copying if there is no such key present already.
	 */
	bool
	insert(const value_type &value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		return internal_insert(value.first, nullptr, false, value);
	}

	/**
	 * Insert item by moving if there is no such key present already and
	 * acquire a read lock on the item.
	 */
	bool
	insert(const_accessor &result, value_type &&value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(value.first, &result, false,
				       std::move(value));
	}

	/**
	 * Insert item by moving if there is no such key present already and
	 * acquire a write lock on the item.
	 */
	bool
	insert(accessor &result, value_type &&value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(value.first, &result, true,
				       std::move(value));
	}

	/**
	 * Insert item by moving if there is no such key present already.
	 */
	bool
	insert(value_type &&value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		return internal_insert(value.first, nullptr, false,
				       std::move(value));
	}

	/**
	 * Insert range [first, last).
	 */
	template <typename I>
	void
	insert(I first, I last)
	{
		concurrent_hash_map_internal::check_outside_tx();

		for (; first != last; ++first)
			insert(*first);
	}

	/**
	 * Insert an initializer list.
	 */
	void
	insert(std::initializer_list<value_type> il)
	{
		concurrent_hash_map_internal::check_outside_tx();

		insert(il.begin(), il.end());
	}

	/**
	 * Remove the element with the corresponding key.
	 * @return true if the element was deleted by this call.
	 */
	bool
	erase(const Key &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		return internal_erase(key);
	}

	/** Heterogeneous-key overload of the above. */
	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
	bool
	erase(const K &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		return internal_erase(key);
	}
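	/*
	 * Usage sketch (illustrative): the typical accessor-based access
	 * pattern. persistent_map stands for a hypothetical, already
	 * runtime-initialized map instance.
	 */
#if 0
	using map_t = pmem::obj::concurrent_hash_map<pmem::obj::p<int>,
						     pmem::obj::p<int>>;

	void
	accessor_example(map_t &persistent_map)
	{
		/* Insert (or find) key 42, holding a write lock on it. */
		{
			map_t::accessor acc;
			persistent_map.insert(acc, 42);
			acc->second = 1; /* write under the per-node lock */
		} /* lock released when the accessor goes out of scope */

		/* Read-only lookup under a read lock. */
		{
			map_t::const_accessor cacc;
			if (persistent_map.find(cacc, 42))
				assert(cacc->second == 1);
		}

		/* Erase does not require an accessor. */
		persistent_map.erase(42);
	}
#endif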
protected:
	/*
	 * Try to acquire the mutex for read or write, retrying a few times
	 * on contention. Returns false if all attempts fail.
	 */
	bool try_acquire_item(const_accessor *result, node_mutex_t &mutex,
			      bool write);

	template <typename K>
	bool internal_find(const K &key, const_accessor *result, bool write);

	template <typename K, typename... Args>
	bool internal_insert(const K &key, const_accessor *result, bool write,
			     Args &&... args);

	/* Obtain a pointer to the node and lock the bucket. */
	template <bool Bucket_rw_lock, typename K>
	persistent_node_ptr_t
	get_node(const K &key, bucket_accessor &b)
	{
		/* find the node */
		auto n = search_bucket(key, b.get());

		if (!n) {
			if (Bucket_rw_lock && !b.is_writer() &&
			    !scoped_lock_traits_type::upgrade_to_writer(b)) {
				/* Rerun the search, in case another thread
				 * inserted the item during the upgrade. */
				n = search_bucket(key, b.get());
				if (n) {
					/* unfortunately, it did */
					scoped_lock_traits_type::
						downgrade_to_reader(b);
					return n;
				}
			}
		}

		return n;
	}

	template <typename K>
	bool internal_erase(const K &key);

	void clear_segment(segment_index_t s);

	/**
	 * Copy "source" to *this, where *this must start out empty.
	 */
	void internal_copy(const concurrent_hash_map &source);

	template <typename I>
	void internal_copy(I first, I last);
}; /* class concurrent_hash_map */
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::try_acquire_item(const_accessor *result,
						      node_mutex_t &mutex,
						      bool write)
{
	/* acquire the item */
	if (!result->try_acquire(mutex, write)) {
		for (detail::atomic_backoff backoff(true);;) {
			if (result->try_acquire(mutex, write))
				break;

			if (!backoff.bounded_pause())
				return false;
		}
	}

	return true;
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename K>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_find(const K &key,
						   const_accessor *result,
						   bool write)
{
	assert(!result || !result->my_node);

	hashcode_type m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
	ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

	assert((m & (m + 1)) == 0);

	hashcode_type const h = hasher{}(key);

	persistent_node_ptr_t node;

	while (true) {
		/* get the bucket and acquire the lock */
		bucket_accessor b(
			this, h & m,
			scoped_lock_traits_type::initial_rw_state(false));
		node = get_node<false>(key, b);

		if (!node) {
			/* the element was possibly relocated, try again */
			if (check_mask_race(h, m)) {
				b.release();
				continue;
			} else {
				return false;
			}
		}

		/* No need to acquire the item, or the item was acquired. */
		if (!result ||
		    try_acquire_item(result,
				     node.get(this->my_pool_uuid)->mutex,
				     write))
			break;

		/* the wait takes really long; restart the operation */
		b.release();

		std::this_thread::yield();

		m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif
	}

	if (result) {
		result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
		result->my_hash = h;
	}

	return true;
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename K, typename... Args>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_insert(const K &key,
						     const_accessor *result,
						     bool write,
						     Args &&... args)
{
	assert(!result || !result->my_node);

	hashcode_type m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
	ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

	assert((m & (m + 1)) == 0);

	hashcode_type const h = hasher{}(key);

	persistent_node_ptr_t node;
	size_t new_size = 0;
	bool inserted = false;

	while (true) {
		/* get the bucket and acquire the lock */
		bucket_accessor b(
			this, h & m,
			scoped_lock_traits_type::initial_rw_state(true));
		node = get_node<true>(key, b);

		if (!node) {
			/* the element was possibly relocated, try again */
			if (check_mask_race(h, m)) {
				b.release();
				continue;
			}

			/* insert and set the flag to grow the container */
			new_size = insert_new_node(
				b.get(), node, std::forward<Args>(args)...);
			inserted = true;
		}

		/* No need to acquire the item, or the item was acquired. */
		if (!result ||
		    try_acquire_item(result,
				     node.get(this->my_pool_uuid)->mutex,
				     write))
			break;

		/* the wait takes really long; restart the operation */
		b.release();

		std::this_thread::yield();

		m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif
	}

	if (result) {
		result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
		result->my_hash = h;
	}

	check_growth(m, new_size);

	return inserted;
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename K>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_erase(const K &key)
{
	node_ptr_t n;
	hashcode_type const h = hasher{}(key);
	hashcode_type m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
	ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

	pool_base pop = get_pool_base();

restart : {
	/* lock scope: get the bucket */
	bucket_accessor b(this, h & m,
			  scoped_lock_traits_type::initial_rw_state(true));

search:
	node_ptr_t *p = &b->node_list;
	n = *p;

	while (n &&
	       !key_equal{}(key,
			    detail::static_persistent_pool_pointer_cast<node>(
				    n)(this->my_pool_uuid)
				    ->item.first)) {
		p = &n(this->my_pool_uuid)->next;
		n = *p;
	}

	if (!n) {
		/* not found, but the mask could have changed */
		if (check_mask_race(h, m))
			goto restart;

		return false;
	} else if (!b.is_writer() &&
		   !scoped_lock_traits_type::upgrade_to_writer(b)) {
		if (check_mask_race(h, m)) /* contended upgrade, check mask */
			goto restart;

		goto search;
	}

	persistent_ptr<node> del = n(this->my_pool_uuid);

	{
		/* The node cannot be removed simultaneously with replacing
		 * its mutex. */
		typename node::scoped_t item_locker(del->mutex,
						    /*write=*/true);
	}

	/* Only one thread can delete it due to the write lock on the bucket */
	{
		transaction::manual tx(pop);

		*p = del->next;
		delete_node(del);

		transaction::commit();
	}

	--(this->my_size.get_rw());
	pop.persist(&this->my_size, sizeof(this->my_size));
}

	return true;
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::swap(
	concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>
		&table)
{
	internal_swap(table);
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::rehash(
	size_type sz)
{
	concurrent_hash_map_internal::check_outside_tx();

	reserve(sz);
	hashcode_type m = mask();

	/* only the last segment should be scanned for rehashing; m + 1 is
	 * the size, so (m + 1) >> 1 is the first index of the last segment */
	hashcode_type b = (m + 1) >> 1;

	/* zero or a power of 2 */
	assert((b & (b - 1)) == 0);

	for (; b <= m; ++b) {
		bucket *bp = get_bucket(b);

		concurrent_hash_map_internal::assert_not_locked<mutex_t,
								scoped_t>(
			bp->mutex);

		if (bp->is_rehashed(std::memory_order_relaxed) == false)
			rehash_bucket<true>(bp, b);
	}
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::clear()
{
	hashcode_type m = mask();

	assert((m & (m + 1)) == 0);

#ifndef NDEBUG
	/* check consistency */
	for (segment_index_t b = 0; b <= m; ++b) {
		bucket *bp = get_bucket(b);
		concurrent_hash_map_internal::assert_not_locked<mutex_t,
								scoped_t>(
			bp->mutex);
	}
#endif

	pool_base pop = get_pool_base();
	{ /* transaction scope */
		transaction::manual tx(pop);

		this->my_size.get_rw() = 0;
		segment_index_t s = segment_traits_t::segment_index_of(m);

		assert(s + 1 == this->block_table_size ||
		       !segment_facade_t(this->my_table, s + 1).is_valid());

		do {
			clear_segment(s);
		} while (s-- > 0);

		mask().store(embedded_buckets - 1, std::memory_order_relaxed);

		transaction::commit();
	}
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::clear_segment(segment_index_t s)
{
	segment_facade_t segment(this->my_table, s);

	assert(segment.is_valid());

	size_type sz = segment.size();
	for (segment_index_t i = 0; i < sz; ++i) {
		for (node_ptr_t n = segment[i].node_list; n;
		     n = segment[i].node_list) {
			segment[i].node_list = n(this->my_pool_uuid)->next;
			delete_node(n);
		}
	}

	if (s >= segment_traits_t::embedded_segments)
		segment.disable();
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_copy(const concurrent_hash_map
							   &source)
{
	reserve(source.my_size.get_ro());
	internal_copy(source.begin(), source.end());
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename I>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_copy(I first, I last)
{
	hashcode_type m = mask();

	for (; first != last; ++first) {
		hashcode_type h = hasher{}(first->first);
		bucket *b = get_bucket(h & m);

		assert(b->is_rehashed(std::memory_order_relaxed));

		detail::persistent_pool_ptr<node> p;
		insert_new_node(b, p, *first);
	}
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
inline bool
operator==(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType> &a,
	   const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType> &b)
{
	if (a.size() != b.size())
		return false;

	typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType>::const_iterator
		i(a.begin()),
		i_end(a.end());

	typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType>::const_iterator j,
		j_end(b.end());

	for (; i != i_end; ++i) {
		j = b.equal_range(i->first).first;

		if (j == j_end || !(i->second == j->second))
			return false;
	}

	return true;
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
inline bool
operator!=(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType> &a,
	   const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType> &b)
{
	return !(a == b);
}

/**
 * Non-member swap.
 */
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
inline void
swap(concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &a,
     concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &b)
{
	a.swap(b);
}

} /* namespace obj */
} /* namespace pmem */

#endif /* PMEMOBJ_CONCURRENT_HASH_MAP_HPP */