PMDK C++ bindings 1.8
This is the C++ bindings documentation for PMDK's libpmemobj.
concurrent_hash_map.hpp
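Before the full header listing, here is a minimal usage sketch (not part of the header itself). It assumes the header is installed as <libpmemobj++/container/concurrent_hash_map.hpp>; the pool path "/pmem/cmap_example" and layout name "cmap_example" are made up for illustration. Note that insert/find/erase are meant to be called outside of transactions, while the map object itself is allocated inside one.

#include <libpmemobj++/container/concurrent_hash_map.hpp>
#include <libpmemobj++/make_persistent.hpp>
#include <libpmemobj++/p.hpp>
#include <libpmemobj++/persistent_ptr.hpp>
#include <libpmemobj++/pool.hpp>
#include <libpmemobj++/transaction.hpp>

#include <iostream>

using map_type =
        pmem::obj::concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>>;

struct root {
        pmem::obj::persistent_ptr<map_type> map;
};

int
main()
{
        /* hypothetical pool file and layout name */
        auto pop = pmem::obj::pool<root>::open("/pmem/cmap_example",
                                               "cmap_example");
        auto r = pop.root();

        if (r->map == nullptr) {
                /* first use: allocate the map inside a transaction */
                pmem::obj::transaction::run(pop, [&] {
                        r->map = pmem::obj::make_persistent<map_type>();
                });
        } else {
                /* after restart: rebuild the volatile mask and element count */
                r->map->runtime_initialize();
        }

        /* concurrent operations must be called outside of transactions */
        r->map->insert(map_type::value_type(1, 100));

        /* read access holds a shared lock for as long as the accessor lives */
        map_type::const_accessor acc;
        if (r->map->find(acc, 1))
                std::cout << acc->second.get_ro() << std::endl;
        acc.release();

        pop.close();
        return 0;
}

On the first open the map is created; on subsequent opens runtime_initialize() recomputes the volatile mask and, after an unclean shutdown, restores the element count by walking the buckets.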
1 /*
2  * Copyright 2019, Intel Corporation
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  *
8  * * Redistributions of source code must retain the above copyright
9  * notice, this list of conditions and the following disclaimer.
10  *
11  * * Redistributions in binary form must reproduce the above copyright
12  * notice, this list of conditions and the following disclaimer in
13  * the documentation and/or other materials provided with the
14  * distribution.
15  *
16  * * Neither the name of the copyright holder nor the names of its
17  * contributors may be used to endorse or promote products derived
18  * from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
38 #ifndef PMEMOBJ_CONCURRENT_HASH_MAP_HPP
39 #define PMEMOBJ_CONCURRENT_HASH_MAP_HPP
40 
41 #include <libpmemobj++/detail/atomic_backoff.hpp>
44 
46 #include <libpmemobj++/mutex.hpp>
47 #include <libpmemobj++/p.hpp>
50 
51 #include <libpmemobj++/detail/persistent_pool_ptr.hpp>
53 
54 #include <atomic>
55 #include <cassert>
56 #include <functional>
57 #include <initializer_list>
58 #include <iterator> // for std::distance
59 #include <memory>
60 #include <mutex>
61 #include <thread>
62 #include <type_traits>
63 #include <utility>
64 
65 namespace std
66 {
70 template <typename T>
71 struct hash<pmem::obj::p<T>> {
72  size_t
73  operator()(const pmem::obj::p<T> &x) const
74  {
75  return hash<T>()(x.get_ro());
76  }
77 };
78 } /* namespace std */
79 
80 namespace pmem
81 {
82 namespace obj
83 {
84 
85 namespace concurrent_hash_map_internal
86 {
87 template <typename SharedMutexT>
88 class shared_mutex_scoped_lock {
89  using rw_mutex_type = SharedMutexT;
90 
91 public:
92  shared_mutex_scoped_lock(const shared_mutex_scoped_lock &) = delete;
93  shared_mutex_scoped_lock &
94  operator=(const shared_mutex_scoped_lock &) = delete;
95 
97  shared_mutex_scoped_lock() : mutex(nullptr), is_writer(false)
98  {
99  }
100 
102  shared_mutex_scoped_lock(rw_mutex_type &m, bool write = true)
103  : mutex(nullptr)
104  {
105  acquire(m, write);
106  }
107 
109  ~shared_mutex_scoped_lock()
110  {
111  if (mutex)
112  release();
113  }
114 
116  void
117  acquire(rw_mutex_type &m, bool write = true)
118  {
119  is_writer = write;
120  mutex = &m;
121  if (write)
122  mutex->lock();
123  else
124  mutex->lock_shared();
125  }
126 
130  void
131  release()
132  {
133  assert(mutex);
134  rw_mutex_type *m = mutex;
135  mutex = nullptr;
136  if (is_writer) {
137  m->unlock();
138  } else {
139  m->unlock_shared();
140  }
141  }
142 
147  bool
148  try_acquire(rw_mutex_type &m, bool write = true)
149  {
150  assert(!mutex);
151  bool result;
152  is_writer = write;
153  result = write ? m.try_lock() : m.try_lock_shared();
154  if (result)
155  mutex = &m;
156  return result;
157  }
158 
159 protected:
164  rw_mutex_type *mutex;
169  bool is_writer;
170 }; /* class shared_mutex_scoped_lock */
171 
172 template <typename ScopedLockType>
173 using scoped_lock_upgrade_to_writer =
174  decltype(std::declval<ScopedLockType>().upgrade_to_writer());
175 
176 template <typename ScopedLockType>
177 using scoped_lock_has_upgrade_to_writer =
178  detail::supports<ScopedLockType, scoped_lock_upgrade_to_writer>;
179 
180 template <typename ScopedLockType>
181 using scoped_lock_downgrade_to_reader =
182  decltype(std::declval<ScopedLockType>().downgrade_to_reader());
183 
184 template <typename ScopedLockType>
185 using scoped_lock_has_downgrade_to_reader =
186  detail::supports<ScopedLockType, scoped_lock_downgrade_to_reader>;
187 
188 template <typename ScopedLockType,
189  bool = scoped_lock_has_upgrade_to_writer<ScopedLockType>::value
190  &&scoped_lock_has_downgrade_to_reader<ScopedLockType>::value>
191 class scoped_lock_traits {
192 public:
193  using scope_lock_type = ScopedLockType;
194 
195  static bool
196  initial_rw_state(bool write)
197  {
198  /* For upgradeable locks, initial state is always read */
199  return false;
200  }
201 
202  static bool
203  upgrade_to_writer(scope_lock_type &lock)
204  {
205  return lock.upgrade_to_writer();
206  }
207 
208  static bool
209  downgrade_to_reader(scope_lock_type &lock)
210  {
211  return lock.downgrade_to_reader();
212  }
213 };
214 
215 template <typename ScopedLockType>
216 class scoped_lock_traits<ScopedLockType, false> {
217 public:
218  using scope_lock_type = ScopedLockType;
219 
220  static bool
221  initial_rw_state(bool write)
222  {
223  /* For non-upgradeable locks, we take lock in required mode
224  * immediately */
225  return write;
226  }
227 
228  static bool
229  upgrade_to_writer(scope_lock_type &lock)
230  {
231  /* This overload is for locks which do not support upgrade
232  * operation. For those locks, upgrade_to_writer should not be
233  * called when holding a read lock */
234  return true;
235  }
236 
237  static bool
238  downgrade_to_reader(scope_lock_type &lock)
239  {
240  /* This overload is for locks which do not support downgrade
241  * operation. For those locks, downgrade_to_reader should never
242  * be called */
243  assert(false);
244 
245  return false;
246  }
247 };
248 }
249 
250 template <typename Key, typename T, typename Hash = std::hash<Key>,
251  typename KeyEqual = std::equal_to<Key>,
252  typename MutexType = pmem::obj::shared_mutex,
253  typename ScopedLockType = concurrent_hash_map_internal::
254  shared_mutex_scoped_lock<MutexType>>
255 class concurrent_hash_map;
256 
258 namespace concurrent_hash_map_internal
259 {
260 /* Helper method which throws an exception when called in a tx */
261 static inline void
262 check_outside_tx()
263 {
264  if (pmemobj_tx_stage() != TX_STAGE_NONE)
265  throw pmem::transaction_scope_error(
266  "Function called inside transaction scope.");
267 }
268 
269 template <typename Hash>
270 using transparent_key_equal = typename Hash::transparent_key_equal;
271 
272 template <typename Hash>
273 using has_transparent_key_equal = detail::supports<Hash, transparent_key_equal>;
274 
275 template <typename Hash, typename Pred,
276  bool = has_transparent_key_equal<Hash>::value>
277 struct key_equal_type {
278  using type = typename Hash::transparent_key_equal;
279 };
280 
281 template <typename Hash, typename Pred>
282 struct key_equal_type<Hash, Pred, false> {
283  using type = Pred;
284 };
285 
286 template <typename Mutex, typename ScopedLockType>
287 void
288 assert_not_locked(Mutex &mtx)
289 {
290 #ifndef NDEBUG
291  ScopedLockType scoped_lock;
292  assert(scoped_lock.try_acquire(mtx));
293  scoped_lock.release();
294 #else
295  (void)mtx;
296 #endif
297 }
298 
299 template <typename Key, typename T, typename MutexType, typename ScopedLockType>
300 struct hash_map_node {
302  using mutex_t = MutexType;
303 
305  using scoped_t = ScopedLockType;
306 
307  using value_type = std::pair<const Key, T>;
308 
310  using node_ptr_t = detail::persistent_pool_ptr<
311  hash_map_node<Key, T, mutex_t, scoped_t>>;
312 
314  node_ptr_t next;
315 
317  mutex_t mutex;
318 
320  value_type item;
321 
322  hash_map_node() : next(OID_NULL)
323  {
324  }
325 
326  hash_map_node(const node_ptr_t &_next) : next(_next)
327  {
328  }
329 
330  hash_map_node(node_ptr_t &&_next) : next(std::move(_next))
331  {
332  }
333 
334  hash_map_node(const node_ptr_t &_next, const Key &key)
335  : next(_next), item(key, T())
336  {
337  }
338 
339  hash_map_node(const node_ptr_t &_next, const Key &key, const T &t)
340  : next(_next), item(key, t)
341  {
342  }
343 
344  hash_map_node(const node_ptr_t &_next, value_type &&i)
345  : next(_next), item(std::move(i))
346  {
347  }
348 
349  template <typename... Args>
350  hash_map_node(node_ptr_t &&_next, Args &&... args)
351  : next(std::forward<node_ptr_t>(_next)),
352  item(std::forward<Args>(args)...)
353  {
354  }
355 
356  hash_map_node(const node_ptr_t &_next, const value_type &i)
357  : next(_next), item(i)
358  {
359  }
360 
362  hash_map_node(const hash_map_node &) = delete;
363 
365  hash_map_node &operator=(const hash_map_node &) = delete;
366 }; /* struct node */
367 
372 template <typename Bucket>
373 class segment_traits {
374 public:
376  using segment_index_t = size_t;
377  using size_type = size_t;
378  using bucket_type = Bucket;
379 
380 protected:
382  constexpr static size_type max_allocation_size = PMEMOBJ_MAX_ALLOC_SIZE;
383 
385  constexpr static segment_index_t first_big_block = 27;
386  /* TODO: avoid hardcoded value; need constexpr similar to:
387  * Log2(max_allocation_size / sizeof(bucket_type)) */
388 
390  constexpr static size_type big_block_size = size_type(1)
391  << first_big_block;
392 
393  /* Block size in bytes cannot exceed max_allocation_size */
394  static_assert((big_block_size * sizeof(bucket_type)) <
395  max_allocation_size,
396  "Block size exceeds max_allocation_size");
397 
399  constexpr static segment_index_t
400  first_block_in_segment(segment_index_t seg)
401  {
402  return seg < first_big_block
403  ? seg
404  : (first_big_block +
405  (segment_index_t(1) << (seg - first_big_block)) - 1);
406  }
407 
409  constexpr static size_type
410  blocks_in_segment(segment_index_t seg)
411  {
412  return seg < first_big_block
413  ? segment_index_t(1)
414  : segment_index_t(1) << (seg - first_big_block);
415  }
416 
418  constexpr static size_type
419  block_size(segment_index_t b)
420  {
421  return b < first_big_block ? segment_size(b ? b : 1)
422  : big_block_size;
423  }
424 
425 public:
427  constexpr static segment_index_t embedded_segments = 1;
428 
430  constexpr static size_type embedded_buckets = 1 << embedded_segments;
431 
433  constexpr static segment_index_t number_of_segments = 32;
434 
436  static const size_type first_block = 8;
437 
439  constexpr static segment_index_t
440  number_of_blocks()
441  {
442  return first_block_in_segment(number_of_segments);
443  }
444 
446  static segment_index_t
447  segment_index_of(size_type index)
448  {
449  return segment_index_t(detail::Log2(index | 1));
450  }
451 
453  constexpr static segment_index_t
454  segment_base(segment_index_t k)
455  {
456  return (segment_index_t(1) << k) & ~segment_index_t(1);
457  }
458 
460  constexpr static size_type
461  segment_size(segment_index_t k)
462  {
463  return size_type(1) << k; // fake value for k == 0
464  }
465  static_assert(
466  embedded_segments < first_big_block,
467  "Number of embedded segments cannot exceed max_allocation_size");
468 }; /* End of class segment_traits */
469 
486 template <typename BlockTable, typename SegmentTraits, bool is_const>
487 class segment_facade_impl : public SegmentTraits {
488 private:
489  using traits_type = SegmentTraits;
490  using traits_type::block_size;
491  using traits_type::blocks_in_segment;
492  using traits_type::embedded_buckets;
493  using traits_type::embedded_segments;
494  using traits_type::first_block;
495  using traits_type::first_block_in_segment;
496  using traits_type::segment_base;
497  using traits_type::segment_size;
498 
499 public:
500  using table_reference =
501  typename std::conditional<is_const, const BlockTable &,
502  BlockTable &>::type;
503 
504  using table_pointer =
505  typename std::conditional<is_const, const BlockTable *,
506  BlockTable *>::type;
507 
508  using bucket_type = typename traits_type::bucket_type;
509  using segment_index_t = typename traits_type::segment_index_t;
510  using size_type = typename traits_type::size_type;
511 
513  segment_facade_impl(table_reference table, segment_index_t s)
514  : my_table(&table), my_seg(s)
515  {
516  assert(my_seg < traits_type::number_of_segments);
517  }
518 
520  segment_facade_impl(const segment_facade_impl &src)
521  : my_table(src.my_table), my_seg(src.my_seg)
522  {
523  }
524 
525  segment_facade_impl(segment_facade_impl &&src) = default;
526 
528  segment_facade_impl &
529  operator=(const segment_facade_impl &src)
530  {
531  my_table = src.my_table;
532  my_seg = src.my_seg;
533  return *this;
534  }
535 
537  segment_facade_impl &
538  operator=(segment_facade_impl &&src)
539  {
540  my_table = src.my_table;
541  my_seg = src.my_seg;
542  return *this;
543  }
544 
551  bucket_type &operator[](size_type i) const
552  {
553  assert(i < size());
554 
555  segment_index_t table_block = first_block_in_segment(my_seg);
556  size_type b_size = block_size(table_block);
557 
558  table_block += i / b_size;
559  i = i % b_size;
560 
561  return (*my_table)[table_block][static_cast<std::ptrdiff_t>(i)];
562  }
563 
567  segment_facade_impl &
568  operator++()
569  {
570  ++my_seg;
571  return *this;
572  }
573 
577  segment_facade_impl
578  operator++(int)
579  {
580  segment_facade_impl tmp = *this;
581  ++(*this);
582  return tmp;
583  }
584 
588  segment_facade_impl &
589  operator--()
590  {
591  --my_seg;
592  return *this;
593  }
594 
598  segment_facade_impl
599  operator--(int)
600  {
601  segment_facade_impl tmp = *this;
602  --(*this);
603  return tmp;
604  }
605 
609  segment_facade_impl &
610  operator+=(segment_index_t off)
611  {
612  my_seg += off;
613  return *this;
614  }
615 
619  segment_facade_impl &
620  operator-=(segment_index_t off)
621  {
622  my_seg -= off;
623  return *this;
624  }
625 
629  segment_facade_impl
630  operator+(segment_index_t off) const
631  {
632  return segment_facade_impl(*(this->my_table),
633  this->my_seg + off);
634  }
635 
639  segment_facade_impl
640  operator-(segment_index_t off) const
641  {
642  return segment_facade_impl(*(this->my_table),
643  this->my_seg - off);
644  }
645 
649  void
650  enable(pool_base &pop)
651  {
652  assert(my_seg >= embedded_segments);
653 
654  if (my_seg < first_block) {
655  enable_first_block(pop);
656  } else {
657  enable_big_segment(pop);
658  }
659  }
660 
664  void
665  disable()
666  {
667  assert(my_seg >= embedded_segments);
668 
669  if (my_seg < first_block) {
670  if (my_seg == embedded_segments) {
671  size_type sz = segment_size(first_block) -
672  embedded_buckets;
673  delete_persistent<bucket_type[]>(
674  (*my_table)[my_seg], sz);
675  }
676  (*my_table)[my_seg] = nullptr;
677  } else {
678  block_range blocks = segment_blocks(my_seg);
679 
680  for (segment_index_t b = blocks.first;
681  b < blocks.second; ++b) {
682  if ((*my_table)[b] != nullptr) {
683  delete_persistent<bucket_type[]>(
684  (*my_table)[b], block_size(b));
685  (*my_table)[b] = nullptr;
686  }
687  }
688  }
689  }
690 
694  constexpr size_type
695  size() const
696  {
697  return segment_size(my_seg ? my_seg : 1);
698  }
699 
705  bool
706  is_valid() const
707  {
708  block_range blocks = segment_blocks(my_seg);
709 
710  for (segment_index_t b = blocks.first; b < blocks.second; ++b) {
711  if ((*my_table)[b] == nullptr)
712  return false;
713  }
714 
715  return true;
716  }
717 
718 private:
719  using block_range = std::pair<segment_index_t, segment_index_t>;
720 
724  static block_range
725  segment_blocks(segment_index_t seg)
726  {
727  segment_index_t begin = first_block_in_segment(seg);
728 
729  return block_range(begin, begin + blocks_in_segment(seg));
730  }
731 
732  void
733  enable_first_block(pool_base &pop)
734  {
735  assert(my_seg == embedded_segments);
736  {
737  transaction::manual tx(pop);
738 
739  size_type sz =
740  segment_size(first_block) - embedded_buckets;
741  (*my_table)[my_seg] =
742  make_persistent<bucket_type[]>(sz);
743 
744  persistent_ptr<bucket_type> base =
745  (*my_table)[embedded_segments].raw();
746 
747  for (segment_index_t s = my_seg + 1; s < first_block;
748  ++s) {
749  std::ptrdiff_t off =
750  static_cast<std::ptrdiff_t>(
751  segment_base(s) -
752  segment_base(my_seg));
753 
754  (*my_table)[s] = (base + off).raw();
755  }
756 
757  transaction::commit();
758  }
759  }
760 
761  void
762  enable_big_segment(pool_base &pop)
763  {
764  block_range blocks = segment_blocks(my_seg);
765  {
766  transaction::manual tx(pop);
767 
768  for (segment_index_t b = blocks.first;
769  b < blocks.second; ++b) {
770  assert((*my_table)[b] == nullptr);
771  (*my_table)[b] = make_persistent<bucket_type[]>(
772  block_size(b));
773  }
774 
775  transaction::commit();
776  }
777  }
778 
780  table_pointer my_table;
781 
783  segment_index_t my_seg;
784 }; /* End of class segment_facade_impl */
785 
792 template <typename Key, typename T, typename MutexType, typename ScopedLockType>
793 class hash_map_base {
794 public:
795  using mutex_t = MutexType;
796  using scoped_t = ScopedLockType;
797 
799  using size_type = size_t;
800 
802  using hashcode_type = size_t;
803 
805  using node = hash_map_node<Key, T, mutex_t, scoped_t>;
806 
808  using node_ptr_t = detail::persistent_pool_ptr<node>;
809 
811  struct bucket {
812  using mutex_t = MutexType;
813  using scoped_t = ScopedLockType;
814 
816  mutex_t mutex;
817 
819  p<std::atomic<uint64_t>> rehashed;
820 
822  node_ptr_t node_list;
823 
825  bucket() : node_list(nullptr)
826  {
827 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
828  VALGRIND_HG_DISABLE_CHECKING(&rehashed,
829  sizeof(rehashed));
830 #endif
831  rehashed.get_rw() = false;
832  }
833 
839  bool
840  is_rehashed(std::memory_order order)
841  {
842  return rehashed.get_ro().load(order);
843  }
844 
845  void
846  set_rehashed(std::memory_order order)
847  {
848  rehashed.get_rw().store(true, order);
849  }
850 
852  bucket(const bucket &) = delete;
853 
855  bucket &operator=(const bucket &) = delete;
856  }; /* End of struct bucket */
857 
859  using segment_traits_t = segment_traits<bucket>;
860 
862  using segment_index_t = typename segment_traits_t::segment_index_t;
863 
865  static const size_type embedded_buckets =
866  segment_traits_t::embedded_buckets;
867 
869  static const size_type first_block = segment_traits_t::first_block;
870 
872  constexpr static size_type block_table_size =
873  segment_traits_t::number_of_blocks();
874 
876  using segment_ptr_t = persistent_ptr<bucket[]>;
877 
879  using bucket_ptr_t = persistent_ptr<bucket>;
880 
882  using blocks_table_t = segment_ptr_t[block_table_size];
883 
885  using segment_enable_mutex_t = pmem::obj::mutex;
886 
888  struct features {
889  uint32_t compat;
890  uint32_t incompat;
891  };
892 
894  static constexpr features header_features = {0, 0};
895 
896  /* --------------------------------------------------------- */
897 
899  p<uint64_t> my_pool_uuid;
900 
903  features layout_features = (features)header_features;
904 
907  std::aligned_storage<sizeof(size_t), sizeof(size_t)>::type
908  my_mask_reserved;
909 
911  /* my_mask always restored on restart. */
912  p<std::atomic<hashcode_type>> my_mask;
913 
915  std::aligned_storage<32, 8>::type padding1;
916 
921  blocks_table_t my_table;
922 
923  /* It must be in separate cache line from my_mask due to performance
924  * effects */
926  p<std::atomic<size_type>> my_size;
927 
929  std::aligned_storage<24, 8>::type padding2;
930 
932  std::aligned_storage<64, 8>::type reserved;
933 
935  segment_enable_mutex_t my_segment_enable_mutex;
936 
938  bucket my_embedded_segment[embedded_buckets];
939 
940  /* --------------------------------------------------------- */
941 
942  const std::atomic<hashcode_type> &
943  mask() const noexcept
944  {
945  return my_mask.get_ro();
946  }
947 
948  std::atomic<hashcode_type> &
949  mask() noexcept
950  {
951  return my_mask.get_rw();
952  }
953 
955  using const_segment_facade_t =
956  segment_facade_impl<blocks_table_t, segment_traits_t, true>;
957 
959  using segment_facade_t =
960  segment_facade_impl<blocks_table_t, segment_traits_t, false>;
961 
963  hash_map_base()
964  {
965  static_assert(
966  sizeof(size_type) == sizeof(std::atomic<size_type>),
967  "std::atomic should have the same layout as underlying integral type");
968 
969 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
970  VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
971  VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
972 #endif
973 
974  my_size.get_rw() = 0;
975  PMEMoid oid = pmemobj_oid(this);
976 
977  assert(!OID_IS_NULL(oid));
978 
979  my_pool_uuid = oid.pool_uuid_lo;
980 
981  pool_base pop = get_pool_base();
982  /* enable embedded segments */
983  for (size_type i = 0; i < segment_traits_t::embedded_segments;
984  ++i) {
985  my_table[i] =
986  pmemobj_oid(my_embedded_segment +
987  segment_traits_t::segment_base(i));
988  segment_facade_t seg(my_table, i);
989  mark_rehashed<false>(pop, seg);
990  }
991  }
992 
996  void
997  calculate_mask()
998  {
999 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1000  VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
1001  VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
1002 #endif
1003 #if LIBPMEMOBJ_CPP_VG_PMEMCHECK_ENABLED
1004  VALGRIND_PMC_REMOVE_PMEM_MAPPING(&my_mask, sizeof(my_mask));
1005 #endif
1006 
1007  hashcode_type m = embedded_buckets - 1;
1008 
1009  const_segment_facade_t segment(
1010  my_table, segment_traits_t::embedded_segments);
1011 
1012  while (segment.is_valid()) {
1013  m += segment.size();
1014  ++segment;
1015  }
1016 
1017  mask().store(m, std::memory_order_relaxed);
1018  }
1019 
1020  void
1021  restore_size(size_type actual_size)
1022  {
1023  my_size.get_rw().store(actual_size, std::memory_order_relaxed);
1024  pool_base pop = get_pool_base();
1025  pop.persist(my_size);
1026  }
1027 
1031  template <bool Flush = true>
1032  void
1033  mark_rehashed(pool_base &pop, segment_facade_t &segment)
1034  {
1035  for (size_type i = 0; i < segment.size(); ++i) {
1036  bucket *b = &(segment[i]);
1037 
1038  assert_not_locked<mutex_t, scoped_t>(b->mutex);
1039 
1040  b->set_rehashed(std::memory_order_relaxed);
1041  }
1042 
1043  if (Flush) {
1044  /* Flush in separate loop to avoid read-after-flush */
1045  for (size_type i = 0; i < segment.size(); ++i) {
1046  bucket *b = &(segment[i]);
1047  pop.flush(b->rehashed);
1048  }
1049 
1050  pop.drain();
1051  }
1052  }
1053 
1057  void
1058  enable_segment(segment_index_t k, bool is_initial = false)
1059  {
1060  assert(k);
1061 
1062  pool_base pop = get_pool_base();
1063  size_type sz;
1064 
1065  if (k >= first_block) {
1066  segment_facade_t new_segment(my_table, k);
1067 
1068  sz = new_segment.size();
1069  if (!new_segment.is_valid())
1070  new_segment.enable(pop);
1071 
1072  if (is_initial) {
1073  mark_rehashed(pop, new_segment);
1074  }
1075 
1076  /* double it to get entire capacity of the container */
1077  sz <<= 1;
1078  } else {
1079  /* the first block */
1080  assert(k == segment_traits_t::embedded_segments);
1081 
1082  for (segment_index_t i = k; i < first_block; ++i) {
1083  segment_facade_t new_segment(my_table, i);
1084 
1085  if (!new_segment.is_valid())
1086  new_segment.enable(pop);
1087 
1088  if (is_initial) {
1089  mark_rehashed(pop, new_segment);
1090  }
1091  }
1092 
1093  sz = segment_traits_t::segment_size(first_block);
1094  }
1095 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1096  ANNOTATE_HAPPENS_BEFORE(&my_mask);
1097 #endif
1098  mask().store(sz - 1, std::memory_order_release);
1099  }
1100 
1105  bucket *
1106  get_bucket(hashcode_type h) const
1107  {
1108  segment_index_t s = segment_traits_t::segment_index_of(h);
1109 
1110  h -= segment_traits_t::segment_base(s);
1111 
1112  const_segment_facade_t segment(my_table, s);
1113 
1114  assert(segment.is_valid());
1115 
1116  return &(segment[h]);
1117  }
1118 
1122  inline bool
1123  check_mask_race(hashcode_type h, hashcode_type &m) const
1124  {
1125  hashcode_type m_now, m_old = m;
1126 
1127  m_now = mask().load(std::memory_order_acquire);
1128 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1129  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
1130 #endif
1131 
1132  if (m_old != m_now)
1133  return check_rehashing_collision(h, m_old, m = m_now);
1134 
1135  return false;
1136  }
1137 
1141  bool
1142  check_rehashing_collision(hashcode_type h, hashcode_type m_old,
1143  hashcode_type m) const
1144  {
1145  assert(m_old != m);
1146 
1147  if ((h & m_old) != (h & m)) {
1148  /* mask changed for this hashcode, rare event condition
1149  * above proves that 'h' has some other bits set beside
1150  * 'm_old', find next applicable mask after m_old */
1151 
1152  for (++m_old; !(h & m_old); m_old <<= 1)
1153  ;
1154 
1155  m_old = (m_old << 1) - 1; /* get full mask from a bit */
1156 
1157  assert((m_old & (m_old + 1)) == 0 && m_old <= m);
1158 
1159  /* check whether it is rehashing/ed */
1160  bucket *b = get_bucket(h & m_old);
1161  return b->is_rehashed(std::memory_order_acquire);
1162  }
1163 
1164  return false;
1165  }
1166 
1172  template <typename Node, typename... Args>
1173  size_type
1174  insert_new_node(bucket *b, detail::persistent_pool_ptr<Node> &new_node,
1175  Args &&... args)
1176  {
1177  pool_base pop = get_pool_base();
1178 
1179  pmem::obj::transaction::run(pop, [&] {
1180  new_node = pmem::obj::make_persistent<Node>(
1181  b->node_list, std::forward<Args>(args)...);
1182  b->node_list = new_node; /* bucket is locked */
1183  });
1184 
1185  /* prefix form is to enforce allocation after the first item
1186  * inserted */
1187  size_t sz = ++(my_size.get_rw());
1188  pop.persist(&my_size, sizeof(my_size));
1189 
1190  return sz;
1191  }
1192 
1197  bool
1198  check_growth(hashcode_type m, size_type sz)
1199  {
1200  if (sz >= m) {
1201  segment_index_t new_seg =
1202  static_cast<segment_index_t>(detail::Log2(
1203  m +
1204  1)); /* optimized segment_index_of */
1205 
1206  assert(segment_facade_t(my_table, new_seg - 1)
1207  .is_valid());
1208 
1209  std::unique_lock<segment_enable_mutex_t> lock(
1210  my_segment_enable_mutex, std::try_to_lock);
1211 
1212  if (lock) {
1213  if (mask().load(std::memory_order_relaxed) ==
1214  m) {
1215  /* Otherwise, another thread enabled this
1216  * segment */
1217  enable_segment(new_seg);
1218 
1219  return true;
1220  }
1221  }
1222  }
1223 
1224  return false;
1225  }
1226 
1230  void
1231  reserve(size_type buckets)
1232  {
1233  if (buckets == 0)
1234  return;
1235 
1236  --buckets;
1237 
1238  bool is_initial = (my_size.get_ro() == 0);
1239 
1240  for (size_type m = mask(); buckets > m; m = mask())
1241  enable_segment(
1242  segment_traits_t::segment_index_of(m + 1),
1243  is_initial);
1244  }
1245 
1250  void
1251  internal_swap(hash_map_base<Key, T, mutex_t, scoped_t> &table)
1252  {
1253  pool_base p = get_pool_base();
1254  {
1255  transaction::manual tx(p);
1256 
1257  this->my_pool_uuid.swap(table.my_pool_uuid);
1258  /* Swap my_mask */
1259  this->mask() = table.mask().exchange(
1260  this->mask(), std::memory_order_relaxed);
1261 
1262  /* Swap my_size */
1263  this->my_size.get_rw() =
1264  table.my_size.get_rw().exchange(
1265  this->my_size.get_ro(),
1266  std::memory_order_relaxed);
1267 
1268  for (size_type i = 0; i < embedded_buckets; ++i)
1269  this->my_embedded_segment[i].node_list.swap(
1270  table.my_embedded_segment[i].node_list);
1271 
1272  for (size_type i = segment_traits_t::embedded_segments;
1273  i < block_table_size; ++i)
1274  this->my_table[i].swap(table.my_table[i]);
1275 
1276  transaction::commit();
1277  }
1278  }
1279 
1284  pool_base
1285  get_pool_base()
1286  {
1287  PMEMobjpool *pop =
1288  pmemobj_pool_by_oid(PMEMoid{my_pool_uuid, 0});
1289 
1290  return pool_base(pop);
1291  }
1292 }; /* End of class hash_map_base */
1293 
1299 template <typename Container, bool is_const>
1300 class hash_map_iterator {
1301 public:
1302  using iterator_category = std::forward_iterator_tag;
1303  using difference_type = ptrdiff_t;
1304  using map_type = Container;
1305  using value_type = typename map_type::value_type;
1306  using node = typename map_type::node;
1307  using bucket = typename map_type::bucket;
1308  using map_ptr = typename std::conditional<is_const, const map_type *,
1309  map_type *>::type;
1310  using reference =
1311  typename std::conditional<is_const,
1312  typename map_type::const_reference,
1313  typename map_type::reference>::type;
1314  using pointer =
1315  typename std::conditional<is_const,
1316  typename map_type::const_pointer,
1317  typename map_type::pointer>::type;
1318 
1319  template <typename C, bool M, bool U>
1320  friend bool operator==(const hash_map_iterator<C, M> &i,
1321  const hash_map_iterator<C, U> &j);
1322 
1323  template <typename C, bool M, bool U>
1324  friend bool operator!=(const hash_map_iterator<C, M> &i,
1325  const hash_map_iterator<C, U> &j);
1326 
1327  friend class hash_map_iterator<map_type, true>;
1328 
1329 #if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
1330 private:
1331  template <typename Key, typename T, typename Hash, typename KeyEqual,
1332  typename MutexType, typename ScopedLockType>
1333  friend class ::pmem::obj::concurrent_hash_map;
1334 #else
1335 public: /* workaround */
1336 #endif
1337  hash_map_iterator(map_ptr map, size_t index)
1338  : my_map(map), my_index(index), my_bucket(nullptr), my_node(nullptr)
1339  {
1340  if (my_index <= my_map->mask()) {
1341  bucket_accessor acc(my_map, my_index);
1342  my_bucket = acc.get();
1343  my_node = static_cast<node *>(
1344  my_bucket->node_list.get(my_map->my_pool_uuid));
1345 
1346  if (!my_node) {
1347  advance_to_next_bucket();
1348  }
1349  }
1350  }
1351 
1352 public:
1354  hash_map_iterator() = default;
1355 
1357  hash_map_iterator(const hash_map_iterator &other)
1358  : my_map(other.my_map),
1359  my_index(other.my_index),
1360  my_bucket(other.my_bucket),
1361  my_node(other.my_node)
1362  {
1363  }
1364 
1366  template <typename U = void,
1367  typename = typename std::enable_if<is_const, U>::type>
1368  hash_map_iterator(const hash_map_iterator<map_type, false> &other)
1369  : my_map(other.my_map),
1370  my_index(other.my_index),
1371  my_bucket(other.my_bucket),
1372  my_node(other.my_node)
1373  {
1374  }
1375 
1376  hash_map_iterator &operator=(const hash_map_iterator &it) = default;
1377 
1379  reference operator*() const
1380  {
1381  assert(my_node);
1382  return my_node->item;
1383  }
1384 
1386  pointer operator->() const
1387  {
1388  return &operator*();
1389  }
1390 
1392  hash_map_iterator &
1393  operator++()
1394  {
1395  my_node = static_cast<node *>(
1396  my_node->next.get((my_map->my_pool_uuid)));
1397 
1398  if (!my_node)
1399  advance_to_next_bucket();
1400 
1401  return *this;
1402  }
1403 
1405  hash_map_iterator
1406  operator++(int)
1407  {
1408  hash_map_iterator old(*this);
1409  operator++();
1410  return old;
1411  }
1412 
1413 private:
1415  map_ptr my_map = nullptr;
1416 
1418  size_t my_index = 0;
1419 
1421  bucket *my_bucket = nullptr;
1422 
1424  node *my_node = nullptr;
1425 
1426  class bucket_accessor {
1427  public:
1428  bucket_accessor(map_ptr m, size_t index)
1429  {
1430  my_bucket = m->get_bucket(index);
1431  }
1432 
1433  bucket *
1434  get() const
1435  {
1436  return my_bucket;
1437  }
1438 
1439  private:
1440  bucket *my_bucket;
1441  };
1442 
1443  void
1444  advance_to_next_bucket()
1445  {
1446  size_t k = my_index + 1;
1447 
1448  assert(my_bucket);
1449 
1450  while (k <= my_map->mask()) {
1451  bucket_accessor acc(my_map, k);
1452  my_bucket = acc.get();
1453 
1454  if (my_bucket->node_list) {
1455  my_node = static_cast<node *>(
1456  my_bucket->node_list.get(
1457  my_map->my_pool_uuid));
1458 
1459  my_index = k;
1460 
1461  return;
1462  }
1463 
1464  ++k;
1465  }
1466 
1467  my_bucket = 0;
1468  my_node = 0;
1469  my_index = k;
1470  }
1471 };
1472 
1473 template <typename Container, bool M, bool U>
1474 bool
1475 operator==(const hash_map_iterator<Container, M> &i,
1476  const hash_map_iterator<Container, U> &j)
1477 {
1478  return i.my_node == j.my_node && i.my_map == j.my_map;
1479 }
1480 
1481 template <typename Container, bool M, bool U>
1482 bool
1483 operator!=(const hash_map_iterator<Container, M> &i,
1484  const hash_map_iterator<Container, U> &j)
1485 {
1486  return i.my_node != j.my_node || i.my_map != j.my_map;
1487 }
1488 } /* namespace concurrent_hash_map_internal */
1529 template <typename Key, typename T, typename Hash, typename KeyEqual,
1530  typename MutexType, typename ScopedLockType>
1531 class concurrent_hash_map
1532  : protected concurrent_hash_map_internal::hash_map_base<Key, T, MutexType,
1533  ScopedLockType> {
1534  template <typename Container, bool is_const>
1535  friend class concurrent_hash_map_internal::hash_map_iterator;
1536 
1537 public:
1538  using size_type = typename concurrent_hash_map_internal::hash_map_base<
1539  Key, T, MutexType, ScopedLockType>::size_type;
1540  using hashcode_type =
1541  typename concurrent_hash_map_internal::hash_map_base<
1542  Key, T, MutexType, ScopedLockType>::hashcode_type;
1543  using key_type = Key;
1544  using mapped_type = T;
1545  using value_type = typename concurrent_hash_map_internal::hash_map_base<
1546  Key, T, MutexType, ScopedLockType>::node::value_type;
1547  using difference_type = ptrdiff_t;
1548  using pointer = value_type *;
1549  using const_pointer = const value_type *;
1550  using reference = value_type &;
1551  using const_reference = const value_type &;
1552  using iterator = concurrent_hash_map_internal::hash_map_iterator<
1553  concurrent_hash_map, false>;
1554  using const_iterator = concurrent_hash_map_internal::hash_map_iterator<
1555  concurrent_hash_map, true>;
1556  using hasher = Hash;
1557  using key_equal = typename concurrent_hash_map_internal::key_equal_type<
1558  Hash, KeyEqual>::type;
1559 
1560 protected:
1561  using mutex_t = MutexType;
1562  using scoped_t = ScopedLockType;
1563  /*
1564  * Explicitly use methods and types from template base class
1565  */
1566  using hash_map_base =
1567  concurrent_hash_map_internal::hash_map_base<Key, T, mutex_t,
1568  scoped_t>;
1569  using hash_map_base::calculate_mask;
1570  using hash_map_base::check_growth;
1571  using hash_map_base::check_mask_race;
1572  using hash_map_base::embedded_buckets;
1573  using hash_map_base::get_bucket;
1574  using hash_map_base::get_pool_base;
1575  using hash_map_base::header_features;
1576  using hash_map_base::insert_new_node;
1577  using hash_map_base::internal_swap;
1578  using hash_map_base::layout_features;
1579  using hash_map_base::mask;
1580  using hash_map_base::reserve;
1581  using node = typename hash_map_base::node;
1582  using node_mutex_t = typename node::mutex_t;
1583  using node_ptr_t = typename hash_map_base::node_ptr_t;
1584  using bucket = typename hash_map_base::bucket;
1585  using bucket_lock_type = typename bucket::scoped_t;
1586  using segment_index_t = typename hash_map_base::segment_index_t;
1587  using segment_traits_t = typename hash_map_base::segment_traits_t;
1588  using segment_facade_t = typename hash_map_base::segment_facade_t;
1589  using scoped_lock_traits_type =
1590  concurrent_hash_map_internal::scoped_lock_traits<scoped_t>;
1591 
1592  friend class const_accessor;
1593  using persistent_node_ptr_t = detail::persistent_pool_ptr<node>;
1594 
1595  void
1596  delete_node(const node_ptr_t &n)
1597  {
1598  delete_persistent<node>(
1599  detail::static_persistent_pool_pointer_cast<node>(n)
1600  .get_persistent_ptr(this->my_pool_uuid));
1601  }
1602 
1603  template <typename K>
1604  persistent_node_ptr_t
1605  search_bucket(const K &key, bucket *b) const
1606  {
1607  assert(b->is_rehashed(std::memory_order_relaxed));
1608 
1609  persistent_node_ptr_t n =
1610  detail::static_persistent_pool_pointer_cast<node>(
1611  b->node_list);
1612 
1613  while (n &&
1614  !key_equal{}(key,
1615  n.get(this->my_pool_uuid)->item.first)) {
1616  n = detail::static_persistent_pool_pointer_cast<node>(
1617  n.get(this->my_pool_uuid)->next);
1618  }
1619 
1620  return n;
1621  }
1622 
1627  class bucket_accessor : public bucket_lock_type {
1628  bucket *my_b;
1629 
1630  public:
1631  bucket_accessor(concurrent_hash_map *base,
1632  const hashcode_type h, bool writer = false)
1633  {
1634  acquire(base, h, writer);
1635  }
1636 
1641  inline void
1642  acquire(concurrent_hash_map *base, const hashcode_type h,
1643  bool writer = false)
1644  {
1645  my_b = base->get_bucket(h);
1646 
1647  if (my_b->is_rehashed(std::memory_order_acquire) ==
1648  false &&
1649  bucket_lock_type::try_acquire(this->my_b->mutex,
1650  /*write=*/true)) {
1651  if (my_b->is_rehashed(
1652  std::memory_order_relaxed) ==
1653  false) {
1654  /* recursive rehashing */
1655  base->rehash_bucket<false>(my_b, h);
1656  }
1657  } else {
1658  bucket_lock_type::acquire(my_b->mutex, writer);
1659  }
1660 
1661  assert(my_b->is_rehashed(std::memory_order_relaxed));
1662  }
1663 
1667  bool
1668  is_writer() const
1669  {
1670  return bucket_lock_type::is_writer;
1671  }
1672 
1677  bucket *
1678  get() const
1679  {
1680  return my_b;
1681  }
1682 
1687  bucket *operator->() const
1688  {
1689  return this->get();
1690  }
1691  };
1692 
1696  class serial_bucket_accessor {
1697  bucket *my_b;
1698 
1699  public:
1700  serial_bucket_accessor(concurrent_hash_map *base,
1701  const hashcode_type h,
1702  bool writer = false)
1703  {
1704  acquire(base, h, writer);
1705  }
1706 
1707  /*
1708  * Find a bucket by masked hashcode, optionally rehash
1709  */
1710  inline void
1711  acquire(concurrent_hash_map *base, const hashcode_type h,
1712  bool writer = false)
1713  {
1714  my_b = base->get_bucket(h);
1715 
1716  if (my_b->is_rehashed(std::memory_order_relaxed) ==
1717  false) {
1718  /* recursive rehashing */
1719  base->rehash_bucket<true>(my_b, h);
1720  }
1721 
1722  assert(my_b->is_rehashed(std::memory_order_relaxed));
1723  }
1724 
1731  bool
1732  is_writer() const
1733  {
1734  return true;
1735  }
1736 
1741  bucket *
1742  get() const
1743  {
1744  return my_b;
1745  }
1746 
1751  bucket *operator->() const
1752  {
1753  return this->get();
1754  }
1755  };
1756 
1757  hashcode_type
1758  get_hash_code(node_ptr_t &n)
1759  {
1760  return hasher{}(
1761  detail::static_persistent_pool_pointer_cast<node>(n)(
1762  this->my_pool_uuid)
1763  ->item.first);
1764  }
1765 
1766  template <bool serial>
1767  void
1768  rehash_bucket(bucket *b_new, const hashcode_type h)
1769  {
1770  using accessor_type = typename std::conditional<
1771  serial, serial_bucket_accessor, bucket_accessor>::type;
1772 
1773  using scoped_lock_traits_type =
1774  concurrent_hash_map_internal::scoped_lock_traits<
1775  accessor_type>;
1776 
1777  /* The first two buckets should always be rehashed */
1778  assert(h > 1);
1779 
1780  pool_base pop = get_pool_base();
1781  node_ptr_t *p_new = &(b_new->node_list);
1782  bool restore_after_crash = *p_new != nullptr;
1783 
1784  /* get parent mask from the topmost bit */
1785  hashcode_type mask = (1u << detail::Log2(h)) - 1;
1786  assert((h & mask) < h);
1787  accessor_type b_old(
1788  this, h & mask,
1789  scoped_lock_traits_type::initial_rw_state(true));
1790 
1791  /* get full mask for new bucket */
1792  mask = (mask << 1) | 1;
1793  assert((mask & (mask + 1)) == 0 && (h & mask) == h);
1794  restart:
1795  for (node_ptr_t *p_old = &(b_old->node_list), n = *p_old; n;
1796  n = *p_old) {
1797  hashcode_type c = get_hash_code(n);
1798 #ifndef NDEBUG
1799  hashcode_type bmask = h & (mask >> 1);
1800 
1801  bmask = bmask == 0
1802  ? 1 /* minimal mask of parent bucket */
1803  : (1u << (detail::Log2(bmask) + 1)) - 1;
1804 
1805  assert((c & bmask) == (h & bmask));
1806 #endif
1807 
1808  if ((c & mask) == h) {
1809  if (!b_old.is_writer() &&
1810  !scoped_lock_traits_type::upgrade_to_writer(
1811  b_old)) {
1812  goto restart;
1813  /* node ptr can be invalid due to
1814  * concurrent erase */
1815  }
1816 
1817  if (restore_after_crash) {
1818  while (*p_new != nullptr &&
1819  (mask & get_hash_code(*p_new)) ==
1820  h &&
1821  *p_new != n) {
1822  p_new = &(
1823  (*p_new)(
1824  this->my_pool_uuid)
1825  ->next);
1826  }
1827 
1828  restore_after_crash = false;
1829  }
1830 
1831  /* Add to new b_new */
1832  *p_new = n;
1833  pop.persist(p_new, sizeof(*p_new));
1834 
1835  /* exclude from b_old */
1836  *p_old = n(this->my_pool_uuid)->next;
1837  pop.persist(p_old, sizeof(*p_old));
1838 
1839  p_new = &(n(this->my_pool_uuid)->next);
1840  } else {
1841  /* iterate to next item */
1842  p_old = &(n(this->my_pool_uuid)->next);
1843  }
1844  }
1845 
1846  if (restore_after_crash) {
1847  while (*p_new != nullptr &&
1848  (mask & get_hash_code(*p_new)) == h)
1849  p_new = &((*p_new)(this->my_pool_uuid)->next);
1850  }
1851 
1852  *p_new = nullptr;
1853  pop.persist(p_new, sizeof(*p_new));
1854 
1855  /* mark rehashed */
1856  b_new->set_rehashed(std::memory_order_release);
1857  pop.persist(b_new->rehashed);
1858  }
1859 
1860  void
1861  check_features()
1862  {
1863  if (layout_features.incompat != header_features.incompat)
1864  throw pmem::layout_error(
1865  "Incompat flags mismatch, for more details go to: https://pmem.io/pmdk/cpp_obj/ \n");
1866  }
1867 
1868 public:
1869  class accessor;
1873  class const_accessor
1874  : protected node::scoped_t /*which derived from no_copy*/ {
1875  friend class concurrent_hash_map<Key, T, Hash, KeyEqual,
1876  mutex_t, scoped_t>;
1877  friend class accessor;
1879  using node::scoped_t::try_acquire;
1880 
1881  public:
1885  using value_type =
1886  const typename concurrent_hash_map::value_type;
1887 
1892  bool
1893  empty() const
1894  {
1895  return !my_node;
1896  }
1897 
1904  void
1905  release()
1906  {
1907  concurrent_hash_map_internal::check_outside_tx();
1908 
1909  if (my_node) {
1910  node::scoped_t::release();
1911  my_node = 0;
1912  }
1913  }
1914 
1918  const_reference operator*() const
1919  {
1920  assert(my_node);
1921 
1922  return my_node->item;
1923  }
1924 
1928  const_pointer operator->() const
1929  {
1930  return &operator*();
1931  }
1932 
1938  const_accessor() : my_node(OID_NULL)
1939  {
1940  concurrent_hash_map_internal::check_outside_tx();
1941  }
1942 
1946  ~const_accessor()
1947  {
1948  my_node = OID_NULL; // scoped lock's release() is called
1949  // in its destructor
1950  }
1951 
1952  protected:
1953  node_ptr_t my_node;
1954 
1955  hashcode_type my_hash;
1956  };
1957 
1962  class accessor : public const_accessor {
1963  public:
1965  using value_type = typename concurrent_hash_map::value_type;
1966 
1968  reference operator*() const
1969  {
1970  assert(this->my_node);
1971 
1972  return this->my_node->item;
1973  }
1974 
1976  pointer operator->() const
1977  {
1978  return &operator*();
1979  }
1980  };
1981 
1985  concurrent_hash_map() : hash_map_base()
1986  {
1987  runtime_initialize(true);
1988  }
1989 
1994  concurrent_hash_map(size_type n) : hash_map_base()
1995  {
1996  runtime_initialize(true);
1997 
1998  reserve(n);
1999  }
2000 
2004  concurrent_hash_map(const concurrent_hash_map &table) : hash_map_base()
2005  {
2006  runtime_initialize(true);
2007 
2008  reserve(table.size());
2009 
2010  internal_copy(table);
2011  }
2012 
2016  concurrent_hash_map(concurrent_hash_map &&table) : hash_map_base()
2017  {
2018  runtime_initialize(true);
2019 
2020  swap(table);
2021  }
2022 
2026  template <typename I>
2027  concurrent_hash_map(I first, I last)
2028  {
2029  runtime_initialize(true);
2030 
2031  reserve(static_cast<size_type>(std::distance(first, last)));
2032 
2033  internal_copy(first, last);
2034  }
2035 
2039  concurrent_hash_map(std::initializer_list<value_type> il)
2040  {
2041  runtime_initialize(true);
2042 
2043  reserve(il.size());
2044 
2045  internal_copy(il.begin(), il.end());
2046  }
2047 
2056  void
2057  runtime_initialize(bool graceful_shutdown = false)
2058  {
2059  check_features();
2060 
2061  calculate_mask();
2062 
2063  if (!graceful_shutdown) {
2064  auto actual_size =
2065  std::distance(this->begin(), this->end());
2066  assert(actual_size >= 0);
2067  this->restore_size(size_type(actual_size));
2068  } else {
2069  assert(this->size() ==
2070  size_type(std::distance(this->begin(),
2071  this->end())));
2072  }
2073  }
2074 
2086  concurrent_hash_map &
2087  operator=(const concurrent_hash_map &table)
2088  {
2089  if (this != &table) {
2090  clear();
2091  internal_copy(table);
2092  }
2093 
2094  return *this;
2095  }
2096 
2108  concurrent_hash_map &
2109  operator=(std::initializer_list<value_type> il)
2110  {
2111  clear();
2112 
2113  reserve(il.size());
2114 
2115  internal_copy(il.begin(), il.end());
2116 
2117  return *this;
2118  }
2119 
2128  void rehash(size_type n = 0);
2129 
2136  void clear();
2137 
2141  ~concurrent_hash_map()
2142  {
2143  clear();
2144  }
2145 
2146  //------------------------------------------------------------------------
2147  // STL support - not thread-safe methods
2148  //------------------------------------------------------------------------
2149 
2156  iterator
2157  begin()
2158  {
2159  return iterator(this, 0);
2160  }
2161 
2166  iterator
2167  end()
2168  {
2169  return iterator(this, mask() + 1);
2170  }
2171 
2176  const_iterator
2177  begin() const
2178  {
2179  return const_iterator(this, 0);
2180  }
2181 
2186  const_iterator
2187  end() const
2188  {
2189  return const_iterator(this, mask() + 1);
2190  }
2191 
2195  size_type
2196  size() const
2197  {
2198  return this->my_size.get_ro();
2199  }
2200 
2204  bool
2205  empty() const
2206  {
2207  return this->my_size.get_ro() == 0;
2208  }
2209 
2213  size_type
2214  max_size() const
2215  {
2216  return (~size_type(0)) / sizeof(node);
2217  }
2218 
2222  size_type
2223  bucket_count() const
2224  {
2225  return mask() + 1;
2226  }
2227 
2231  void swap(concurrent_hash_map &table);
2232 
2233  //------------------------------------------------------------------------
2234  // concurrent map operations
2235  //------------------------------------------------------------------------
2236 
2242  size_type
2243  count(const Key &key) const
2244  {
2245  concurrent_hash_map_internal::check_outside_tx();
2246 
2247  return const_cast<concurrent_hash_map *>(this)->internal_find(
2248  key, nullptr, false);
2249  }
2250 
2262  template <typename K,
2263  typename = typename std::enable_if<
2264  concurrent_hash_map_internal::
2265  has_transparent_key_equal<hasher>::value,
2266  K>::type>
2267  size_type
2268  count(const K &key) const
2269  {
2270  concurrent_hash_map_internal::check_outside_tx();
2271 
2272  return const_cast<concurrent_hash_map *>(this)->internal_find(
2273  key, nullptr, false);
2274  }
2275 
2282  bool
2283  find(const_accessor &result, const Key &key) const
2284  {
2285  concurrent_hash_map_internal::check_outside_tx();
2286 
2287  result.release();
2288 
2289  return const_cast<concurrent_hash_map *>(this)->internal_find(
2290  key, &result, false);
2291  }
2292 
2306  template <typename K,
2307  typename = typename std::enable_if<
2308  concurrent_hash_map_internal::
2309  has_transparent_key_equal<hasher>::value,
2310  K>::type>
2311  bool
2312  find(const_accessor &result, const K &key) const
2313  {
2314  concurrent_hash_map_internal::check_outside_tx();
2315 
2316  result.release();
2317 
2318  return const_cast<concurrent_hash_map *>(this)->internal_find(
2319  key, &result, false);
2320  }
2321 
2328  bool
2329  find(accessor &result, const Key &key)
2330  {
2331  concurrent_hash_map_internal::check_outside_tx();
2332 
2333  result.release();
2334 
2335  return internal_find(key, &result, true);
2336  }
2337 
2351  template <typename K,
2352  typename = typename std::enable_if<
2353  concurrent_hash_map_internal::
2354  has_transparent_key_equal<hasher>::value,
2355  K>::type>
2356  bool
2357  find(accessor &result, const K &key)
2358  {
2359  concurrent_hash_map_internal::check_outside_tx();
2360 
2361  result.release();
2362 
2363  return internal_find(key, &result, true);
2364  }
2372  bool
2373  insert(const_accessor &result, const Key &key)
2374  {
2375  concurrent_hash_map_internal::check_outside_tx();
2376 
2377  result.release();
2378 
2379  return internal_insert(key, &result, false, key);
2380  }
2381 
2389  bool
2390  insert(accessor &result, const Key &key)
2391  {
2392  concurrent_hash_map_internal::check_outside_tx();
2393 
2394  result.release();
2395 
2396  return internal_insert(key, &result, true, key);
2397  }
2398 
2406  bool
2407  insert(const_accessor &result, const value_type &value)
2408  {
2409  concurrent_hash_map_internal::check_outside_tx();
2410 
2411  result.release();
2412 
2413  return internal_insert(value.first, &result, false, value);
2414  }
2415 
2423  bool
2424  insert(accessor &result, const value_type &value)
2425  {
2426  concurrent_hash_map_internal::check_outside_tx();
2427 
2428  result.release();
2429 
2430  return internal_insert(value.first, &result, true, value);
2431  }
2432 
2439  bool
2440  insert(const value_type &value)
2441  {
2442  concurrent_hash_map_internal::check_outside_tx();
2443 
2444  return internal_insert(value.first, nullptr, false, value);
2445  }
2446 
2454  bool
2455  insert(const_accessor &result, value_type &&value)
2456  {
2457  concurrent_hash_map_internal::check_outside_tx();
2458 
2459  result.release();
2460 
2461  return internal_insert(value.first, &result, false,
2462  std::move(value));
2463  }
2464 
2472  bool
2473  insert(accessor &result, value_type &&value)
2474  {
2475  concurrent_hash_map_internal::check_outside_tx();
2476 
2477  result.release();
2478 
2479  return internal_insert(value.first, &result, true,
2480  std::move(value));
2481  }
2482 
2489  bool
2490  insert(value_type &&value)
2491  {
2492  concurrent_hash_map_internal::check_outside_tx();
2493 
2494  return internal_insert(value.first, nullptr, false,
2495  std::move(value));
2496  }
2497 
2503  template <typename I>
2504  void
2505  insert(I first, I last)
2506  {
2507  concurrent_hash_map_internal::check_outside_tx();
2508 
2509  for (; first != last; ++first)
2510  insert(*first);
2511  }
2512 
2518  void
2519  insert(std::initializer_list<value_type> il)
2520  {
2521  concurrent_hash_map_internal::check_outside_tx();
2522 
2523  insert(il.begin(), il.end());
2524  }
2525 
2534  bool
2535  erase(const Key &key)
2536  {
2537  concurrent_hash_map_internal::check_outside_tx();
2538 
2539  return internal_erase(key);
2540  }
2541 
2556  template <typename K,
2557  typename = typename std::enable_if<
2558  concurrent_hash_map_internal::
2559  has_transparent_key_equal<hasher>::value,
2560  K>::type>
2561  bool
2562  erase(const K &key)
2563  {
2564  concurrent_hash_map_internal::check_outside_tx();
2565 
2566  return internal_erase(key);
2567  }
2568 
2569 protected:
2570  /*
2571  * Try to acquire the mutex for read or write.
2572  *
2573  * If acquiring succeeds returns true, otherwise retries for few times.
2574  * If acquiring fails after all attempts returns false.
2575  */
2576  bool try_acquire_item(const_accessor *result, node_mutex_t &mutex,
2577  bool write);
2578 
2579  template <typename K>
2580  bool internal_find(const K &key, const_accessor *result, bool write);
2581 
2582  template <typename K, typename... Args>
2583  bool internal_insert(const K &key, const_accessor *result, bool write,
2584  Args &&... args);
2585 
2586  /* Obtain pointer to node and lock bucket */
2587  template <bool Bucket_rw_lock, typename K>
2588  persistent_node_ptr_t
2589  get_node(const K &key, bucket_accessor &b)
2590  {
2591  /* find a node */
2592  auto n = search_bucket(key, b.get());
2593 
2594  if (!n) {
2595  if (Bucket_rw_lock && !b.is_writer() &&
2596  !scoped_lock_traits_type::upgrade_to_writer(b)) {
2597  /* Rerun search_list, in case another
2598  * thread inserted the item during the
2599  * upgrade. */
2600  n = search_bucket(key, b.get());
2601  if (n) {
2602  /* unfortunately, it did */
2603  scoped_lock_traits_type::
2604  downgrade_to_reader(b);
2605  return n;
2606  }
2607  }
2608  }
2609 
2610  return n;
2611  }
2612 
2613  template <typename K>
2614  bool internal_erase(const K &key);
2615 
2616  void clear_segment(segment_index_t s);
2617 
2621  void internal_copy(const concurrent_hash_map &source);
2622 
2623  template <typename I>
2624  void internal_copy(I first, I last);
2625 
2626 }; // class concurrent_hash_map
2627 
2628 template <typename Key, typename T, typename Hash, typename KeyEqual,
2629  typename MutexType, typename ScopedLockType>
2630 bool
2631 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2632  ScopedLockType>::try_acquire_item(const_accessor *result,
2633  node_mutex_t &mutex,
2634  bool write)
2635 {
2636  /* acquire the item */
2637  if (!result->try_acquire(mutex, write)) {
2638  for (detail::atomic_backoff backoff(true);;) {
2639  if (result->try_acquire(mutex, write))
2640  break;
2641 
2642  if (!backoff.bounded_pause())
2643  return false;
2644  }
2645  }
2646 
2647  return true;
2648 }
2649 
2650 template <typename Key, typename T, typename Hash, typename KeyEqual,
2651  typename MutexType, typename ScopedLockType>
2652 template <typename K>
2653 bool
2654 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2655  ScopedLockType>::internal_find(const K &key,
2656  const_accessor *result,
2657  bool write)
2658 {
2659  assert(!result || !result->my_node);
2660 
2661  hashcode_type m = mask().load(std::memory_order_acquire);
2662 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
2663  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
2664 #endif
2665 
2666  assert((m & (m + 1)) == 0);
2667 
2668  hashcode_type const h = hasher{}(key);
2669 
2670  persistent_node_ptr_t node;
2671 
2672  while (true) {
2673  /* get bucket and acquire the lock */
2674  bucket_accessor b(
2675  this, h & m,
2676  scoped_lock_traits_type::initial_rw_state(false));
2677  node = get_node<false>(key, b);
2678 
2679  if (!node) {
2680  /* Element was possibly relocated, try again */
2681  if (check_mask_race(h, m)) {
2682  b.release();
2683  continue;
2684  } else {
2685  return false;
2686  }
2687  }
2688 
2689  /* No need to acquire the item or item acquired */
2690  if (!result ||
2691  try_acquire_item(
2692  result, node.get(this->my_pool_uuid)->mutex, write))
2693  break;
2694 
2695  /* the wait takes really long, restart the
2696  * operation */
2697  b.release();
2698 
2699  std::this_thread::yield();
2700 
2701  m = mask().load(std::memory_order_acquire);
2702 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
2703  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
2704 #endif
2705  }
2706 
2707  if (result) {
2708  result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
2709  result->my_hash = h;
2710  }
2711 
2712  return true;
2713 }
2714 
2715 template <typename Key, typename T, typename Hash, typename KeyEqual,
2716  typename MutexType, typename ScopedLockType>
2717 template <typename K, typename... Args>
2718 bool
2719 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2720  ScopedLockType>::internal_insert(const K &key,
2721  const_accessor *result,
2722  bool write,
2723  Args &&... args)
2724 {
2725  assert(!result || !result->my_node);
2726 
2727  hashcode_type m = mask().load(std::memory_order_acquire);
2728 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
2729  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
2730 #endif
2731 
2732  assert((m & (m + 1)) == 0);
2733 
2734  hashcode_type const h = hasher{}(key);
2735 
2736  persistent_node_ptr_t node;
2737  size_t new_size = 0;
2738  bool inserted = false;
2739 
2740  while (true) {
2741  /* get bucket and acquire the lock */
2742  bucket_accessor b(
2743  this, h & m,
2744  scoped_lock_traits_type::initial_rw_state(true));
2745  node = get_node<true>(key, b);
2746 
2747  if (!node) {
2748  /* Element was possibly relocated, try again */
2749  if (check_mask_race(h, m)) {
2750  b.release();
2751  continue;
2752  }
2753 
2754  /* insert and set flag to grow the container */
2755  new_size = insert_new_node(b.get(), node,
2756  std::forward<Args>(args)...);
2757  inserted = true;
2758  }
2759 
2760  /* No need to acquire the item or item acquired */
2761  if (!result ||
2762  try_acquire_item(
2763  result, node.get(this->my_pool_uuid)->mutex, write))
2764  break;
2765 
2766  /* the wait takes really long, restart the
2767  * operation */
2768  b.release();
2769 
2770  std::this_thread::yield();
2771 
2772  m = mask().load(std::memory_order_acquire);
2773 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
2774  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
2775 #endif
2776  }
2777 
2778  if (result) {
2779  result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
2780  result->my_hash = h;
2781  }
2782 
2783  check_growth(m, new_size);
2784 
2785  return inserted;
2786 }
2787 
2788 template <typename Key, typename T, typename Hash, typename KeyEqual,
2789  typename MutexType, typename ScopedLockType>
2790 template <typename K>
2791 bool
2792 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2793  ScopedLockType>::internal_erase(const K &key)
2794 {
2795  node_ptr_t n;
2796  hashcode_type const h = hasher{}(key);
2797  hashcode_type m = mask().load(std::memory_order_acquire);
2798 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
2799  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
2800 #endif
2801 
2802  pool_base pop = get_pool_base();
2803 
2804 restart : {
2805  /* lock scope */
2806  /* get bucket */
2807  bucket_accessor b(this, h & m,
2808  scoped_lock_traits_type::initial_rw_state(true));
2809 
2810 search:
2811  node_ptr_t *p = &b->node_list;
2812  n = *p;
2813 
2814  while (n &&
2815  !key_equal{}(key,
2816  detail::static_persistent_pool_pointer_cast<node>(
2817  n)(this->my_pool_uuid)
2818  ->item.first)) {
2819  p = &n(this->my_pool_uuid)->next;
2820  n = *p;
2821  }
2822 
2823  if (!n) {
2824  /* not found, but the mask could have changed */
2825  if (check_mask_race(h, m))
2826  goto restart;
2827 
2828  return false;
2829  } else if (!b.is_writer() &&
2830  !scoped_lock_traits_type::upgrade_to_writer(b)) {
2831  if (check_mask_race(h, m)) /* contended upgrade, check mask */
2832  goto restart;
2833 
2834  goto search;
2835  }
2836 
2837  {
2838  transaction::manual tx(pop);
2839 
2840  persistent_ptr<node> del = n(this->my_pool_uuid);
2841 
2842  *p = del->next;
2843 
2844  {
2845  /* We cannot remove this element immediately because
2846  * other threads might work with this element via
2847  * accessors. The item_locker is required to wait until
2848  * other threads release the node. */
2849  typename node::scoped_t item_locker(del->mutex,
2850  /*write=*/true);
2851  }
2852 
2853  /* Only one thread can delete it, due to the write lock on
2854  * the bucket */
2855  delete_node(del);
2856 
2857  transaction::commit();
2858  }
2859 
2860  --(this->my_size.get_rw());
2861  pop.persist(this->my_size);
2862 }
2863 
2864  return true;
2865 }
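internal_erase() unlinks the node under a write-locked bucket, waits for concurrent accessors to drain via the per-item lock, and frees the node inside a libpmemobj transaction. Caller-side sketch (hypothetical map handle as above):

/* Sketch only: erase() returns whether the key was present. */
bool removed = map->erase(10);   /* the node is reclaimed inside the map's transaction */
(void)removed;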
2866 
2867 template <typename Key, typename T, typename Hash, typename KeyEqual,
2868  typename MutexType, typename ScopedLockType>
2869 void
2870 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2871  ScopedLockType>::swap(concurrent_hash_map &table)
2872 {
2873  internal_swap(table);
2874 }
2875 
2876 template <typename Key, typename T, typename Hash, typename KeyEqual,
2877  typename MutexType, typename ScopedLockType>
2878 void
2879 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::rehash(
2880  size_type sz)
2881 {
2882  concurrent_hash_map_internal::check_outside_tx();
2883 
2884  reserve(sz);
2885  hashcode_type m = mask();
2886 
2887  /* Only the last segment needs to be scanned for rehashing;
2888  * b is the size of (and the first index of) the last segment */
2889  hashcode_type b = (m + 1) >> 1;
2890 
2891  /* zero or power of 2 */
2892  assert((b & (b - 1)) == 0);
2893 
2894  for (; b <= m; ++b) {
2895  bucket *bp = get_bucket(b);
2896 
2897  concurrent_hash_map_internal::assert_not_locked<mutex_t,
2898  scoped_t>(
2899  bp->mutex);
2900  /* XXX Need to investigate if this statement is needed */
2901  if (bp->is_rehashed(std::memory_order_relaxed) == false)
2902  rehash_bucket<true>(bp, b);
2903  }
2904 }
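rehash() reserves capacity and eagerly rehashes any not-yet-rehashed buckets in the last allocated segment, which otherwise happens lazily on first access. A sketch of pre-sizing before a bulk load (same hypothetical names as above):

/* Sketch only: reserve up front so lazy per-bucket rehashing stays off the hot path. */
map->rehash(1 << 16);                               /* request roughly 65k buckets */
for (int i = 0; i < 100000; ++i)
	map->insert(map_type::value_type(i, i));    /* plain insert, no accessor held */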
2905 
2906 template <typename Key, typename T, typename Hash, typename KeyEqual,
2907  typename MutexType, typename ScopedLockType>
2908 void
2909 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::clear()
2910 {
2911  hashcode_type m = mask();
2912 
2913  assert((m & (m + 1)) == 0);
2914 
2915 #ifndef NDEBUG
2916  /* check consistency */
2917  for (segment_index_t b = 0; b <= m; ++b) {
2918  bucket *bp = get_bucket(b);
2919  concurrent_hash_map_internal::assert_not_locked<mutex_t,
2920  scoped_t>(
2921  bp->mutex);
2922  }
2923 #endif
2924 
2925  pool_base pop = get_pool_base();
2926  { /* transaction scope */
2927 
2928  transaction::manual tx(pop);
2929 
2930  this->my_size.get_rw() = 0;
2931  segment_index_t s = segment_traits_t::segment_index_of(m);
2932 
2933  assert(s + 1 == this->block_table_size ||
2934  !segment_facade_t(this->my_table, s + 1).is_valid());
2935 
2936  do {
2937  clear_segment(s);
2938  } while (s-- > 0);
2939 
2940  mask().store(embedded_buckets - 1, std::memory_order_relaxed);
2941 
2942  transaction::commit();
2943  }
2944 }
2945 
2946 template <typename Key, typename T, typename Hash, typename KeyEqual,
2947  typename MutexType, typename ScopedLockType>
2948 void
2949 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2950  ScopedLockType>::clear_segment(segment_index_t s)
2951 {
2952  segment_facade_t segment(this->my_table, s);
2953 
2954  assert(segment.is_valid());
2955 
2956  size_type sz = segment.size();
2957  for (segment_index_t i = 0; i < sz; ++i) {
2958  for (node_ptr_t n = segment[i].node_list; n;
2959  n = segment[i].node_list) {
2960  segment[i].node_list = n(this->my_pool_uuid)->next;
2961  delete_node(n);
2962  }
2963  }
2964 
2965  if (s >= segment_traits_t::embedded_segments)
2966  segment.disable();
2967 }
2968 
2969 template <typename Key, typename T, typename Hash, typename KeyEqual,
2970  typename MutexType, typename ScopedLockType>
2971 void
2972 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::
2973  internal_copy(const concurrent_hash_map &source)
2974 {
2975  reserve(source.my_size.get_ro());
2976  internal_copy(source.begin(), source.end());
2977 }
2978 
2979 template <typename Key, typename T, typename Hash, typename KeyEqual,
2980  typename MutexType, typename ScopedLockType>
2981 template <typename I>
2982 void
2983 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2984  ScopedLockType>::internal_copy(I first, I last)
2985 {
2986  hashcode_type m = mask();
2987 
2988  for (; first != last; ++first) {
2989  hashcode_type h = hasher{}(first->first);
2990  bucket *b = get_bucket(h & m);
2991 
2992  assert(b->is_rehashed(std::memory_order_relaxed));
2993 
2994  detail::persistent_pool_ptr<node> p;
2995  insert_new_node(b, p, *first);
2996  }
2997 }
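internal_copy() is what the copy and iterator-range constructors use to populate a freshly constructed map; since construction allocates on pmem, it has to run inside a transaction. A sketch, assuming pop is the pool handle and root->other is a hypothetical persistent_ptr<map_type> (std::vector and make_persistent.hpp assumed included):

/* Sketch only: range construction, which funnels into internal_copy(first, last). */
std::vector<std::pair<int, int>> src = {{1, 1}, {2, 2}, {3, 3}};
pmem::obj::transaction::run(pop, [&] {
	root->other = pmem::obj::make_persistent<map_type>(src.begin(), src.end());
});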
2998 
2999 template <typename Key, typename T, typename Hash, typename KeyEqual,
3000  typename MutexType, typename ScopedLockType>
3001 inline bool
3002 operator==(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3003  ScopedLockType> &a,
3004  const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3005  ScopedLockType> &b)
3006 {
3007  if (a.size() != b.size())
3008  return false;
3009 
3010  typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3011  ScopedLockType>::const_iterator
3012  i(a.begin()),
3013  i_end(a.end());
3014 
3015  typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3016  ScopedLockType>::const_iterator j,
3017  j_end(b.end());
3018 
3019  for (; i != i_end; ++i) {
3020  j = b.equal_range(i->first).first;
3021 
3022  if (j == j_end || !(i->second == j->second))
3023  return false;
3024  }
3025 
3026  return true;
3027 }
3028 
3029 template <typename Key, typename T, typename Hash, typename KeyEqual,
3030  typename MutexType, typename ScopedLockType>
3031 inline bool
3032 operator!=(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3033  ScopedLockType> &a,
3034  const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3035  ScopedLockType> &b)
3036 {
3037  return !(a == b);
3038 }
3039 
3040 template <typename Key, typename T, typename Hash, typename KeyEqual,
3041  typename MutexType, typename ScopedLockType>
3042 inline void
3043 swap(concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &a,
3044  concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &b)
3045 {
3046  a.swap(b);
3047 }
3048 
3049 } /* namespace obj */
3050 } /* namespace pmem */
3051 
3052 #endif /* PMEMOBJ_CONCURRENT_HASH_MAP_HPP */
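For context, a hedged end-to-end sketch of how this header is typically consumed. The pool path, layout string, and root structure below are assumptions for illustration, and the include path reflects the 1.8 layout but may differ in other releases:

/* Sketch only: end-to-end usage with a hypothetical pool layout. */
#include <libpmemobj++/container/concurrent_hash_map.hpp> /* 1.8 include path; may differ */
#include <libpmemobj++/make_persistent.hpp>
#include <libpmemobj++/p.hpp>
#include <libpmemobj++/persistent_ptr.hpp>
#include <libpmemobj++/pool.hpp>
#include <libpmemobj++/transaction.hpp>

using map_type =
	pmem::obj::concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>>;

struct example_root {
	pmem::obj::persistent_ptr<map_type> map;
};

int
main()
{
	/* "example_pool" and the layout string are placeholders. */
	auto pop = pmem::obj::pool<example_root>::open("example_pool",
						       "chm_example");
	auto root = pop.root();

	if (root->map == nullptr) {
		/* First run: allocate the map transactionally. */
		pmem::obj::transaction::run(pop, [&] {
			root->map = pmem::obj::make_persistent<map_type>();
		});
	} else {
		/* After a restart: rebuild transient state (locks, etc.). */
		root->map->runtime_initialize();
	}

	map_type::accessor acc;
	root->map->insert(acc, map_type::value_type(1, 1));
	acc.release();

	pop.close();
	return 0;
}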