Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
concurrent_hash_map.h
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #ifndef __TBB_concurrent_hash_map_H
18 #define __TBB_concurrent_hash_map_H
19 
20 #include "tbb_stddef.h"
21 #include <iterator>
22 #include <utility> // Need std::pair
23 #include <cstring> // Need std::memset
24 #include __TBB_STD_SWAP_HEADER
25 
26 #include "tbb_allocator.h"
27 #include "spin_rw_mutex.h"
28 #include "atomic.h"
29 #include "tbb_exception.h"
30 #include "tbb_profiling.h"
31 #include "aligned_space.h"
32 #include "internal/_tbb_hash_compare_impl.h"
33 #include "internal/_template_helpers.h"
34 #include "internal/_allocator_traits.h"
35 #if __TBB_INITIALIZER_LISTS_PRESENT
36 #include <initializer_list>
37 #endif
38 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
39 #include <typeinfo>
40 #endif
41 #if __TBB_STATISTICS
42 #include <stdio.h>
43 #endif
44 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
45 // On most platforms __TBB_CPP11_RVALUE_REF_PRESENT implies __TBB_CPP11_TUPLE_PRESENT;
46 // the tuple macro is checked separately for logical correctness.
47 #include <tuple>
48 #endif
49 
50 namespace tbb {
51 
52 namespace interface5 {
53 
54  template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<const Key, T> > >
55  class concurrent_hash_map;
56 
58  namespace internal {
59  using namespace tbb::internal;
60 
61 
63  typedef size_t hashcode_t;
66  struct hash_map_node_base : tbb::internal::no_copy {
68  typedef spin_rw_mutex mutex_t;
70  typedef mutex_t::scoped_lock scoped_t;
71  hash_map_node_base *next;
72  mutex_t mutex;
73  };
75  static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));
77  static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));
79  class hash_map_base {
80  public:
82  typedef size_t size_type;
84  typedef size_t hashcode_t;
86  typedef size_t segment_index_t;
88  typedef hash_map_node_base node_base;
90  struct bucket : tbb::internal::no_copy {
92  typedef spin_rw_mutex mutex_t;
94  typedef mutex_t::scoped_lock scoped_t;
95  mutex_t mutex;
96  node_base *node_list;
97  };
99  static size_type const embedded_block = 1;
101  static size_type const embedded_buckets = 1<<embedded_block;
103  static size_type const first_block = 8; //including embedded_block. perfect with bucket size 16, so the allocations are power of 4096
105  static size_type const pointers_per_table = sizeof(segment_index_t) * 8; // one segment per bit
107  typedef bucket *segment_ptr_t;
109  typedef segment_ptr_t segments_table_t[pointers_per_table];
111  atomic<hashcode_t> my_mask;
113  segments_table_t my_table;
115  atomic<size_type> my_size; // It must be in separate cache line from my_mask due to performance effects
117  bucket my_embedded_segment[embedded_buckets];
118 #if __TBB_STATISTICS
119  atomic<unsigned> my_info_resizes; // concurrent ones
120  mutable atomic<unsigned> my_info_restarts; // race collisions
121  atomic<unsigned> my_info_rehashes; // invocations of rehash_bucket
122 #endif
123  hash_map_base() {
125  std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t) // 32*4=128 or 64*8=512
126  + sizeof(my_size) + sizeof(my_mask) // 4+4 or 8+8
127  + embedded_buckets*sizeof(bucket) ); // n*8 or n*16
128  for( size_type i = 0; i < embedded_block; i++ ) // fill the table
129  my_table[i] = my_embedded_segment + segment_base(i);
130  my_mask = embedded_buckets - 1;
131  __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
132 #if __TBB_STATISTICS
133  my_info_resizes = 0; // concurrent ones
134  my_info_restarts = 0; // race collisions
135  my_info_rehashes = 0; // invocations of rehash_bucket
136 #endif
137  }
138 
140  static segment_index_t segment_index_of( size_type index ) {
141  return segment_index_t( __TBB_Log2( index|1 ) );
142  }
143 
145  static segment_index_t segment_base( segment_index_t k ) {
146  return (segment_index_t(1)<<k & ~segment_index_t(1));
147  }
148 
150  static size_type segment_size( segment_index_t k ) {
151  return size_type(1)<<k; // fake value for k==0
152  }
153 
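The three helpers above define the segmented bucket layout: buckets 0 and 1 live in the embedded segment 0, and segment k >= 1 covers bucket indices [2^k, 2^(k+1)). The following standalone sketch (not part of the header; names with a trailing underscore are illustrative stand-ins, and log2_floor replaces __TBB_Log2) reproduces the same arithmetic:

    #include <cassert>
    #include <cstddef>

    // Standalone re-statement of the segment arithmetic used by hash_map_base.
    static std::size_t log2_floor( std::size_t x ) {
        std::size_t r = 0;
        while( x >>= 1 ) ++r;
        return r;
    }
    static std::size_t segment_index_of_( std::size_t index ) { return log2_floor( index | 1 ); }
    static std::size_t segment_base_( std::size_t k )         { return (std::size_t(1) << k) & ~std::size_t(1); }
    static std::size_t segment_size_( std::size_t k )         { return std::size_t(1) << k; }

    int main() {
        // Buckets 0 and 1 belong to the embedded segment 0.
        assert( segment_index_of_(0) == 0 && segment_index_of_(1) == 0 );
        // Segment 2 starts at bucket 4 and holds 4 buckets (indices 4..7).
        assert( segment_index_of_(5) == 2 );
        assert( segment_base_(2) == 4 && segment_size_(2) == 4 );
        // In general, bucket i of segment k is stored at my_table[k][i - segment_base(k)].
        return 0;
    }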
155  static bool is_valid( void *ptr ) {
156  return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63);
157  }
158 
160  static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
161  if( is_initial ) std::memset( static_cast<void*>(ptr), 0, sz*sizeof(bucket) );
162  else for(size_type i = 0; i < sz; i++, ptr++) {
163  *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
164  ptr->node_list = rehash_req;
165  }
166  }
167 
169  static void add_to_bucket( bucket *b, node_base *n ) {
170  __TBB_ASSERT(b->node_list != rehash_req, NULL);
171  n->next = b->node_list;
172  b->node_list = n; // it's under lock and the flag is set
173  }
174 
176  struct enable_segment_failsafe : tbb::internal::no_copy {
177  segment_ptr_t *my_segment_ptr;
178  enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {}
179  ~enable_segment_failsafe() {
180  if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
181  }
182  };
183 
185  template<typename Allocator>
186  void enable_segment( segment_index_t k, const Allocator& allocator, bool is_initial = false ) {
187  typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
188  typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
189  bucket_allocator_type bucket_allocator(allocator);
190  __TBB_ASSERT( k, "Zero segment must be embedded" );
191  enable_segment_failsafe watchdog( my_table, k );
192  size_type sz;
193  __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
194  if( k >= first_block ) {
195  sz = segment_size( k );
196  segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz);
197  init_buckets( ptr, sz, is_initial );
198  itt_hide_store_word( my_table[k], ptr );
199  sz <<= 1;// double it to get entire capacity of the container
200  } else { // the first block
201  __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
202  sz = segment_size( first_block );
203  segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz - embedded_buckets);
204  init_buckets( ptr, sz - embedded_buckets, is_initial );
205  ptr -= segment_base(embedded_block);
206  for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
207  itt_hide_store_word( my_table[i], ptr + segment_base(i) );
208  }
209  itt_store_word_with_release( my_mask, sz-1 );
210  watchdog.my_segment_ptr = 0;
211  }
212 
213  template<typename Allocator>
214  void delete_segment(segment_index_t s, const Allocator& allocator) {
215  typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
216  typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
217  bucket_allocator_type bucket_allocator(allocator);
218  segment_ptr_t buckets_ptr = my_table[s];
219  size_type sz = segment_size( s ? s : 1 );
220 
221  if( s >= first_block) // the first segment or the next
222  bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr, sz);
223  else if( s == embedded_block && embedded_block != first_block )
224  bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr,
225  segment_size(first_block) - embedded_buckets);
226  if( s >= embedded_block ) my_table[s] = 0;
227  }
228 
230  bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere?
231  segment_index_t s = segment_index_of( h );
232  h -= segment_base(s);
233  segment_ptr_t seg = my_table[s];
234  __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
235  return &seg[h];
236  }
237 
238  // internal serial rehashing helper
239  void mark_rehashed_levels( hashcode_t h ) throw () {
240  segment_index_t s = segment_index_of( h );
241  while( segment_ptr_t seg = my_table[++s] )
242  if( seg[h].node_list == rehash_req ) {
243  seg[h].node_list = empty_rehashed;
244  mark_rehashed_levels( h + ((hashcode_t)1<<s) ); // optimized segment_base(s)
245  }
246  }
247 
249  // Splitting into two functions should help inlining
250  inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
251  hashcode_t m_now, m_old = m;
252  m_now = (hashcode_t) itt_load_word_with_acquire( my_mask );
253  if( m_old != m_now )
254  return check_rehashing_collision( h, m_old, m = m_now );
255  return false;
256  }
257 
259  bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
260  __TBB_ASSERT(m_old != m, NULL); // TODO?: m arg could be optimized out by passing h = h&m
261  if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
262  // condition above proves that 'h' has some other bits set beside 'm_old'
263  // find next applicable mask after m_old //TODO: look at bsl instruction
264  for( ++m_old; !(h & m_old); m_old <<= 1 ) // at maximum few rounds depending on the first block size
265  ;
266  m_old = (m_old<<1) - 1; // get full mask from a bit
267  __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
268  // check whether it is rehashing/ed
269  if( itt_load_word_with_acquire(get_bucket(h & m_old)->node_list) != rehash_req )
270  {
271 #if __TBB_STATISTICS
272  my_info_restarts++; // race collisions
273 #endif
274  return true;
275  }
276  }
277  return false;
278  }
279 
281  segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
282  size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
283  add_to_bucket( b, n );
284  // check load factor
285  if( sz >= mask ) { // TODO: add custom load_factor
286  segment_index_t new_seg = __TBB_Log2( mask+1 ); //optimized segment_index_of
287  __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
288  static const segment_ptr_t is_allocating = (segment_ptr_t)2;
289  if( !itt_hide_load_word(my_table[new_seg])
290  && as_atomic(my_table[new_seg]).compare_and_swap(is_allocating, NULL) == NULL )
291  return new_seg; // The value must be processed
292  }
293  return 0;
294  }
295 
297  template<typename Allocator>
298  void reserve(size_type buckets, const Allocator& allocator) {
299  if( !buckets-- ) return;
300  bool is_initial = !my_size;
301  for( size_type m = my_mask; buckets > m; m = my_mask )
302  enable_segment( segment_index_of( m+1 ), allocator, is_initial );
303  }
305  void internal_swap(hash_map_base &table) {
306  using std::swap;
307  swap(this->my_mask, table.my_mask);
308  swap(this->my_size, table.my_size);
309  for(size_type i = 0; i < embedded_buckets; i++)
310  swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
311  for(size_type i = embedded_block; i < pointers_per_table; i++)
312  swap(this->my_table[i], table.my_table[i]);
313  }
314 
315 #if __TBB_CPP11_RVALUE_REF_PRESENT
316  void internal_move(hash_map_base&& other) {
317  my_mask = other.my_mask;
318  other.my_mask = embedded_buckets - 1;
319  my_size = other.my_size;
320  other.my_size = 0;
321 
322  for(size_type i = 0; i < embedded_buckets; ++i) {
323  my_embedded_segment[i].node_list = other.my_embedded_segment[i].node_list;
324  other.my_embedded_segment[i].node_list = NULL;
325  }
326 
327  for(size_type i = embedded_block; i < pointers_per_table; ++i) {
328  my_table[i] = other.my_table[i];
329  other.my_table[i] = NULL;
330  }
331  }
332 #endif // __TBB_CPP11_RVALUE_REF_PRESENT
333  };
334 
335  template<typename Iterator>
336  class hash_map_range;
337 
339 
341  template<typename Container, typename Value>
342  class hash_map_iterator
343  : public std::iterator<std::forward_iterator_tag,Value>
344  {
345  typedef Container map_type;
346  typedef typename Container::node node;
347  typedef typename Container::bucket bucket;
348  typedef typename Container::node_base node_base;
349 
350  template<typename C, typename T, typename U>
351  friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
352 
353  template<typename C, typename T, typename U>
354  friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
355 
356  template<typename C, typename T, typename U>
357  friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
358 
359  template<typename C, typename U>
360  friend class hash_map_iterator;
361 
362  template<typename I>
363  friend class hash_map_range;
364 
365  void advance_to_next_bucket() { // TODO?: refactor to iterator_base class
366  size_t k = my_index+1;
367  __TBB_ASSERT( my_bucket, "advancing an invalid iterator?");
368  while( k <= my_map->my_mask ) {
369  // Following test uses 2's-complement wizardry
370  if( k&(k-2) ) // not the beginning of a segment
371  ++my_bucket;
372  else my_bucket = my_map->get_bucket( k );
373  my_node = static_cast<node*>( my_bucket->node_list );
374  if( hash_map_base::is_valid(my_node) ) {
375  my_index = k; return;
376  }
377  ++k;
378  }
379  my_bucket = 0; my_node = 0; my_index = k; // the end
380  }
381 #if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
382  template<typename Key, typename T, typename HashCompare, typename A>
383  friend class interface5::concurrent_hash_map;
384 #else
385  public: // workaround
386 #endif
387  const Container *my_map;
389 
391  size_t my_index;
392 
394  const bucket *my_bucket;
395 
397  node *my_node;
398 
399  hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );
400 
401  public:
403  hash_map_iterator(): my_map(), my_index(), my_bucket(), my_node() {}
404  hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type> &other ) :
405  my_map(other.my_map),
406  my_index(other.my_index),
407  my_bucket(other.my_bucket),
408  my_node(other.my_node)
409  {}
410  Value& operator*() const {
411  __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
412  return my_node->value();
413  }
414  Value* operator->() const {return &operator*();}
415  hash_map_iterator& operator++();
416 
418  hash_map_iterator operator++(int) {
419  hash_map_iterator old(*this);
420  operator++();
421  return old;
422  }
423  };
424 
425  template<typename Container, typename Value>
426  hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
427  my_map(&map),
428  my_index(index),
429  my_bucket(b),
430  my_node( static_cast<node*>(n) )
431  {
432  if( b && !hash_map_base::is_valid(n) )
433  advance_to_next_bucket();
434  }
435 
436  template<typename Container, typename Value>
437  hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
438  my_node = static_cast<node*>( my_node->next );
439  if( !my_node ) advance_to_next_bucket();
440  return *this;
441  }
442 
443  template<typename Container, typename T, typename U>
444  bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
445  return i.my_node == j.my_node && i.my_map == j.my_map;
446  }
447 
448  template<typename Container, typename T, typename U>
449  bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
450  return i.my_node != j.my_node || i.my_map != j.my_map;
451  }
452 
454 
455  template<typename Iterator>
456  class hash_map_range {
457  typedef typename Iterator::map_type map_type;
458  Iterator my_begin;
459  Iterator my_end;
460  mutable Iterator my_midpoint;
461  size_t my_grainsize;
463  void set_midpoint() const;
464  template<typename U> friend class hash_map_range;
465  public:
467  typedef std::size_t size_type;
468  typedef typename Iterator::value_type value_type;
469  typedef typename Iterator::reference reference;
470  typedef typename Iterator::difference_type difference_type;
471  typedef Iterator iterator;
472 
474  bool empty() const {return my_begin==my_end;}
475 
477  bool is_divisible() const {
478  return my_midpoint!=my_end;
479  }
481  hash_map_range( hash_map_range& r, split ) :
482  my_end(r.my_end),
483  my_grainsize(r.my_grainsize)
484  {
485  r.my_end = my_begin = r.my_midpoint;
486  __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
487  __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
488  set_midpoint();
489  r.set_midpoint();
490  }
492  template<typename U>
493  hash_map_range( hash_map_range<U>& r) :
494  my_begin(r.my_begin),
495  my_end(r.my_end),
496  my_midpoint(r.my_midpoint),
497  my_grainsize(r.my_grainsize)
498  {}
500  hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
501  my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
502  my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
503  my_grainsize( grainsize_ )
504  {
505  __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
506  set_midpoint();
507  }
508  const Iterator& begin() const {return my_begin;}
509  const Iterator& end() const {return my_end;}
511  size_type grainsize() const {return my_grainsize;}
512  };
513 
514  template<typename Iterator>
515  void hash_map_range<Iterator>::set_midpoint() const {
516  // Split by groups of nodes
517  size_t m = my_end.my_index-my_begin.my_index;
518  if( m > my_grainsize ) {
519  m = my_begin.my_index + m/2u;
520  hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
521  my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
522  } else {
523  my_midpoint = my_end;
524  }
525  __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
526  "my_begin is after my_midpoint" );
527  __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
528  "my_midpoint is after my_end" );
529  __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
530  "[my_begin, my_midpoint) range should not be empty" );
531  }
532 
533  } // internal
535 
536 #if _MSC_VER && !defined(__INTEL_COMPILER)
537  // Suppress "conditional expression is constant" warning.
538  #pragma warning( push )
539  #pragma warning( disable: 4127 )
540 #endif
541 
543 
572 template<typename Key, typename T, typename HashCompare, typename Allocator>
573 class concurrent_hash_map : protected internal::hash_map_base {
574  template<typename Container, typename Value>
575  friend class internal::hash_map_iterator;
576 
577  template<typename I>
578  friend class internal::hash_map_range;
579 
580 public:
581  typedef Key key_type;
582  typedef T mapped_type;
583  typedef std::pair<const Key,T> value_type;
584  typedef hash_map_base::size_type size_type;
585  typedef ptrdiff_t difference_type;
586  typedef value_type *pointer;
587  typedef const value_type *const_pointer;
588  typedef value_type &reference;
589  typedef const value_type &const_reference;
590  typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
591  typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
592  typedef internal::hash_map_range<iterator> range_type;
593  typedef internal::hash_map_range<const_iterator> const_range_type;
594  typedef Allocator allocator_type;
595 
596 protected:
597  friend class const_accessor;
598  class node;
599  typedef typename tbb::internal::allocator_rebind<Allocator, node>::type node_allocator_type;
600  typedef tbb::internal::allocator_traits<node_allocator_type> node_allocator_traits;
601  node_allocator_type my_allocator;
602  HashCompare my_hash_compare;
603 
604  class node : public node_base {
605  tbb::aligned_space<value_type> my_value;
606  public:
607  value_type* storage() { return my_value.begin(); }
608  value_type& value() { return *storage(); }
609  };
610 
611  void delete_node( node_base *n ) {
612  node_allocator_traits::destroy(my_allocator, static_cast<node*>(n)->storage());
613  node_allocator_traits::destroy(my_allocator, static_cast<node*>(n));
614  node_allocator_traits::deallocate(my_allocator, static_cast<node*>(n), 1);
615  }
616 
617  struct node_scoped_guard : tbb::internal::no_copy {
618  node* my_node;
619  node_allocator_type& my_alloc;
620 
621  node_scoped_guard(node* n, node_allocator_type& alloc) : my_node(n), my_alloc(alloc) {}
622  ~node_scoped_guard(){
623  if(my_node) {
624  node_allocator_traits::destroy(my_alloc, my_node);
625  node_allocator_traits::deallocate(my_alloc, my_node, 1);
626  }
627  }
628  void dismiss() { my_node = NULL; }
629  };
630 
631 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
632  template<typename... Args>
633  static node* create_node(node_allocator_type& allocator, Args&&... args)
634 #else
635  template<typename Arg1, typename Arg2>
636  static node* create_node(node_allocator_type& allocator, __TBB_FORWARDING_REF(Arg1) arg1, __TBB_FORWARDING_REF(Arg2) arg2)
637 #endif
638  {
639  node* node_ptr = node_allocator_traits::allocate(allocator, 1);
640  node_scoped_guard guard(node_ptr, allocator);
641  node_allocator_traits::construct(allocator, node_ptr);
642 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
643  node_allocator_traits::construct(allocator, node_ptr->storage(), std::forward<Args>(args)...);
644 #else
645  node_allocator_traits::construct(allocator, node_ptr->storage(), tbb::internal::forward<Arg1>(arg1), tbb::internal::forward<Arg2>(arg2));
646 #endif
647  guard.dismiss();
648  return node_ptr;
649  }
650 
651  static node* allocate_node_copy_construct(node_allocator_type& allocator, const Key &key, const T * t){
652  return create_node(allocator, key, *t);
653  }
654 
655 #if __TBB_CPP11_RVALUE_REF_PRESENT
656  static node* allocate_node_move_construct(node_allocator_type& allocator, const Key &key, const T * t){
657  return create_node(allocator, key, std::move(*const_cast<T*>(t)));
658  }
659 #endif
660 
661  static node* allocate_node_default_construct(node_allocator_type& allocator, const Key &key, const T * ){
662 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
663  // Emplace construct an empty T object inside the pair
664  return create_node(allocator, std::piecewise_construct,
665  std::forward_as_tuple(key), std::forward_as_tuple());
666 #else
667  T obj; // Use of a temporary object is impossible, because create_node takes a non-const reference
668  return create_node(allocator, key, tbb::internal::move(obj));
669 #endif
670  }
671 
672  static node* do_not_allocate_node(node_allocator_type& , const Key &, const T * ){
673  __TBB_ASSERT(false,"this dummy function should not be called");
674  return NULL;
675  }
676 
677  node *search_bucket( const key_type &key, bucket *b ) const {
678  node *n = static_cast<node*>( b->node_list );
679  while( is_valid(n) && !my_hash_compare.equal(key, n->value().first) )
680  n = static_cast<node*>( n->next );
681  __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
682  return n;
683  }
684 
686  class bucket_accessor : public bucket::scoped_t {
687  bucket *my_b;
688  public:
689  bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
691  inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
692  my_b = base->get_bucket( h );
693  // TODO: actually, notification is unnecessary here, just hiding double-check
694  if( itt_load_word_with_acquire(my_b->node_list) == internal::rehash_req
695  && try_acquire( my_b->mutex, /*write=*/true ) )
696  {
697  if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); //recursive rehashing
698  }
699  else bucket::scoped_t::acquire( my_b->mutex, writer );
700  __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL);
701  }
703  bool is_writer() { return bucket::scoped_t::is_writer; }
705  bucket *operator() () { return my_b; }
706  };
707 
708  // TODO refactor to hash_base
709  void rehash_bucket( bucket *b_new, const hashcode_t h ) {
710  __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
711  __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
712  __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed
713  hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
714 #if __TBB_STATISTICS
715  my_info_rehashes++; // invocations of rehash_bucket
716 #endif
717 
718  bucket_accessor b_old( this, h & mask );
719 
720  mask = (mask<<1) | 1; // get full mask for new bucket
721  __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
722  restart:
723  for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
724  hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->value().first );
725 #if TBB_USE_ASSERT
726  hashcode_t bmask = h & (mask>>1);
727  bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
728  __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
729 #endif
730  if( (c & mask) == h ) {
731  if( !b_old.is_writer() )
732  if( !b_old.upgrade_to_writer() ) {
733  goto restart; // node ptr can be invalid due to concurrent erase
734  }
735  *p = n->next; // exclude from b_old
736  add_to_bucket( b_new, n );
737  } else p = &n->next; // iterate to next item
738  }
739  }
740 
741  struct call_clear_on_leave {
742  concurrent_hash_map* my_ch_map;
743  call_clear_on_leave( concurrent_hash_map* a_ch_map ) : my_ch_map(a_ch_map) {}
744  void dismiss() {my_ch_map = 0;}
745  ~call_clear_on_leave(){
746  if (my_ch_map){
747  my_ch_map->clear();
748  }
749  }
750  };
751 public:
752 
753  class accessor;
755  class const_accessor : private node::scoped_t /*which derived from no_copy*/ {
756  friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
757  friend class accessor;
758  public:
760  typedef const typename concurrent_hash_map::value_type value_type;
761 
763  bool empty() const { return !my_node; }
764 
766  void release() {
767  if( my_node ) {
768  node::scoped_t::release();
769  my_node = 0;
770  }
771  }
772 
774  const_reference operator*() const {
775  __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
776  return my_node->value();
777  }
778 
780  const_pointer operator->() const {
781  return &operator*();
782  }
783 
785  const_accessor() : my_node(NULL) {}
786 
788  ~const_accessor() {
789  my_node = NULL; // scoped lock's release() is called in its destructor
790  }
791  protected:
792  bool is_writer() { return node::scoped_t::is_writer; }
793  node *my_node;
794  hashcode_t my_hash;
795  };
796 
798  class accessor: public const_accessor {
799  public:
802 
804  reference operator*() const {
805  __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
806  return this->my_node->value();
807  }
808 
810  pointer operator->() const {
811  return &operator*();
812  }
813  };
814 
816  explicit concurrent_hash_map( const allocator_type &a = allocator_type() )
817  : internal::hash_map_base(), my_allocator(a)
818  {}
819 
820  explicit concurrent_hash_map( const HashCompare& compare, const allocator_type& a = allocator_type() )
821  : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
822  {}
823 
825  concurrent_hash_map( size_type n, const allocator_type &a = allocator_type() )
826  : internal::hash_map_base(), my_allocator(a)
827  {
828  reserve( n, my_allocator );
829  }
830 
831  concurrent_hash_map( size_type n, const HashCompare& compare, const allocator_type& a = allocator_type() )
832  : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
833  {
834  reserve( n, my_allocator );
835  }
836 
838  concurrent_hash_map( const concurrent_hash_map &table )
839  : internal::hash_map_base(),
840  my_allocator(node_allocator_traits::select_on_container_copy_construction(table.get_allocator()))
841  {
842  call_clear_on_leave scope_guard(this);
843  internal_copy(table);
844  scope_guard.dismiss();
845  }
846 
847  concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a)
848  : internal::hash_map_base(), my_allocator(a)
849  {
850  call_clear_on_leave scope_guard(this);
851  internal_copy(table);
852  scope_guard.dismiss();
853  }
854 
855 #if __TBB_CPP11_RVALUE_REF_PRESENT
857  concurrent_hash_map( concurrent_hash_map &&table )
858  : internal::hash_map_base(), my_allocator(std::move(table.get_allocator()))
859  {
860  internal_move(std::move(table));
861  }
862 
864  concurrent_hash_map( concurrent_hash_map &&table, const allocator_type &a )
865  : internal::hash_map_base(), my_allocator(a)
866  {
867  if (a == table.get_allocator()){
868  internal_move(std::move(table));
869  }else{
870  call_clear_on_leave scope_guard(this);
871  internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end()), table.size());
872  scope_guard.dismiss();
873  }
874  }
875 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
876 
878  template<typename I>
879  concurrent_hash_map( I first, I last, const allocator_type &a = allocator_type() )
880  : internal::hash_map_base(), my_allocator(a)
881  {
882  call_clear_on_leave scope_guard(this);
883  internal_copy(first, last, std::distance(first, last));
884  scope_guard.dismiss();
885  }
886 
887  template<typename I>
888  concurrent_hash_map( I first, I last, const HashCompare& compare, const allocator_type& a = allocator_type() )
889  : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
890  {
891  call_clear_on_leave scope_guard(this);
892  internal_copy(first, last, std::distance(first, last));
893  scope_guard.dismiss();
894  }
895 
896 #if __TBB_INITIALIZER_LISTS_PRESENT
897  concurrent_hash_map( std::initializer_list<value_type> il, const allocator_type &a = allocator_type() )
899  : internal::hash_map_base(), my_allocator(a)
900  {
901  call_clear_on_leave scope_guard(this);
902  internal_copy(il.begin(), il.end(), il.size());
903  scope_guard.dismiss();
904  }
905 
906  concurrent_hash_map( std::initializer_list<value_type> il, const HashCompare& compare, const allocator_type& a = allocator_type() )
907  : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
908  {
909  call_clear_on_leave scope_guard(this);
910  internal_copy(il.begin(), il.end(), il.size());
911  scope_guard.dismiss();
912  }
913 
914 #endif //__TBB_INITIALIZER_LISTS_PRESENT
915 
917  concurrent_hash_map& operator=( const concurrent_hash_map &table ) {
918  if( this!=&table ) {
919  typedef typename node_allocator_traits::propagate_on_container_copy_assignment pocca_type;
920  clear();
921  tbb::internal::allocator_copy_assignment(my_allocator, table.my_allocator, pocca_type());
922  internal_copy(table);
923  }
924  return *this;
925  }
926 
927 #if __TBB_CPP11_RVALUE_REF_PRESENT
928  concurrent_hash_map& operator=( concurrent_hash_map &&table ) {
930  if(this != &table) {
931  typedef typename node_allocator_traits::propagate_on_container_move_assignment pocma_type;
932  internal_move_assign(std::move(table), pocma_type());
933  }
934  return *this;
935  }
936 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
937 
938 #if __TBB_INITIALIZER_LISTS_PRESENT
939  concurrent_hash_map& operator=( std::initializer_list<value_type> il ) {
941  clear();
942  internal_copy(il.begin(), il.end(), il.size());
943  return *this;
944  }
945 #endif //__TBB_INITIALIZER_LISTS_PRESENT
946 
947 
949 
951  void rehash(size_type n = 0);
952 
954  void clear();
955 
957  ~concurrent_hash_map() { clear(); }
958 
959  //------------------------------------------------------------------------
960  // Parallel algorithm support
961  //------------------------------------------------------------------------
962  range_type range( size_type grainsize=1 ) {
963  return range_type( *this, grainsize );
964  }
965  const_range_type range( size_type grainsize=1 ) const {
966  return const_range_type( *this, grainsize );
967  }
968 
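As a usage note, range() makes the table compatible with TBB's range-based parallel algorithms. Below is a minimal sketch, not taken from the header, assuming C++11 lambdas and that no other thread inserts or erases elements while the traversal runs; parallel_sum and table_t are illustrative names:

    #include <tbb/concurrent_hash_map.h>
    #include <tbb/parallel_for.h>
    #include <tbb/atomic.h>

    typedef tbb::concurrent_hash_map<int, int> table_t;

    // Sum all mapped values by walking the table in parallel.
    int parallel_sum( const table_t &table ) {
        tbb::atomic<int> total;
        total = 0;
        tbb::parallel_for( table.range(), [&total]( const table_t::const_range_type &r ) {
            int local = 0;
            for( table_t::const_range_type::iterator it = r.begin(); it != r.end(); ++it )
                local += it->second;
            total += local; // atomic accumulation of the per-subrange sum
        } );
        return total;
    }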
969  //------------------------------------------------------------------------
970  // STL support - not thread-safe methods
971  //------------------------------------------------------------------------
972  iterator begin() { return iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }
973  iterator end() { return iterator( *this, 0, 0, 0 ); }
974  const_iterator begin() const { return const_iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }
975  const_iterator end() const { return const_iterator( *this, 0, 0, 0 ); }
976  std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range( key, end() ); }
977  std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range( key, end() ); }
978 
980  size_type size() const { return my_size; }
981 
983  bool empty() const { return my_size == 0; }
984 
986  size_type max_size() const {return (~size_type(0))/sizeof(node);}
987 
989  size_type bucket_count() const { return my_mask+1; }
990 
992  allocator_type get_allocator() const { return this->my_allocator; }
993 
995  void swap( concurrent_hash_map &table );
996 
997  //------------------------------------------------------------------------
998  // concurrent map operations
999  //------------------------------------------------------------------------
1000 
1002  size_type count( const Key &key ) const {
1003  return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false, &do_not_allocate_node );
1004  }
1005 
1007 
1008  bool find( const_accessor &result, const Key &key ) const {
1009  result.release();
1010  return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false, &do_not_allocate_node );
1011  }
1012 
1014 
1015  bool find( accessor &result, const Key &key ) {
1016  result.release();
1017  return lookup(/*insert*/false, key, NULL, &result, /*write=*/true, &do_not_allocate_node );
1018  }
1019 
1021 
1022  bool insert( const_accessor &result, const Key &key ) {
1023  result.release();
1024  return lookup(/*insert*/true, key, NULL, &result, /*write=*/false, &allocate_node_default_construct );
1025  }
1026 
1028 
1029  bool insert( accessor &result, const Key &key ) {
1030  result.release();
1031  return lookup(/*insert*/true, key, NULL, &result, /*write=*/true, &allocate_node_default_construct );
1032  }
1033 
1035 
1036  bool insert( const_accessor &result, const value_type &value ) {
1037  result.release();
1038  return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false, &allocate_node_copy_construct );
1039  }
1040 
1042 
1043  bool insert( accessor &result, const value_type &value ) {
1044  result.release();
1045  return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true, &allocate_node_copy_construct );
1046  }
1047 
1049 
1050  bool insert( const value_type &value ) {
1051  return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false, &allocate_node_copy_construct );
1052  }
1053 
1054 #if __TBB_CPP11_RVALUE_REF_PRESENT
1055 
1057  bool insert( const_accessor &result, value_type && value ) {
1058  return generic_move_insert(result, std::move(value));
1059  }
1060 
1062 
1063  bool insert( accessor &result, value_type && value ) {
1064  return generic_move_insert(result, std::move(value));
1065  }
1066 
1068 
1069  bool insert( value_type && value ) {
1070  return generic_move_insert(accessor_not_used(), std::move(value));
1071  }
1072 
1073 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1074 
1076  template<typename... Args>
1077  bool emplace( const_accessor &result, Args&&... args ) {
1078  return generic_emplace(result, std::forward<Args>(args)...);
1079  }
1080 
1082 
1083  template<typename... Args>
1084  bool emplace( accessor &result, Args&&... args ) {
1085  return generic_emplace(result, std::forward<Args>(args)...);
1086  }
1087 
1089 
1090  template<typename... Args>
1091  bool emplace( Args&&... args ) {
1092  return generic_emplace(accessor_not_used(), std::forward<Args>(args)...);
1093  }
1094 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1095 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
1096 
1098  template<typename I>
1099  void insert( I first, I last ) {
1100  for ( ; first != last; ++first )
1101  insert( *first );
1102  }
1103 
1104 #if __TBB_INITIALIZER_LISTS_PRESENT
1105  void insert( std::initializer_list<value_type> il ) {
1107  insert( il.begin(), il.end() );
1108  }
1109 #endif //__TBB_INITIALIZER_LISTS_PRESENT
1110 
1112 
1113  bool erase( const Key& key );
1114 
1116 
1117  bool erase( const_accessor& item_accessor ) {
1118  return exclude( item_accessor );
1119  }
1120 
1122 
1123  bool erase( accessor& item_accessor ) {
1124  return exclude( item_accessor );
1125  }
1126 
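Taken together, the accessor-based operations above provide per-element locking: an accessor holds a write lock on the element for as long as it is alive, a const_accessor holds a read lock. A hypothetical word-count style sketch of the pattern (counter_table_t, add_word and read_count are illustrative names, not part of the library):

    #include <tbb/concurrent_hash_map.h>
    #include <string>

    typedef tbb::concurrent_hash_map<std::string, int> counter_table_t;

    // Safe to call concurrently from many threads for the same or different words.
    void add_word( counter_table_t &counters, const std::string &word ) {
        counter_table_t::accessor a;        // grants exclusive (write) access while alive
        counters.insert( a, word );         // inserts {word, T()} if the key is absent
        ++a->second;                        // update under the element lock
    }                                       // lock released when 'a' is destroyed

    int read_count( const counter_table_t &counters, const std::string &word ) {
        counter_table_t::const_accessor ca; // shared (read) lock
        return counters.find( ca, word ) ? ca->second : 0;
    }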
1127 protected:
1129  bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key &, const T * ), node *tmp_n = 0 ) ;
1130 
1131  struct accessor_not_used { void release(){}};
1132  friend const_accessor* accessor_location( accessor_not_used const& ){ return NULL;}
1133  friend const_accessor* accessor_location( const_accessor & a ) { return &a;}
1134 
1135  friend bool is_write_access_needed( accessor const& ) { return true;}
1136  friend bool is_write_access_needed( const_accessor const& ) { return false;}
1137  friend bool is_write_access_needed( accessor_not_used const& ) { return false;}
1138 
1139 #if __TBB_CPP11_RVALUE_REF_PRESENT
1140  template<typename Accessor>
1141  bool generic_move_insert( Accessor && result, value_type && value ) {
1142  result.release();
1143  return lookup(/*insert*/true, value.first, &value.second, accessor_location(result), is_write_access_needed(result), &allocate_node_move_construct );
1144  }
1145 
1146 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1147  template<typename Accessor, typename... Args>
1148  bool generic_emplace( Accessor && result, Args &&... args ) {
1149  result.release();
1150  node * node_ptr = create_node(my_allocator, std::forward<Args>(args)...);
1151  return lookup(/*insert*/true, node_ptr->value().first, NULL, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr );
1152  }
1153 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1154 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
1155 
1157  bool exclude( const_accessor &item_accessor );
1158 
1160  template<typename I>
1161  std::pair<I, I> internal_equal_range( const Key& key, I end ) const;
1162 
1164  void internal_copy( const concurrent_hash_map& source );
1165 
1166  template<typename I>
1167  void internal_copy( I first, I last, size_type reserve_size );
1168 
1169 #if __TBB_CPP11_RVALUE_REF_PRESENT
1170  // A compile-time dispatch to allow move assignment of containers with non-movable value_type if POCMA is true_type
1171  void internal_move_assign(concurrent_hash_map&& other, tbb::internal::traits_true_type) {
1172  tbb::internal::allocator_move_assignment(my_allocator, other.my_allocator, tbb::internal::traits_true_type());
1173  internal_move(std::move(other));
1174  }
1175 
1176  void internal_move_assign(concurrent_hash_map&& other, tbb::internal::traits_false_type) {
1177  if (this->my_allocator == other.my_allocator) {
1178  internal_move(std::move(other));
1179  } else {
1180  //do per element move
1181  internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), other.size());
1182  }
1183  }
1184 #endif
1185 
1187 
1189  const_pointer internal_fast_find( const Key& key ) const {
1190  hashcode_t h = my_hash_compare.hash( key );
1191  hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1192  node *n;
1193  restart:
1194  __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1195  bucket *b = get_bucket( h & m );
1196  // TODO: actually, notification is unnecessary here, just hiding double-check
1197  if( itt_load_word_with_acquire(b->node_list) == internal::rehash_req )
1198  {
1199  bucket::scoped_t lock;
1200  if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
1201  if( b->node_list == internal::rehash_req)
1202  const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); //recursive rehashing
1203  }
1204  else lock.acquire( b->mutex, /*write=*/false );
1206  }
1207  n = search_bucket( key, b );
1208  if( n )
1209  return n->storage();
1210  else if( check_mask_race( h, m ) )
1211  goto restart;
1212  return 0;
1213  }
1214 };
1215 
1216 #if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
1217 namespace internal {
1218 using namespace tbb::internal;
1219 
1220 template<template<typename...> typename Map, typename Key, typename T, typename... Args>
1221 using hash_map_t = Map<
1222  Key, T,
1223  std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >,
1224  pack_element_t<0, Args...>, tbb_hash_compare<Key> >,
1225  std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t<sizeof...(Args)-1, Args...> >,
1226  pack_element_t<sizeof...(Args)-1, Args...>, tbb_allocator<std::pair<const Key, T> > >
1227 >;
1228 }
1229 
1230 // Deduction guide for the constructor from two iterators and hash_compare/ allocator
1231 template<typename I, typename... Args>
1232 concurrent_hash_map(I, I, Args...)
1233 -> internal::hash_map_t<concurrent_hash_map, internal::iterator_key_t<I>,internal::iterator_mapped_t<I>, Args...>;
1234 
1235 // Deduction guide for the constructor from an initializer_list and hash_compare/ allocator
1236 // Deduction guide for an initializer_list, hash_compare and allocator is implicit
1237 template<typename Key, typename T, typename CompareOrAllocator>
1238 concurrent_hash_map(std::initializer_list<std::pair<const Key, T>>, CompareOrAllocator)
1239 -> internal::hash_map_t<concurrent_hash_map, Key, T, CompareOrAllocator>;
1240 
1241 #endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */
1242 
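When the deduction guides above are available (C++17), the key and mapped types can be deduced from an iterator pair. A hedged sketch (deduction_example and src are illustrative, not part of the library):

    #include <tbb/concurrent_hash_map.h>
    #include <string>
    #include <utility>
    #include <vector>

    void deduction_example() {
        std::vector<std::pair<int, std::string> > src;
        src.push_back( std::make_pair( 1, std::string("one") ) );
        // Deduces tbb::concurrent_hash_map<int, std::string> with the default
        // tbb_hash_compare and tbb_allocator arguments.
        tbb::concurrent_hash_map table( src.begin(), src.end() );
        (void)table.size();
    }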
1243 template<typename Key, typename T, typename HashCompare, typename A>
1244 bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key&, const T*), node *tmp_n ) {
1245  __TBB_ASSERT( !result || !result->my_node, NULL );
1246  bool return_value;
1247  hashcode_t const h = my_hash_compare.hash( key );
1248  hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1249  segment_index_t grow_segment = 0;
1250  node *n;
1251  restart:
1252  {//lock scope
1253  __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1254  return_value = false;
1255  // get bucket
1256  bucket_accessor b( this, h & m );
1257 
1258  // find a node
1259  n = search_bucket( key, b() );
1260  if( op_insert ) {
1261  // [opt] insert a key
1262  if( !n ) {
1263  if( !tmp_n ) {
1264  tmp_n = allocate_node(my_allocator, key, t);
1265  }
1266  if( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion
1267  // Rerun search_bucket, in case another thread inserted the item during the upgrade.
1268  n = search_bucket( key, b() );
1269  if( is_valid(n) ) { // unfortunately, it did
1270  b.downgrade_to_reader();
1271  goto exists;
1272  }
1273  }
1274  if( check_mask_race(h, m) )
1275  goto restart; // b.release() is done in ~b().
1276  // insert and set flag to grow the container
1277  grow_segment = insert_new_node( b(), n = tmp_n, m );
1278  tmp_n = 0;
1279  return_value = true;
1280  }
1281  } else { // find or count
1282  if( !n ) {
1283  if( check_mask_race( h, m ) )
1284  goto restart; // b.release() is done in ~b(). TODO: replace by continue
1285  return false;
1286  }
1287  return_value = true;
1288  }
1289  exists:
1290  if( !result ) goto check_growth;
1291  // TODO: the following seems as generic/regular operation
1292  // acquire the item
1293  if( !result->try_acquire( n->mutex, write ) ) {
1294  for( tbb::internal::atomic_backoff backoff(true);; ) {
1295  if( result->try_acquire( n->mutex, write ) ) break;
1296  if( !backoff.bounded_pause() ) {
1297  // the wait takes really long, restart the operation
1298  b.release();
1299  __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
1300  __TBB_Yield();
1301  m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1302  goto restart;
1303  }
1304  }
1305  }
1306  }//lock scope
1307  result->my_node = n;
1308  result->my_hash = h;
1309 check_growth:
1310  // [opt] grow the container
1311  if( grow_segment ) {
1312 #if __TBB_STATISTICS
1313  my_info_resizes++; // concurrent ones
1314 #endif
1315  enable_segment( grow_segment, my_allocator );
1316  }
1317  if( tmp_n ) // if op_insert only
1318  delete_node( tmp_n );
1319  return return_value;
1320 }
1321 
1322 template<typename Key, typename T, typename HashCompare, typename A>
1323 template<typename I>
1324 std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
1325  hashcode_t h = my_hash_compare.hash( key );
1326  hashcode_t m = my_mask;
1327  __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1328  h &= m;
1329  bucket *b = get_bucket( h );
1330  while( b->node_list == internal::rehash_req ) {
1331  m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
1332  b = get_bucket( h &= m );
1333  }
1334  node *n = search_bucket( key, b );
1335  if( !n )
1336  return std::make_pair(end_, end_);
1337  iterator lower(*this, h, b, n), upper(lower);
1338  return std::make_pair(lower, ++upper);
1339 }
1340 
1341 template<typename Key, typename T, typename HashCompare, typename A>
1342  bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor ) {
1343  __TBB_ASSERT( item_accessor.my_node, NULL );
1344  node_base *const n = item_accessor.my_node;
1345  hashcode_t const h = item_accessor.my_hash;
1346  hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1347  do {
1348  // get bucket
1349  bucket_accessor b( this, h & m, /*writer=*/true );
1350  node_base **p = &b()->node_list;
1351  while( *p && *p != n )
1352  p = &(*p)->next;
1353  if( !*p ) { // someone else was first
1354  if( check_mask_race( h, m ) )
1355  continue;
1356  item_accessor.release();
1357  return false;
1358  }
1359  __TBB_ASSERT( *p == n, NULL );
1360  *p = n->next; // remove from container
1361  my_size--;
1362  break;
1363  } while(true);
1364  if( !item_accessor.is_writer() ) // need to get exclusive lock
1365  item_accessor.upgrade_to_writer(); // return value means nothing here
1366  item_accessor.release();
1367  delete_node( n ); // Only one thread can delete it
1368  return true;
1369 }
1370 
1371 template<typename Key, typename T, typename HashCompare, typename A>
1372  bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
1373  node_base *n;
1374  hashcode_t const h = my_hash_compare.hash( key );
1375  hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1376 restart:
1377  {//lock scope
1378  // get bucket
1379  bucket_accessor b( this, h & m );
1380  search:
1381  node_base **p = &b()->node_list;
1382  n = *p;
1383  while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->value().first ) ) {
1384  p = &n->next;
1385  n = *p;
1386  }
1387  if( !n ) { // not found, but mask could be changed
1388  if( check_mask_race( h, m ) )
1389  goto restart;
1390  return false;
1391  }
1392  else if( !b.is_writer() && !b.upgrade_to_writer() ) {
1393  if( check_mask_race( h, m ) ) // contended upgrade, check mask
1394  goto restart;
1395  goto search;
1396  }
1397  *p = n->next;
1398  my_size--;
1399  }
1400  {
1401  typename node::scoped_t item_locker( n->mutex, /*write=*/true );
1402  }
1403  // note: there should be no threads pretending to acquire this mutex again, do not try to upgrade const_accessor!
1404  delete_node( n ); // Only one thread can delete it due to write lock on the bucket
1405  return true;
1406 }
1407 
1408 template<typename Key, typename T, typename HashCompare, typename A>
1409  void concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {
1410  typedef typename node_allocator_traits::propagate_on_container_swap pocs_type;
1411  if (this != &table && (pocs_type::value || my_allocator == table.my_allocator)) {
1412  using std::swap;
1413  tbb::internal::allocator_swap(this->my_allocator, table.my_allocator, pocs_type());
1414  swap(this->my_hash_compare, table.my_hash_compare);
1415  internal_swap(table);
1416  }
1417 }
1418 
1419 template<typename Key, typename T, typename HashCompare, typename A>
1420  void concurrent_hash_map<Key,T,HashCompare,A>::rehash(size_type sz) {
1421  reserve( sz, my_allocator ); // TODO: add reduction of number of buckets as well
1422  hashcode_t mask = my_mask;
1423  hashcode_t b = (mask+1)>>1; // size or first index of the last segment
1424  __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2
1425  bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
1426  for(; b <= mask; b++, bp++ ) {
1427  node_base *n = bp->node_list;
1428  __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
1429  __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
1430  if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one
1431  hashcode_t h = b; bucket *b_old = bp;
1432  do {
1433  __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
1434  hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
1435  b_old = get_bucket( h &= m );
1436  } while( b_old->node_list == internal::rehash_req );
1437  // now h - is index of the root rehashed bucket b_old
1438  mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
1439  for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
1440  hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->value().first );
1441  if( (c & mask) != h ) { // should be rehashed
1442  *p = q->next; // exclude from b_old
1443  bucket *b_new = get_bucket( c & mask );
1444  __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
1445  add_to_bucket( b_new, q );
1446  } else p = &q->next; // iterate to next item
1447  }
1448  }
1449  }
1450 #if TBB_USE_PERFORMANCE_WARNINGS
1451  int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
1452  static bool reported = false;
1453 #endif
1454 #if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
1455  for( b = 0; b <= mask; b++ ) {// only last segment should be scanned for rehashing
1456  if( b & (b-2) ) ++bp; // not the beginning of a segment
1457  else bp = get_bucket( b );
1458  node_base *n = bp->node_list;
1459  __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
1460  __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
1461 #if TBB_USE_PERFORMANCE_WARNINGS
1462  if( n == internal::empty_rehashed ) empty_buckets++;
1463  else if( n->next ) overpopulated_buckets++;
1464 #endif
1465 #if TBB_USE_ASSERT
1466  for( ; is_valid(n); n = n->next ) {
1467  hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first ) & mask;
1468  __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
1469  }
1470 #endif
1471  }
1472 #endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
1473 #if TBB_USE_PERFORMANCE_WARNINGS
1474  if( buckets > current_size) empty_buckets -= buckets - current_size;
1475  else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
1476  if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
1478  "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
1480  typeid(*this).name(),
1481 #else
1482  "concurrent_hash_map",
1483 #endif
1484  current_size, empty_buckets, overpopulated_buckets );
1485  reported = true;
1486  }
1487 #endif
1488 }
1489 
1490 template<typename Key, typename T, typename HashCompare, typename A>
1491  void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
1492  hashcode_t m = my_mask;
1493  __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1494 #if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1495 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1496  int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
1497  static bool reported = false;
1498 #endif
1499  bucket *bp = 0;
1500  // check consistency
1501  for( segment_index_t b = 0; b <= m; b++ ) {
1502  if( b & (b-2) ) ++bp; // not the beginning of a segment
1503  else bp = get_bucket( b );
1504  node_base *n = bp->node_list;
1505  __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
1506  __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
1507 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1508  if( n == internal::empty_rehashed ) empty_buckets++;
1509  else if( n == internal::rehash_req ) buckets--;
1510  else if( n->next ) overpopulated_buckets++;
1511 #endif
1512 #if __TBB_EXTRA_DEBUG
1513  for(; is_valid(n); n = n->next ) {
1514  hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first );
1515  h &= m;
1516  __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
1517  }
1518 #endif
1519  }
1520 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1521 #if __TBB_STATISTICS
1522  printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"
1523  " concurrent: resizes=%u rehashes=%u restarts=%u\n",
1524  current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,
1525  unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );
1526  my_info_resizes = 0; // concurrent ones
1527  my_info_restarts = 0; // race collisions
1528  my_info_rehashes = 0; // invocations of rehash_bucket
1529 #endif
1530  if( buckets > current_size) empty_buckets -= buckets - current_size;
1531  else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
1532  if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
1534  "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
1536  typeid(*this).name(),
1537 #else
1538  "concurrent_hash_map",
1539 #endif
1540  current_size, empty_buckets, overpopulated_buckets );
1541  reported = true;
1542  }
1543 #endif
1544 #endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1545  my_size = 0;
1546  segment_index_t s = segment_index_of( m );
1547  __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
1548  do {
1549  __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
1550  segment_ptr_t buckets_ptr = my_table[s];
1551  size_type sz = segment_size( s ? s : 1 );
1552  for( segment_index_t i = 0; i < sz; i++ )
1553  for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
1554  buckets_ptr[i].node_list = n->next;
1555  delete_node( n );
1556  }
1557  delete_segment(s, my_allocator);
1558  } while(s-- > 0);
1559  my_mask = embedded_buckets - 1;
1560 }
1561 
1562 template<typename Key, typename T, typename HashCompare, typename A>
1563  void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
1564  hashcode_t mask = source.my_mask;
1565  if( my_mask == mask ) { // optimized version
1566  reserve( source.my_size, my_allocator ); // TODO: load_factor?
1567  bucket *dst = 0, *src = 0;
1568  bool rehash_required = false;
1569  for( hashcode_t k = 0; k <= mask; k++ ) {
1570  if( k & (k-2) ) ++dst,src++; // not the beginning of a segment
1571  else { dst = get_bucket( k ); src = source.get_bucket( k ); }
1572  __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
1573  node *n = static_cast<node*>( src->node_list );
1574  if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets
1575  rehash_required = true;
1576  dst->node_list = internal::rehash_req;
1577  } else for(; n; n = static_cast<node*>( n->next ) ) {
1578  node* node_ptr = create_node(my_allocator, n->value().first, n->value().second);
1579  add_to_bucket( dst, node_ptr);
1580  ++my_size; // TODO: replace by non-atomic op
1581  }
1582  }
1583  if( rehash_required ) rehash();
1584  } else internal_copy( source.begin(), source.end(), source.my_size );
1585 }
1586 
1587 template<typename Key, typename T, typename HashCompare, typename A>
1588 template<typename I>
1589 void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last, size_type reserve_size) {
1590  reserve( reserve_size, my_allocator ); // TODO: load_factor?
1591  hashcode_t m = my_mask;
1592  for(; first != last; ++first) {
1593  hashcode_t h = my_hash_compare.hash( (*first).first );
1594  bucket *b = get_bucket( h & m );
1595  __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
1596  node* node_ptr = create_node(my_allocator, (*first).first, (*first).second);
1597  add_to_bucket( b, node_ptr );
1598  ++my_size; // TODO: replace by non-atomic op
1599  }
1600 }
1601 
1602 } // namespace interface5
1603 
1604 using interface5::concurrent_hash_map;
1605 
1606 
1607 template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
1608 inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
1609  if(a.size() != b.size()) return false;
1610  typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
1611  typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
1612  for(; i != i_end; ++i) {
1613  j = b.equal_range(i->first).first;
1614  if( j == j_end || !(i->second == j->second) ) return false;
1615  }
1616  return true;
1617 }
1618 
1619 template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
1620 inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
1621 { return !(a == b); }
1622 
1623 template<typename Key, typename T, typename HashCompare, typename A>
1624 inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
1625 { a.swap( b ); }
1626 
1627 #if _MSC_VER && !defined(__INTEL_COMPILER)
1628  #pragma warning( pop )
1629 #endif // warning 4127 is back
1630 
1631 } // namespace tbb
1632 
1633 #endif /* __TBB_concurrent_hash_map_H */
