Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
concurrent_vector.h
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #ifndef __TBB_concurrent_vector_H
18 #define __TBB_concurrent_vector_H
19 
20 #include "tbb_stddef.h"
21 #include "tbb_exception.h"
22 #include "atomic.h"
23 #include "cache_aligned_allocator.h"
24 #include "blocked_range.h"
25 #include "tbb_machine.h"
26 #include "tbb_profiling.h"
27 #include <new>
28 #include <cstring> // for memset()
29 #include __TBB_STD_SWAP_HEADER
30 #include <algorithm>
31 #include <iterator>
32 
33 #include "internal/_allocator_traits.h"
34 
35 #if _MSC_VER==1500 && !__INTEL_COMPILER
36  // VS2008/VC9 seems to have an issue; limits pull in math.h
37  #pragma warning( push )
38  #pragma warning( disable: 4985 )
39 #endif
40 #include <limits> /* std::numeric_limits */
41 #if _MSC_VER==1500 && !__INTEL_COMPILER
42  #pragma warning( pop )
43 #endif
44 
45 #if __TBB_INITIALIZER_LISTS_PRESENT
46  #include <initializer_list>
47 #endif
48 
49 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
50  // Workaround for overzealous compiler warnings in /Wp64 mode
51  #pragma warning (push)
52 #if defined(_Wp64)
53  #pragma warning (disable: 4267)
54 #endif
55  #pragma warning (disable: 4127) //warning C4127: conditional expression is constant
56 #endif
57 
58 namespace tbb {
59 
60 template<typename T, class A = cache_aligned_allocator<T> >
61 class concurrent_vector;
62 
64 namespace internal {
65 
66  template<typename Container, typename Value>
67  class vector_iterator;
68 
70  static void *const vector_allocation_error_flag = reinterpret_cast<void*>(size_t(63));
71 
73  template<typename T>
74  void handle_unconstructed_elements(T* array, size_t n_of_elements){
75  std::memset( static_cast<void*>(array), 0, n_of_elements * sizeof( T ) );
76  }
77 
79 
80  class concurrent_vector_base_v3 {
81  protected:
82 
83  // Basic types declarations
84  typedef size_t segment_index_t;
85  typedef size_t size_type;
86 
87  // Using enumerations due to Mac linking problems of static const variables
88  enum {
89  // Size constants
90  default_initial_segments = 1, // 2 initial items
92  pointers_per_short_table = 3, // to fit into 8 words of entire structure
93  pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit
94  };
95 
96  struct segment_not_used {};
97  struct segment_allocated {};
98  struct segment_allocation_failed {};
99 
100  class segment_t;
101  class segment_value_t {
102  void* array;
103  private:
104  //TODO: More elegant way to grant access to selected functions _only_?
105  friend class segment_t;
106  explicit segment_value_t(void* an_array):array(an_array) {}
107  public:
108  friend bool operator==(segment_value_t const& lhs, segment_not_used ) { return lhs.array == 0;}
109  friend bool operator==(segment_value_t const& lhs, segment_allocated) { return lhs.array > internal::vector_allocation_error_flag;}
110  friend bool operator==(segment_value_t const& lhs, segment_allocation_failed) { return lhs.array == internal::vector_allocation_error_flag;}
111  template<typename argument_type>
112  friend bool operator!=(segment_value_t const& lhs, argument_type arg) { return ! (lhs == arg);}
113 
114  template<typename T>
115  T* pointer() const { return static_cast<T*>(const_cast<void*>(array)); }
116  };
117 
118  friend void enforce_segment_allocated(segment_value_t const& s, internal::exception_id exception = eid_bad_last_alloc){
119  if(s != segment_allocated()){
120  internal::throw_exception(exception);
121  }
122  }
123 
124  // Segment pointer.
125  class segment_t {
126  atomic<void*> array;
127  public:
128  segment_t(){ store<relaxed>(segment_not_used());}
129  //Copy ctor and assignment operator are defined to ease the use of STL algorithms.
130  //These algorithms are usually not a synchronization point, so the semantics are
131  //intentionally relaxed here.
132  segment_t(segment_t const& rhs ){ array.store<relaxed>(rhs.array.load<relaxed>());}
133 
134  void swap(segment_t & rhs ){
135  tbb::internal::swap<relaxed>(array, rhs.array);
136  }
137 
138  segment_t& operator=( segment_t const& rhs ){
139  array.store<relaxed>(rhs.array.load<relaxed>());
140  return *this;
141  }
142 
143  template<memory_semantics M>
144  segment_value_t load() const { return segment_value_t(array.load<M>());}
145 
146  template<memory_semantics M>
147  void store(segment_not_used) __TBB_NOEXCEPT(true){
148  array.store<M>(0);
149  }
150 
151  template<memory_semantics M>
152  void store(segment_allocation_failed) __TBB_NOEXCEPT(true){
153  __TBB_ASSERT(load<relaxed>() != segment_allocated(),"transition from \"allocated\" to \"allocation failed\" state looks non-logical");
154  array.store<M>(vector_allocation_error_flag);
155  }
156 
157  template<memory_semantics M>
158  void store(void* allocated_segment_pointer) __TBB_NOEXCEPT(true) {
159  __TBB_ASSERT(segment_value_t(allocated_segment_pointer) == segment_allocated(),
160  "other overloads of store should be used for marking segment as not_used or allocation_failed" );
161  array.store<M>(allocated_segment_pointer);
162  }
163 
164 #if TBB_USE_ASSERT
165  ~segment_t() {
166  __TBB_ASSERT(load<relaxed>() != segment_allocated(), "should have been freed by clear" );
167  }
168 #endif /* TBB_USE_ASSERT */
169  };
170  friend void swap(segment_t & , segment_t & ) __TBB_NOEXCEPT(true);
171 
172  // Data fields
173 
175  void* (*vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t);
176 
177  // count of segments in the first block
178  atomic<size_type> my_first_block;
179 
180  // Requested size of vector
181  atomic<size_type> my_early_size;
182 
183  // Pointer to the segments table
184  atomic<segment_t*> my_segment;
185 
186  // embedded storage of segment pointers
187  segment_t my_storage[pointers_per_short_table];
188 
189  // Methods
190 
191  concurrent_vector_base_v3() {
192  //Here the semantic is intentionally relaxed.
193  //The reason for this is as follows:
194  //Object that is in middle of construction (i.e. its constructor is not yet finished)
195  //cannot be used concurrently until the construction is finished.
196  //Thus to flag other threads that construction is finished, some synchronization with
197  //acquire-release semantic should be done by the (external) code that uses the vector.
198  //So, no need to do the synchronization inside the vector.
199 
200  my_early_size.store<relaxed>(0);
201  my_first_block.store<relaxed>(0); // here is not default_initial_segments
202  my_segment.store<relaxed>(my_storage);
203  }
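         // Illustrative sketch (not part of the TBB sources): the acquire-release publication that
         // the comment above expects from the code that shares a freshly constructed vector,
         // assuming C++11 <atomic> is available.
         //
         //     std::atomic<tbb::concurrent_vector<int>*> shared{nullptr};
         //
         //     // producer thread: construct first, then publish with release semantics
         //     void produce() {
         //         tbb::concurrent_vector<int>* v = new tbb::concurrent_vector<int>(100);
         //         shared.store(v, std::memory_order_release);
         //     }
         //
         //     // consumer thread: an acquire load makes the completed construction visible before use
         //     void consume() {
         //         tbb::concurrent_vector<int>* v;
         //         while( !(v = shared.load(std::memory_order_acquire)) ) {}
         //         v->push_back(42);
         //     }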
204 
205  __TBB_EXPORTED_METHOD ~concurrent_vector_base_v3();
206 
207  //these helper methods use the fact that segments are allocated so
208  //that every segment size is an (increasing) power of 2,
209  //with one exception: segment 0 has size 2, the same as segment 1;
210  //e.g. the size of the segment with index 3 is 2^3=8.
211  static segment_index_t segment_index_of( size_type index ) {
212  return segment_index_t( __TBB_Log2( index|1 ) );
213  }
214 
215  static segment_index_t segment_base( segment_index_t k ) {
216  return (segment_index_t(1)<<k & ~segment_index_t(1));
217  }
218 
219  static inline segment_index_t segment_base_index_of( segment_index_t &index ) {
220  segment_index_t k = segment_index_of( index );
221  index -= segment_base(k);
222  return k;
223  }
224 
225  static size_type segment_size( segment_index_t k ) {
226  return segment_index_t(1)<<k; // fake value for k==0
227  }
228 
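         // Illustrative worked example (not part of the TBB sources) of the power-of-2 layout
         // implemented by the helpers above:
         //     indices 0..1           -> segment 0 (2 elements)
         //     indices 2..3           -> segment 1 (2 elements)
         //     indices 2^k..2^(k+1)-1 -> segment k (2^k elements), for k >= 1
         // e.g. for element index 11: segment_index_of(11) == __TBB_Log2(11|1) == 3,
         // segment_base(3) == (1<<3) & ~segment_index_t(1) == 8, so the element lives at
         // offset 11-8 == 3 inside segment 3, whose segment_size(3) == 8.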
229 
230  static bool is_first_element_in_segment(size_type element_index){
231  //check if element_index is a power of 2 that is at least 2.
232  //The idea is to detect if the iterator crosses a segment boundary,
233  //and 2 is the minimal index for which it's true
234  __TBB_ASSERT(element_index, "there should be no need to call "
235  "is_first_element_in_segment for 0th element" );
236  return is_power_of_two_at_least( element_index, 2 );
237  }
238 
240  typedef void (__TBB_EXPORTED_FUNC *internal_array_op1)(void* begin, size_type n );
241 
243  typedef void (__TBB_EXPORTED_FUNC *internal_array_op2)(void* dst, const void* src, size_type n );
244 
246  struct internal_segments_table {
247  segment_index_t first_block;
248  segment_t table[pointers_per_long_table];
249  };
250 
251  void __TBB_EXPORTED_METHOD internal_reserve( size_type n, size_type element_size, size_type max_size );
252  size_type __TBB_EXPORTED_METHOD internal_capacity() const;
253  void internal_grow( size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src );
254  size_type __TBB_EXPORTED_METHOD internal_grow_by( size_type delta, size_type element_size, internal_array_op2 init, const void *src );
255  void* __TBB_EXPORTED_METHOD internal_push_back( size_type element_size, size_type& index );
256  segment_index_t __TBB_EXPORTED_METHOD internal_clear( internal_array_op1 destroy );
257  void* __TBB_EXPORTED_METHOD internal_compact( size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy );
258  void __TBB_EXPORTED_METHOD internal_copy( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op2 copy );
259  void __TBB_EXPORTED_METHOD internal_assign( const concurrent_vector_base_v3& src, size_type element_size,
260  internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy );
261 
262  void __TBB_EXPORTED_METHOD internal_throw_exception(size_type) const;
263  void __TBB_EXPORTED_METHOD internal_swap(concurrent_vector_base_v3& v);
264 
265  void __TBB_EXPORTED_METHOD internal_resize( size_type n, size_type element_size, size_type max_size, const void *src,
266  internal_array_op1 destroy, internal_array_op2 init );
267  size_type __TBB_EXPORTED_METHOD internal_grow_to_at_least_with_result( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );
268 
270  void __TBB_EXPORTED_METHOD internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );
271 private:
273  class helper;
274  friend class helper;
275 
276  template<typename Container, typename Value>
277  friend class vector_iterator;
278 
279  };
280 
281  inline void swap(concurrent_vector_base_v3::segment_t & lhs, concurrent_vector_base_v3::segment_t & rhs) __TBB_NOEXCEPT(true) {
282  lhs.swap(rhs);
283  }
284 
286 
287  typedef concurrent_vector_base_v3 concurrent_vector_base;
288 
290  template<typename Container, typename Value>
291  class vector_iterator
292  {
294  Container* my_vector;
295 
297  size_t my_index;
298 
300 
301  mutable Value* my_item;
302 
303  template<typename C, typename T>
304  friend vector_iterator<C,T> operator+( ptrdiff_t offset, const vector_iterator<C,T>& v );
305 
306  template<typename C, typename T, typename U>
307  friend bool operator==( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
308 
309  template<typename C, typename T, typename U>
310  friend bool operator<( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
311 
312  template<typename C, typename T, typename U>
313  friend ptrdiff_t operator-( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
314 
315  template<typename C, typename U>
316  friend class internal::vector_iterator;
317 
318 #if !__TBB_TEMPLATE_FRIENDS_BROKEN
319  template<typename T, class A>
320  friend class tbb::concurrent_vector;
321 #else
322 public:
323 #endif
324 
325  vector_iterator( const Container& vector, size_t index, void *ptr = 0 ) :
326  my_vector(const_cast<Container*>(&vector)),
327  my_index(index),
328  my_item(static_cast<Value*>(ptr))
329  {}
330 
331  public:
333  vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {}
334 
335  vector_iterator( const vector_iterator<Container,typename Container::value_type>& other ) :
336  my_vector(other.my_vector),
337  my_index(other.my_index),
338  my_item(other.my_item)
339  {}
340 
341  vector_iterator operator+( ptrdiff_t offset ) const {
342  return vector_iterator( *my_vector, my_index+offset );
343  }
344  vector_iterator &operator+=( ptrdiff_t offset ) {
345  my_index+=offset;
346  my_item = NULL;
347  return *this;
348  }
349  vector_iterator operator-( ptrdiff_t offset ) const {
350  return vector_iterator( *my_vector, my_index-offset );
351  }
352  vector_iterator &operator-=( ptrdiff_t offset ) {
353  my_index-=offset;
354  my_item = NULL;
355  return *this;
356  }
357  Value& operator*() const {
358  Value* item = my_item;
359  if( !item ) {
360  item = my_item = &my_vector->internal_subscript(my_index);
361  }
362  __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "corrupt cache" );
363  return *item;
364  }
365  Value& operator[]( ptrdiff_t k ) const {
366  return my_vector->internal_subscript(my_index+k);
367  }
368  Value* operator->() const {return &operator*();}
369 
370  // Pre increment
371  vector_iterator &operator++() {
372  size_t element_index = ++my_index;
373  if( my_item ) {
374  //TODO: consider using of knowledge about "first_block optimization" here as well?
375  if( is_first_element_in_segment(element_index)) {
376  //if the iterator crosses a segment boundary, the pointer becomes invalid
377  //because the next segment may be in another memory location
378  my_item= NULL;
379  } else {
380  ++my_item;
381  }
382  }
383  return *this;
384  }
385 
386  // Pre decrement
387  vector_iterator &operator--() {
388  __TBB_ASSERT( my_index>0, "operator--() applied to iterator already at beginning of concurrent_vector" );
389  size_t element_index = my_index--;
390  if( my_item ) {
391  if( is_first_element_in_segment(element_index)) {
392  //if the iterator crosses a segment boundary, the pointer becomes invalid
393  //because the next segment may be in another memory location
394  my_item= NULL;
395  } else {
396  --my_item;
397  }
398  }
399  return *this;
400  }
401 
402  // Post increment
403  vector_iterator operator++(int) {
404  vector_iterator result = *this;
405  operator++();
406  return result;
407  }
408 
409  // Post decrement
410  vector_iterator operator--(int) {
411  vector_iterator result = *this;
412  operator--();
413  return result;
414  }
415 
416  // STL support
417 
418  typedef ptrdiff_t difference_type;
419  typedef Value value_type;
420  typedef Value* pointer;
421  typedef Value& reference;
422  typedef std::random_access_iterator_tag iterator_category;
423  };
424 
425  template<typename Container, typename T>
426  vector_iterator<Container,T> operator+( ptrdiff_t offset, const vector_iterator<Container,T>& v ) {
427  return vector_iterator<Container,T>( *v.my_vector, v.my_index+offset );
428  }
429 
430  template<typename Container, typename T, typename U>
431  bool operator==( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
432  return i.my_index==j.my_index && i.my_vector == j.my_vector;
433  }
434 
435  template<typename Container, typename T, typename U>
436  bool operator!=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
437  return !(i==j);
438  }
439 
440  template<typename Container, typename T, typename U>
441  bool operator<( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
442  return i.my_index<j.my_index;
443  }
444 
445  template<typename Container, typename T, typename U>
446  bool operator>( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
447  return j<i;
448  }
449 
450  template<typename Container, typename T, typename U>
451  bool operator>=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
452  return !(i<j);
453  }
454 
455  template<typename Container, typename T, typename U>
456  bool operator<=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
457  return !(j<i);
458  }
459 
460  template<typename Container, typename T, typename U>
461  ptrdiff_t operator-( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
462  return ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index);
463  }
464 
465  template<typename T, class A>
466  class allocator_base {
467  public:
468  typedef typename tbb::internal::allocator_rebind<A, T>::type allocator_type;
469  allocator_type my_allocator;
470  allocator_base(const allocator_type &a = allocator_type() ) : my_allocator(a) {}
471  };
472 
473 } // namespace internal
475 
477 
538 template<typename T, class A>
539 class concurrent_vector: protected internal::allocator_base<T, A>,
540  public internal::concurrent_vector_base {
541 private:
542  template<typename I>
543  class generic_range_type: public blocked_range<I> {
544  public:
545  typedef T value_type;
546  typedef T& reference;
547  typedef const T& const_reference;
548  typedef I iterator;
549  typedef ptrdiff_t difference_type;
550  generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}
551  template<typename U>
552  generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}
553  generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
554  };
555 
556  template<typename C, typename U>
557  friend class internal::vector_iterator;
558 
559 public:
560  //------------------------------------------------------------------------
561  // STL compatible types
562  //------------------------------------------------------------------------
563  typedef internal::concurrent_vector_base_v3::size_type size_type;
564  typedef typename internal::allocator_base<T, A>::allocator_type allocator_type;
565 
566  typedef T value_type;
567  typedef ptrdiff_t difference_type;
568  typedef T& reference;
569  typedef const T& const_reference;
570  typedef T *pointer;
571  typedef const T *const_pointer;
572 
573  typedef internal::vector_iterator<concurrent_vector,T> iterator;
574  typedef internal::vector_iterator<concurrent_vector,const T> const_iterator;
575 
576 #if !defined(_MSC_VER) || _CPPLIB_VER>=300
577  // Assume ISO standard definition of std::reverse_iterator
578  typedef std::reverse_iterator<iterator> reverse_iterator;
579  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
580 #else
581  // Use non-standard std::reverse_iterator
582  typedef std::reverse_iterator<iterator,T,T&,T*> reverse_iterator;
583  typedef std::reverse_iterator<const_iterator,T,const T&,const T*> const_reverse_iterator;
584 #endif /* !defined(_MSC_VER) || _CPPLIB_VER>=300 */
585 
586  //------------------------------------------------------------------------
587  // Parallel algorithm support
588  //------------------------------------------------------------------------
589  typedef generic_range_type<iterator> range_type;
590  typedef generic_range_type<const_iterator> const_range_type;
591 
592  //------------------------------------------------------------------------
593  // STL compatible constructors & destructors
594  //------------------------------------------------------------------------
595 
597  explicit concurrent_vector(const allocator_type &a = allocator_type())
598  : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
599  {
600  vector_allocator_ptr = &internal_allocator;
601  }
602 
603  //Constructors are not required to have synchronization
604  //(for more details see comment in the concurrent_vector_base constructor).
605 #if __TBB_INITIALIZER_LISTS_PRESENT
606  concurrent_vector(std::initializer_list<T> init_list, const allocator_type &a = allocator_type())
608  : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
609  {
610  vector_allocator_ptr = &internal_allocator;
611  __TBB_TRY {
612  internal_assign_iterators(init_list.begin(), init_list.end());
613  } __TBB_CATCH(...) {
614  segment_t *table = my_segment.load<relaxed>();
615  internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
616  __TBB_RETHROW();
617  }
618 
619  }
620 #endif //# __TBB_INITIALIZER_LISTS_PRESENT
621 
623  concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() )
624  : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
625  {
626  vector_allocator_ptr = &internal_allocator;
627  __TBB_TRY {
628  internal_copy(vector, sizeof(T), &copy_array);
629  } __TBB_CATCH(...) {
630  segment_t *table = my_segment.load<relaxed>();
631  internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
632  __TBB_RETHROW();
633  }
634  }
635 
636 #if __TBB_CPP11_RVALUE_REF_PRESENT
637  //TODO add __TBB_NOEXCEPT(true) and static_assert(std::has_nothrow_move_constructor<A>::value)
638  // Move constructor
639  concurrent_vector( concurrent_vector&& source)
640  : internal::allocator_base<T, A>(std::move(source)), internal::concurrent_vector_base()
641  {
642  vector_allocator_ptr = &internal_allocator;
643  concurrent_vector_base_v3::internal_swap(source);
644  }
645 
646  concurrent_vector( concurrent_vector&& source, const allocator_type& a)
647  : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
648  {
649  vector_allocator_ptr = &internal_allocator;
650  //The C++ standard allows allocator instances to be compared for equality;
651  //equality means that memory allocated by one instance may be deallocated by the other.
652  if (a == source.my_allocator) {
653  concurrent_vector_base_v3::internal_swap(source);
654  } else {
655  __TBB_TRY {
656  internal_copy(source, sizeof(T), &move_array);
657  } __TBB_CATCH(...) {
658  segment_t *table = my_segment.load<relaxed>();
659  internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
660  __TBB_RETHROW();
661  }
662  }
663  }
664 
665 #endif
666 
668  template<class M>
669  concurrent_vector( const concurrent_vector<T, M>& vector, const allocator_type& a = allocator_type() )
670  : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
671  {
672  vector_allocator_ptr = &internal_allocator;
673  __TBB_TRY {
674  internal_copy(vector.internal_vector_base(), sizeof(T), &copy_array);
675  } __TBB_CATCH(...) {
676  segment_t *table = my_segment.load<relaxed>();
677  internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
678  __TBB_RETHROW();
679  }
680  }
681 
683  explicit concurrent_vector(size_type n)
684  {
685  vector_allocator_ptr = &internal_allocator;
686  __TBB_TRY {
687  internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
688  } __TBB_CATCH(...) {
689  segment_t *table = my_segment.load<relaxed>();
690  internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
691  __TBB_RETHROW();
692  }
693  }
694 
696  concurrent_vector(size_type n, const_reference t, const allocator_type& a = allocator_type())
697  : internal::allocator_base<T, A>(a)
698  {
699  vector_allocator_ptr = &internal_allocator;
700  __TBB_TRY {
701  internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
702  } __TBB_CATCH(...) {
703  segment_t *table = my_segment.load<relaxed>();
704  internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
705  __TBB_RETHROW();
706  }
707  }
708 
710  template<class I>
711  concurrent_vector(I first, I last, const allocator_type &a = allocator_type())
712  : internal::allocator_base<T, A>(a)
713  {
714  vector_allocator_ptr = &internal_allocator;
715  __TBB_TRY {
716  internal_assign_range(first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
717  } __TBB_CATCH(...) {
718  segment_t *table = my_segment.load<relaxed>();
719  internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
720  __TBB_RETHROW();
721  }
722  }
723 
724  // Assignment
725  concurrent_vector& operator=( const concurrent_vector& vector ) {
726  if( this != &vector )
727  internal_assign(vector, sizeof(T), &destroy_array, &assign_array, &copy_array);
728  return *this;
729  }
730 
731 #if __TBB_CPP11_RVALUE_REF_PRESENT
732  //TODO: add __TBB_NOEXCEPT()
733  // Move assignment
734  concurrent_vector& operator=( concurrent_vector&& other ) {
735  __TBB_ASSERT(this != &other, "Move assignment to itself is prohibited ");
736  typedef typename tbb::internal::allocator_traits<A>::propagate_on_container_move_assignment pocma_t;
737  if(pocma_t::value || this->my_allocator == other.my_allocator) {
738  concurrent_vector trash (std::move(*this));
739  internal_swap(other);
740  tbb::internal::allocator_move_assignment(this->my_allocator, other.my_allocator, pocma_t());
741  } else {
742  internal_assign(other, sizeof(T), &destroy_array, &move_assign_array, &move_array);
743  }
744  return *this;
745  }
746 #endif
747  //TODO: add a template assignment operator? (i.e. with a different element type)
748 
750  template<class M>
751  concurrent_vector& operator=( const concurrent_vector<T, M>& vector ) {
752  if( static_cast<void*>( this ) != static_cast<const void*>( &vector ) )
753  internal_assign(vector.internal_vector_base(),
754  sizeof(T), &destroy_array, &assign_array, &copy_array);
755  return *this;
756  }
757 
758 #if __TBB_INITIALIZER_LISTS_PRESENT
759  concurrent_vector& operator=( std::initializer_list<T> init_list ) {
761  internal_clear(&destroy_array);
762  internal_assign_iterators(init_list.begin(), init_list.end());
763  return *this;
764  }
765 #endif //#if __TBB_INITIALIZER_LISTS_PRESENT
766 
767  //------------------------------------------------------------------------
768  // Concurrent operations
769  //------------------------------------------------------------------------
771 
772  iterator grow_by( size_type delta ) {
773  return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size.load());
774  }
775 
777 
778  iterator grow_by( size_type delta, const_reference t ) {
779  return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast<const void*>(&t) ) : my_early_size.load());
780  }
781 
783  template<typename I>
784  iterator grow_by( I first, I last ) {
785  typename std::iterator_traits<I>::difference_type delta = std::distance(first, last);
786  __TBB_ASSERT( delta >= 0, NULL);
787 
788  return iterator(*this, delta ? internal_grow_by(delta, sizeof(T), &copy_range<I>, static_cast<const void*>(&first)) : my_early_size.load());
789  }
790 
791 #if __TBB_INITIALIZER_LISTS_PRESENT
792 
793  iterator grow_by( std::initializer_list<T> init_list ) {
794  return grow_by( init_list.begin(), init_list.end() );
795  }
796 #endif //#if __TBB_INITIALIZER_LISTS_PRESENT
797 
799 
803  iterator grow_to_at_least( size_type n ) {
804  size_type m=0;
805  if( n ) {
806  m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL );
807  if( m>n ) m=n;
808  }
809  return iterator(*this, m);
810  };
811 
814  iterator grow_to_at_least( size_type n, const_reference t ) {
815  size_type m=0;
816  if( n ) {
817  m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array_by, &t);
818  if( m>n ) m=n;
819  }
820  return iterator(*this, m);
821  };
822 
824 
825  iterator push_back( const_reference item )
826  {
827  push_back_helper prolog(*this);
828  new(prolog.internal_push_back_result()) T(item);
829  return prolog.return_iterator_and_dismiss();
830  }
831 
832 #if __TBB_CPP11_RVALUE_REF_PRESENT
833 
835  iterator push_back( T&& item )
836  {
837  push_back_helper prolog(*this);
838  new(prolog.internal_push_back_result()) T(std::move(item));
839  return prolog.return_iterator_and_dismiss();
840  }
841 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
842 
844  template<typename... Args>
845  iterator emplace_back( Args&&... args )
846  {
847  push_back_helper prolog(*this);
848  new(prolog.internal_push_back_result()) T(std::forward<Args>(args)...);
849  return prolog.return_iterator_and_dismiss();
850  }
851 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
852 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
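         // Illustrative usage sketch (not part of the TBB sources): the growth methods above are safe
         // to call concurrently, and each returns an iterator to the newly appended element(s).
         //
         //     #include "tbb/concurrent_vector.h"
         //     #include "tbb/parallel_for.h"
         //
         //     tbb::concurrent_vector<int> v;
         //     tbb::parallel_for( 0, 1000, [&v]( int i ) {
         //         v.push_back( i );                    // concurrent appends from many threads
         //     } );
         //     tbb::concurrent_vector<int>::iterator it = v.grow_by( 10, -1 ); // append ten copies of -1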
853 
856  reference operator[]( size_type index ) {
857  return internal_subscript(index);
858  }
859 
861  const_reference operator[]( size_type index ) const {
862  return internal_subscript(index);
863  }
864 
866  reference at( size_type index ) {
867  return internal_subscript_with_exceptions(index);
868  }
869 
871  const_reference at( size_type index ) const {
872  return internal_subscript_with_exceptions(index);
873  }
874 
876  range_type range( size_t grainsize = 1 ) {
877  return range_type( begin(), end(), grainsize );
878  }
879 
881  const_range_type range( size_t grainsize = 1 ) const {
882  return const_range_type( begin(), end(), grainsize );
883  }
884 
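         // Illustrative usage sketch (not part of the TBB sources): range() adapts the vector for TBB
         // parallel algorithms, e.g.
         //
         //     tbb::concurrent_vector<int> v( 1000, 1 );
         //     tbb::parallel_for( v.range(), []( const tbb::concurrent_vector<int>::range_type& r ) {
         //         for( tbb::concurrent_vector<int>::iterator i = r.begin(); i != r.end(); ++i )
         //             *i *= 2;
         //     } );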
885  //------------------------------------------------------------------------
886  // Capacity
887  //------------------------------------------------------------------------
889  size_type size() const {
890  size_type sz = my_early_size, cp = internal_capacity();
891  return cp < sz ? cp : sz;
892  }
893 
895  bool empty() const {return !my_early_size;}
896 
898  size_type capacity() const {return internal_capacity();}
899 
901 
903  void reserve( size_type n ) {
904  if( n )
905  internal_reserve(n, sizeof(T), max_size());
906  }
907 
909  void resize( size_type n ) {
910  internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
911  }
912 
914  void resize( size_type n, const_reference t ) {
915  internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
916  }
917 
919  void shrink_to_fit();
920 
922  size_type max_size() const {return (~size_type(0))/sizeof(T);}
923 
924  //------------------------------------------------------------------------
925  // STL support
926  //------------------------------------------------------------------------
927 
929  iterator begin() {return iterator(*this,0);}
931  iterator end() {return iterator(*this,size());}
933  const_iterator begin() const {return const_iterator(*this,0);}
935  const_iterator end() const {return const_iterator(*this,size());}
937  const_iterator cbegin() const {return const_iterator(*this,0);}
939  const_iterator cend() const {return const_iterator(*this,size());}
941  reverse_iterator rbegin() {return reverse_iterator(end());}
943  reverse_iterator rend() {return reverse_iterator(begin());}
945  const_reverse_iterator rbegin() const {return const_reverse_iterator(end());}
947  const_reverse_iterator rend() const {return const_reverse_iterator(begin());}
949  const_reverse_iterator crbegin() const {return const_reverse_iterator(end());}
951  const_reverse_iterator crend() const {return const_reverse_iterator(begin());}
953  reference front() {
954  __TBB_ASSERT( size()>0, NULL);
955  const segment_value_t& segment_value = my_segment[0].template load<relaxed>();
956  return (segment_value.template pointer<T>())[0];
957  }
959  const_reference front() const {
960  __TBB_ASSERT( size()>0, NULL);
961  const segment_value_t& segment_value = my_segment[0].template load<relaxed>();
962  return (segment_value.template pointer<const T>())[0];
963  }
965  reference back() {
966  __TBB_ASSERT( size()>0, NULL);
967  return internal_subscript( size()-1 );
968  }
970  const_reference back() const {
971  __TBB_ASSERT( size()>0, NULL);
972  return internal_subscript( size()-1 );
973  }
975  allocator_type get_allocator() const { return this->my_allocator; }
976 
978  void assign(size_type n, const_reference t) {
979  clear();
980  internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
981  }
982 
984  template<class I>
985  void assign(I first, I last) {
986  clear(); internal_assign_range( first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
987  }
988 
989 #if __TBB_INITIALIZER_LISTS_PRESENT
990  void assign(std::initializer_list<T> init_list) {
992  clear(); internal_assign_iterators( init_list.begin(), init_list.end());
993  }
994 #endif //# __TBB_INITIALIZER_LISTS_PRESENT
995 
997  void swap(concurrent_vector &vector) {
998  typedef typename tbb::internal::allocator_traits<A>::propagate_on_container_swap pocs_t;
999  if( this != &vector && (this->my_allocator == vector.my_allocator || pocs_t::value) ) {
1000  concurrent_vector_base_v3::internal_swap(static_cast<concurrent_vector_base_v3&>(vector));
1001  tbb::internal::allocator_swap(this->my_allocator, vector.my_allocator, pocs_t());
1002  }
1003  }
1004 
1006 
1007  void clear() {
1008  internal_clear(&destroy_array);
1009  }
1010 
1011  // Clear and destroy vector.
1012  ~concurrent_vector() {
1013  segment_t *table = my_segment.load<relaxed>();
1014  internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
1015  // the base class destructor runs after this
1016  }
1017 
1018  const internal::concurrent_vector_base_v3 &internal_vector_base() const { return *this; }
1019 private:
1021  static void *internal_allocator(internal::concurrent_vector_base_v3 &vb, size_t k) {
1022  return static_cast<concurrent_vector<T, A>&>(vb).my_allocator.allocate(k);
1023  }
1025  void internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block);
1026 
1028  T& internal_subscript( size_type index ) const;
1029 
1031  T& internal_subscript_with_exceptions( size_type index ) const;
1032 
1034  void internal_assign_n(size_type n, const_pointer p) {
1035  internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(p), &destroy_array, p? &initialize_array_by : &initialize_array );
1036  }
1037 
1039  /* Functions declarations:
1040  * void foo(is_integer_tag<true>*);
1041  * void foo(is_integer_tag<false>*);
1042  * Usage example:
1043  * foo(static_cast<is_integer_tag<std::numeric_limits<T>::is_integer>*>(0));
1044  */
1045  template<bool B> class is_integer_tag;
1046 
1048  template<class I>
1049  void internal_assign_range(I first, I last, is_integer_tag<true> *) {
1050  internal_assign_n(static_cast<size_type>(first), &static_cast<T&>(last));
1051  }
1053  template<class I>
1054  void internal_assign_range(I first, I last, is_integer_tag<false> *) {
1055  internal_assign_iterators(first, last);
1056  }
1058  template<class I>
1059  void internal_assign_iterators(I first, I last);
1060 
1061  //these functions are marked __TBB_EXPORTED_FUNC as they are called from within the library
1062 
1064  static void __TBB_EXPORTED_FUNC initialize_array( void* begin, const void*, size_type n );
1065 
1067  static void __TBB_EXPORTED_FUNC initialize_array_by( void* begin, const void* src, size_type n );
1068 
1070  static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src, size_type n );
1071 
1072 #if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1073  static void __TBB_EXPORTED_FUNC move_array_if_noexcept( void* dst, const void* src, size_type n );
1075 #endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT
1076 
1077 #if __TBB_CPP11_RVALUE_REF_PRESENT
1078  static void __TBB_EXPORTED_FUNC move_array( void* dst, const void* src, size_type n );
1080 
1082  static void __TBB_EXPORTED_FUNC move_assign_array( void* dst, const void* src, size_type n );
1083 #endif
1084  template<typename Iterator>
1086  static void __TBB_EXPORTED_FUNC copy_range( void* dst, const void* p_type_erased_iterator, size_type n );
1087 
1089  static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* src, size_type n );
1090 
1092  static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n );
1093 
1095  class internal_loop_guide : internal::no_copy {
1096  public:
1097  const pointer array;
1098  const size_type n;
1099  size_type i;
1100 
1101  static const T* as_const_pointer(const void *ptr) { return static_cast<const T *>(ptr); }
1102  static T* as_pointer(const void *src) { return static_cast<T*>(const_cast<void *>(src)); }
1103 
1104  internal_loop_guide(size_type ntrials, void *ptr)
1105  : array(as_pointer(ptr)), n(ntrials), i(0) {}
1106  void init() { for(; i < n; ++i) new( &array[i] ) T(); }
1107  void init(const void *src) { for(; i < n; ++i) new( &array[i] ) T(*as_const_pointer(src)); }
1108  void copy(const void *src) { for(; i < n; ++i) new( &array[i] ) T(as_const_pointer(src)[i]); }
1109  void assign(const void *src) { for(; i < n; ++i) array[i] = as_const_pointer(src)[i]; }
1110 #if __TBB_CPP11_RVALUE_REF_PRESENT
1111  void move_assign(const void *src) { for(; i < n; ++i) array[i] = std::move(as_pointer(src)[i]); }
1112  void move_construct(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move(as_pointer(src)[i]) ); }
1113 #endif
1114 #if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1115  void move_construct_if_noexcept(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move_if_noexcept(as_pointer(src)[i]) ); }
1116 #endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT
1117 
1118  //TODO: rename to construct_range
1119  template<class I> void iterate(I &src) { for(; i < n; ++i, ++src) new( &array[i] ) T( *src ); }
1120  ~internal_loop_guide() {
1121  if(i < n) {// if an exception was raised, fill the rest of items with zeros
1122  internal::handle_unconstructed_elements(array+i, n-i);
1123  }
1124  }
1125  };
1126 
1127  struct push_back_helper : internal::no_copy{
1128  struct element_construction_guard : internal::no_copy{
1129  pointer element;
1130 
1131  element_construction_guard(pointer an_element) : element (an_element){}
1132  void dismiss(){ element = NULL; }
1133  ~element_construction_guard(){
1134  if (element){
1135  internal::handle_unconstructed_elements(element, 1);
1136  }
1137  }
1138  };
1139 
1140  concurrent_vector & v;
1141  size_type k;
1142  element_construction_guard g;
1143 
1144  push_back_helper(concurrent_vector & vector) :
1145  v(vector),
1146  g (static_cast<T*>(v.internal_push_back(sizeof(T),k)))
1147  {}
1148 
1149  pointer internal_push_back_result(){ return g.element;}
1150  iterator return_iterator_and_dismiss(){
1151  pointer ptr = g.element;
1152  g.dismiss();
1153  return iterator(v, k, ptr);
1154  }
1155  };
1156 };
1157 
1158 #if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
1159 // Deduction guide for the constructor from two iterators
1160 template<typename I,
1161  typename T = typename std::iterator_traits<I>::value_type,
1162  typename A = cache_aligned_allocator<T>
1163 > concurrent_vector(I, I, const A& = A())
1164 -> concurrent_vector<T, A>;
1165 
1166 // Deduction guide for the constructor from a vector and allocator
1167 template<typename T, typename A1, typename A2>
1168 concurrent_vector(const concurrent_vector<T, A1> &, const A2 &)
1169 -> concurrent_vector<T, A2>;
1170 
1171 // Deduction guide for the constructor from an initializer_list
1172 template<typename T, typename A = cache_aligned_allocator<T>
1173 > concurrent_vector(std::initializer_list<T>, const A& = A())
1174 -> concurrent_vector<T, A>;
1175 #endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */
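// Illustrative usage sketch (not part of the TBB sources): with these guides and a C++17 compiler,
// the element and allocator types can be deduced from the constructor arguments, e.g.
//
//     std::vector<int> src{1, 2, 3};
//     tbb::concurrent_vector cv1(src.begin(), src.end());  // deduces tbb::concurrent_vector<int>
//     tbb::concurrent_vector cv2{1.0, 2.0, 3.0};           // deduces tbb::concurrent_vector<double>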
1176 
1177 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1178 #pragma warning (push)
1179 #pragma warning (disable: 4701) // potentially uninitialized local variable "old"
1180 #endif
1181 template<typename T, class A>
1182 void concurrent_vector<T, A>::shrink_to_fit() {
1183  internal_segments_table old;
1184  __TBB_TRY {
1185  internal_array_op2 copy_or_move_array =
1186 #if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1187  &move_array_if_noexcept
1188 #else
1189  &copy_array
1190 #endif
1191  ;
1192  if( internal_compact( sizeof(T), &old, &destroy_array, copy_or_move_array ) )
1193  internal_free_segments( old.table, pointers_per_long_table, old.first_block ); // free joined and unnecessary segments
1194  } __TBB_CATCH(...) {
1195  if( old.first_block ) // free segment allocated for compacting. Only for support of exceptions in ctor of user T[ype]
1196  internal_free_segments( old.table, 1, old.first_block );
1197  __TBB_RETHROW();
1198  }
1199 }
1200 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1201 #pragma warning (pop)
1202 #endif // warning 4701 is back
1203 
1204 template<typename T, class A>
1205 void concurrent_vector<T, A>::internal_free_segments( segment_t table[], segment_index_t k, segment_index_t first_block ) {
1206  // Free the arrays
1207  while( k > first_block ) {
1208  --k;
1209  segment_value_t segment_value = table[k].load<relaxed>();
1210  table[k].store<relaxed>(segment_not_used());
1211  if( segment_value == segment_allocated() ) // check for correct segment pointer
1212  this->my_allocator.deallocate( (segment_value.pointer<T>()), segment_size(k) );
1213  }
1214  segment_value_t segment_value = table[0].load<relaxed>();
1215  if( segment_value == segment_allocated() ) {
1216  __TBB_ASSERT( first_block > 0, NULL );
1217  while(k > 0) table[--k].store<relaxed>(segment_not_used());
1218  this->my_allocator.deallocate( (segment_value.pointer<T>()), segment_size(first_block) );
1219  }
1220 }
1221 
1222 template<typename T, class A>
1223 T& concurrent_vector<T, A>::internal_subscript( size_type index ) const {
1224  //TODO: unify both versions of internal_subscript
1225  __TBB_ASSERT( index < my_early_size, "index out of bounds" );
1226  size_type j = index;
1227  size_type k = segment_base_index_of( j );
1228  __TBB_ASSERT( my_segment.load<acquire>() != my_storage || k < pointers_per_short_table, "index is being allocated" );
1229  //no need for a load with acquire (load<acquire>) since the thread either works in its own space
1230  //or learns about added elements via some form of external synchronization
1231  //TODO: why not make a load of my_segment relaxed as well ?
1232  //TODO: add an assertion that my_segment[k] is properly aligned to please ITT
1233  segment_value_t segment_value = my_segment[k].template load<relaxed>();
1234  __TBB_ASSERT( segment_value != segment_allocation_failed(), "the instance is broken by bad allocation. Use at() instead" );
1235  __TBB_ASSERT( segment_value != segment_not_used(), "index is being allocated" );
1236  return (( segment_value.pointer<T>()))[j];
1237 }
1238 
1239 template<typename T, class A>
1240 T& concurrent_vector<T, A>::internal_subscript_with_exceptions( size_type index ) const {
1241  if( index >= my_early_size )
1242  internal::throw_exception(internal::eid_out_of_range); // throw std::out_of_range
1243  size_type j = index;
1244  size_type k = segment_base_index_of( j );
1245  //TODO: refactor this condition into separate helper function, e.g. fits_into_small_table
1246  if( my_segment.load<acquire>() == my_storage && k >= pointers_per_short_table )
1247  internal::throw_exception(internal::eid_segment_range_error); // throw std::range_error
1248  // no need for a load with acquire (load<acquire>) since the thread either works in its own space
1249  // or learns about added elements via some form of external synchronization
1250  //TODO: why not make a load of my_segment relaxed as well ?
1251  //TODO: add an assertion that my_segment[k] is properly aligned to please ITT
1252  segment_value_t segment_value = my_segment[k].template load<relaxed>();
1253  enforce_segment_allocated(segment_value, internal::eid_index_range_error);
1254  return (segment_value.pointer<T>())[j];
1255 }
1256 
1257 template<typename T, class A> template<class I>
1258 void concurrent_vector<T, A>::internal_assign_iterators(I first, I last) {
1259  __TBB_ASSERT(my_early_size == 0, NULL);
1260  size_type n = std::distance(first, last);
1261  if( !n ) return;
1262  internal_reserve(n, sizeof(T), max_size());
1263  my_early_size = n;
1264  segment_index_t k = 0;
1265  //TODO: unify segment iteration code with concurrent_base_v3::helper
1266  size_type sz = segment_size( my_first_block );
1267  while( sz < n ) {
1268  internal_loop_guide loop(sz, my_segment[k].template load<relaxed>().template pointer<void>());
1269  loop.iterate(first);
1270  n -= sz;
1271  if( !k ) k = my_first_block;
1272  else { ++k; sz <<= 1; }
1273  }
1274  internal_loop_guide loop(n, my_segment[k].template load<relaxed>().template pointer<void>());
1275  loop.iterate(first);
1276 }
1277 
1278 template<typename T, class A>
1279 void concurrent_vector<T, A>::initialize_array( void* begin, const void *, size_type n ) {
1280  internal_loop_guide loop(n, begin); loop.init();
1281 }
1282 
1283 template<typename T, class A>
1284 void concurrent_vector<T, A>::initialize_array_by( void* begin, const void *src, size_type n ) {
1285  internal_loop_guide loop(n, begin); loop.init(src);
1286 }
1287 
1288 template<typename T, class A>
1289 void concurrent_vector<T, A>::copy_array( void* dst, const void* src, size_type n ) {
1290  internal_loop_guide loop(n, dst); loop.copy(src);
1291 }
1292 
1293 #if __TBB_CPP11_RVALUE_REF_PRESENT
1294 template<typename T, class A>
1295 void concurrent_vector<T, A>::move_array( void* dst, const void* src, size_type n ) {
1296  internal_loop_guide loop(n, dst); loop.move_construct(src);
1297 }
1298 template<typename T, class A>
1299 void concurrent_vector<T, A>::move_assign_array( void* dst, const void* src, size_type n ) {
1300  internal_loop_guide loop(n, dst); loop.move_assign(src);
1301 }
1302 #endif
1303 
1304 #if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1305 template<typename T, class A>
1306 void concurrent_vector<T, A>::move_array_if_noexcept( void* dst, const void* src, size_type n ) {
1307  internal_loop_guide loop(n, dst); loop.move_construct_if_noexcept(src);
1308 }
1309 #endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT
1310 
1311 template<typename T, class A>
1312 template<typename I>
1313 void concurrent_vector<T, A>::copy_range( void* dst, const void* p_type_erased_iterator, size_type n ){
1314  internal_loop_guide loop(n, dst);
1315  loop.iterate( *(static_cast<I*>(const_cast<void*>(p_type_erased_iterator))) );
1316 }
1317 
1318 template<typename T, class A>
1319 void concurrent_vector<T, A>::assign_array( void* dst, const void* src, size_type n ) {
1320  internal_loop_guide loop(n, dst); loop.assign(src);
1321 }
1322 
1323 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1324  // Workaround for overzealous compiler warning
1325  #pragma warning (push)
1326  #pragma warning (disable: 4189)
1327 #endif
1328 template<typename T, class A>
1329 void concurrent_vector<T, A>::destroy_array( void* begin, size_type n ) {
1330  T* array = static_cast<T*>(begin);
1331  for( size_type j=n; j>0; --j )
1332  array[j-1].~T(); // destructors are supposed to not throw any exceptions
1333 }
1334 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1335  #pragma warning (pop)
1336 #endif // warning 4189 is back
1337 
1338 // concurrent_vector's template functions
1339 template<typename T, class A1, class A2>
1340 inline bool operator==(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b) {
1341  //TODO: call size() only once per vector (in operator==)
1342  // Simply: return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin());
1343  if(a.size() != b.size()) return false;
1344  typename concurrent_vector<T, A1>::const_iterator i(a.begin());
1345  typename concurrent_vector<T, A2>::const_iterator j(b.begin());
1346  for(; i != a.end(); ++i, ++j)
1347  if( !(*i == *j) ) return false;
1348  return true;
1349 }
1350 
1351 template<typename T, class A1, class A2>
1352 inline bool operator!=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1353 { return !(a == b); }
1354 
1355 template<typename T, class A1, class A2>
1356 inline bool operator<(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1357 { return (std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end())); }
1358 
1359 template<typename T, class A1, class A2>
1360 inline bool operator>(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1361 { return b < a; }
1362 
1363 template<typename T, class A1, class A2>
1364 inline bool operator<=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1365 { return !(b < a); }
1366 
1367 template<typename T, class A1, class A2>
1368 inline bool operator>=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1369 { return !(a < b); }
1370 
1371 template<typename T, class A>
1372 inline void swap(concurrent_vector<T, A> &a, concurrent_vector<T, A> &b)
1373 { a.swap( b ); }
1374 
1375 } // namespace tbb
1376 
1377 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1378  #pragma warning (pop)
1379 #endif // warning 4267,4127 are back
1380 
1381 #endif /* __TBB_concurrent_vector_H */