Kokkos Core Kernels Package  Version of the Day
Kokkos_UnorderedMap.hpp
Go to the documentation of this file.
1 /*
2 //@HEADER
3 // ************************************************************************
4 //
5 // Kokkos v. 2.0
6 // Copyright (2014) Sandia Corporation
7 //
8 // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9 // the U.S. Government retains certain rights in this software.
10 //
11 // Redistribution and use in source and binary forms, with or without
12 // modification, are permitted provided that the following conditions are
13 // met:
14 //
15 // 1. Redistributions of source code must retain the above copyright
16 // notice, this list of conditions and the following disclaimer.
17 //
18 // 2. Redistributions in binary form must reproduce the above copyright
19 // notice, this list of conditions and the following disclaimer in the
20 // documentation and/or other materials provided with the distribution.
21 //
22 // 3. Neither the name of the Corporation nor the names of the
23 // contributors may be used to endorse or promote products derived from
24 // this software without specific prior written permission.
25 //
26 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 //
38 // Questions? Contact H. Carter Edwards (hcedwar@sandia.gov)
39 //
40 // ************************************************************************
41 //@HEADER
42 */
43 
49 
50 #ifndef KOKKOS_UNORDERED_MAP_HPP
51 #define KOKKOS_UNORDERED_MAP_HPP
52 
53 #include <Kokkos_Core.hpp>
54 #include <Kokkos_Functional.hpp>
55 
56 #include <Kokkos_Bitset.hpp>
57 
58 #include <impl/Kokkos_Traits.hpp>
59 #include <impl/Kokkos_UnorderedMap_impl.hpp>
60 
61 
62 #include <iostream>
63 
64 #include <stdint.h>
65 #include <stdexcept>
66 
67 
68 namespace Kokkos {
69 
70 enum { UnorderedMapInvalidIndex = ~0u };
71 
85 
87 {
88 private:
// Outcome of an insert, packed into m_status: the three high bits are
// status flags, the remaining low bits count the probe-list position.
enum Status{
    SUCCESS = 1u << 31          // insert claimed and linked a new entry
  , EXISTING = 1u << 30         // the key was already present in the map
  , FREED_EXISTING = 1u << 29   // a speculatively claimed entry was released
  , LIST_LENGTH_MASK = ~(SUCCESS | EXISTING | FREED_EXISTING)  // low 29 bits
};
95 
96 public:
98  KOKKOS_FORCEINLINE_FUNCTION
99  bool success() const { return (m_status & SUCCESS); }
100 
102  KOKKOS_FORCEINLINE_FUNCTION
103  bool existing() const { return (m_status & EXISTING); }
104 
106  KOKKOS_FORCEINLINE_FUNCTION
107  bool failed() const { return m_index == UnorderedMapInvalidIndex; }
108 
111  KOKKOS_FORCEINLINE_FUNCTION
112  bool freed_existing() const { return (m_status & FREED_EXISTING); }
113 
// Number of probe steps taken along the bucket's linked list
// (saturates at LIST_LENGTH_MASK; see increment_list_position).
KOKKOS_FORCEINLINE_FUNCTION
uint32_t list_position() const { return (m_status & LIST_LENGTH_MASK); }
118 
// Index of the key's entry; only meaningful when !failed().
KOKKOS_FORCEINLINE_FUNCTION
uint32_t index() const { return m_index; }
122 
123  KOKKOS_FORCEINLINE_FUNCTION
125  : m_index(UnorderedMapInvalidIndex)
126  , m_status(0)
127  {}
128 
129  KOKKOS_FORCEINLINE_FUNCTION
130  void increment_list_position()
131  {
132  m_status += (list_position() < LIST_LENGTH_MASK) ? 1u : 0u;
133  }
134 
135  KOKKOS_FORCEINLINE_FUNCTION
136  void set_existing(uint32_t i, bool arg_freed_existing)
137  {
138  m_index = i;
139  m_status = EXISTING | (arg_freed_existing ? FREED_EXISTING : 0u) | list_position();
140  }
141 
142  KOKKOS_FORCEINLINE_FUNCTION
143  void set_success(uint32_t i)
144  {
145  m_index = i;
146  m_status = SUCCESS | list_position();
147  }
148 
149 private:
150  uint32_t m_index;
151  uint32_t m_status;
152 };
153 
209 template < typename Key
210  , typename Value
211  , typename Device = Kokkos::DefaultExecutionSpace
212  , typename Hasher = pod_hash<typename Impl::remove_const<Key>::type>
213  , typename EqualTo = pod_equal_to<typename Impl::remove_const<Key>::type>
214  >
216 {
217 private:
218  typedef typename ViewTraits<Key,Device,void,void>::host_mirror_space host_mirror_space ;
219 public:
221 
222 
//key_types
// declared_* keeps the user's (possibly const) template argument;
// key_type strips const, const_key_type re-adds it.
typedef Key declared_key_type;
typedef typename Impl::remove_const<declared_key_type>::type key_type;
typedef typename Impl::add_const<key_type>::type const_key_type;

//value_types
// Same const/non-const triple for the mapped value type.
typedef Value declared_value_type;
typedef typename Impl::remove_const<declared_value_type>::type value_type;
typedef typename Impl::add_const<value_type>::type const_value_type;

typedef Device execution_space;
typedef Hasher hasher_type;
typedef EqualTo equal_to_type;
typedef uint32_t size_type;   // index/count type used throughout the map
237 
238  //map_types
243 
// A void value type makes this container a set (keys only).
static const bool is_set = Impl::is_same<void,value_type>::value;
static const bool has_const_key = Impl::is_same<const_key_type,declared_key_type>::value;
static const bool has_const_value = is_set || Impl::is_same<const_value_type,declared_value_type>::value;

// Capability flags derived from the const-ness of key/value:
// insertable: keys mutable; modifiable: values mutable only; const: read-only view.
static const bool is_insertable_map = !has_const_key && (is_set || !has_const_value);
static const bool is_modifiable_map = has_const_key && !has_const_value;
static const bool is_const_map = has_const_key && has_const_value;
251 
252 
254 
256 
257  typedef Impl::UnorderedMapHistogram<const_map_type> histogram_type;
258 
260 
261 private:
262  enum { invalid_index = ~static_cast<size_type>(0) };
263 
264  typedef typename Impl::if_c< is_set, int, declared_value_type>::type impl_value_type;
265 
266  typedef typename Impl::if_c< is_insertable_map
269  >::type key_type_view;
270 
271  typedef typename Impl::if_c< is_insertable_map || is_modifiable_map
274  >::type value_type_view;
275 
276  typedef typename Impl::if_c< is_insertable_map
279  >::type size_type_view;
280 
281  typedef typename Impl::if_c< is_insertable_map
284  >::type bitset_type;
285 
// Indices into the m_scalars view of device-resident flags.
enum { modified_idx = 0, erasable_idx = 1, failed_insert_idx = 2 };
enum { num_scalars = 3 };
289 
290 public:
292 
293 
/// \brief Default constructor: a zero-capacity map with no allocations.
UnorderedMap()
  : m_bounded_insert()
  , m_hasher()
  , m_equal_to()
  , m_size()
  , m_available_indexes()
  , m_hash_lists()
  , m_next_index()
  , m_keys()
  , m_values()
  , m_scalars()
{}
306 
/// \brief Constructor.
///
/// \param capacity_hint requested capacity; rounded up by
///        calculate_capacity (min 128, multiple of 128).
/// \param hasher   hash functor for keys.
/// \param equal_to equality functor for keys.
/// \throws std::runtime_error if the map type is not insertable
///         (i.e. the key type is const).
UnorderedMap( size_type capacity_hint, hasher_type hasher = hasher_type(), equal_to_type equal_to = equal_to_type() )
  : m_bounded_insert(true)
  , m_hasher(hasher)
  , m_equal_to(equal_to)
  , m_size()
  , m_available_indexes(calculate_capacity(capacity_hint))
  , m_hash_lists(ViewAllocateWithoutInitializing("UnorderedMap hash list"), Impl::find_hash_size(capacity()))
  , m_next_index(ViewAllocateWithoutInitializing("UnorderedMap next index"), capacity()+1) // +1 so that the *_at functions can always return a valid reference
  , m_keys("UnorderedMap keys",capacity()+1)
  , m_values("UnorderedMap values",(is_set? 1 : capacity()+1))  // sets keep a single dummy value slot
  , m_scalars("UnorderedMap scalars")
{
  if (!is_insertable_map) {
    throw std::runtime_error("Cannot construct a non-insertable (i.e. const key_type) unordered_map");
  }

  // Buckets and next-links start empty (invalid_index terminates a list).
  Kokkos::deep_copy(m_hash_lists, invalid_index);
  Kokkos::deep_copy(m_next_index, invalid_index);
}
331 
// Clear the device-resident "an insert failed" flag (host-side call).
void reset_failed_insert_flag()
{
  reset_flag(failed_insert_idx);
}
336 
// Build a histogram object over this map's bucket-list lengths.
histogram_type get_histogram()
{
  return histogram_type(*this);
}
341 
343  void clear()
344  {
345  m_bounded_insert = true;
346 
347  if (capacity() == 0) return;
348 
349  m_available_indexes.clear();
350 
351  Kokkos::deep_copy(m_hash_lists, invalid_index);
352  Kokkos::deep_copy(m_next_index, invalid_index);
353  {
354  const key_type tmp = key_type();
355  Kokkos::deep_copy(m_keys,tmp);
356  }
357  if (is_set){
358  const impl_value_type tmp = impl_value_type();
359  Kokkos::deep_copy(m_values,tmp);
360  }
361  {
362  Kokkos::deep_copy(m_scalars, 0);
363  }
364  }
365 
376  bool rehash(size_type requested_capacity = 0)
377  {
378  const bool bounded_insert = (capacity() == 0) || (size() == 0u);
379  return rehash(requested_capacity, bounded_insert );
380  }
381 
/// \brief Change the capacity of the map (host-side, not thread safe).
///
/// Builds a fresh map of at least the current size, re-inserts every
/// existing entry via Impl::UnorderedMapRehash, then assigns it over
/// *this. Returns false for non-insertable map types.
bool rehash(size_type requested_capacity, bool bounded_insert)
{
  if(!is_insertable_map) return false;

  // Never shrink below the current number of entries.
  const size_type curr_size = size();
  requested_capacity = (requested_capacity < curr_size) ? curr_size : requested_capacity;

  insertable_map_type tmp(requested_capacity, m_hasher, m_equal_to);

  if (curr_size) {
    // Unbounded insertion during the copy so no entry can fail.
    tmp.m_bounded_insert = false;
    Impl::UnorderedMapRehash<insertable_map_type> f(tmp,*this);
    f.apply();
  }
  tmp.m_bounded_insert = bounded_insert;

  *this = tmp;

  return true;
}
402 
/// \brief The number of entries in the table (host-side).
///
/// The count is cached in the mutable m_size; it is recomputed from the
/// occupancy bitset only when the device-side "modified" flag is set.
size_type size() const
{
  if( capacity() == 0u ) return 0u;
  if (modified()) {
    m_size = m_available_indexes.count();
    reset_flag(modified_idx);
  }
  return m_size;
}
419 
/// \brief Has any insert() failed since the flag was last reset?
/// Host-side: reads the device-resident flag.
bool failed_insert() const
{
  return get_flag(failed_insert_idx);
}
429 
430  bool erasable() const
431  {
432  return is_insertable_map ? get_flag(erasable_idx) : false;
433  }
434 
/// \brief Enter the erasable state so that erase() calls take effect.
///
/// Fences before and after setting the flag so in-flight device work
/// completes first. Returns true if the state was actually changed.
bool begin_erase()
{
  bool result = !erasable();
  if (is_insertable_map && result) {
    execution_space::fence();
    set_flag(erasable_idx);
    execution_space::fence();
  }
  return result;
}
445 
/// \brief Leave the erasable state, compacting out erased entries.
///
/// Runs Impl::UnorderedMapErase to unlink entries whose occupancy bit
/// was reset, then clears the erasable flag. Returns true if the state
/// was actually changed.
bool end_erase()
{
  bool result = erasable();
  if (is_insertable_map && result) {
    execution_space::fence();
    Impl::UnorderedMapErase<declared_map_type> f(*this);
    f.apply();
    execution_space::fence();
    reset_flag(erasable_idx);
  }
  return result;
}
458 
// Maximum number of entries the table can hold: one slot per bit of
// the occupancy bitset.
KOKKOS_FORCEINLINE_FUNCTION
size_type capacity() const
{ return m_available_indexes.size(); }
466 
// Number of hash-table buckets (length of the bucket-head array).
KOKKOS_INLINE_FUNCTION
size_type hash_capacity() const
{ return m_hash_lists.dimension_0(); }
480 
481  //---------------------------------------------------------------------------
482  //---------------------------------------------------------------------------
483 
484 
/// \brief Attempt to insert key \c k with value \c v into the map.
///
/// Thread safe: designed to be called concurrently by many device
/// threads. Returns an insert_result describing the outcome:
/// success (new entry created), existing (key already present), or
/// failed (capacity exhausted / map not insertable / erasable state).
///
/// Algorithm: walk the bucket's singly linked list looking for the key;
/// if absent, claim a free slot from the occupancy bitset, write the
/// key/value, then atomically splice the slot onto the end of the list.
/// If another thread inserted the same key first, release the claimed
/// slot again (FREED_EXISTING).
KOKKOS_INLINE_FUNCTION
insert_result insert(key_type const& k, impl_value_type const&v = impl_value_type()) const
{
  insert_result result;

  // Inserts are refused while the map is in the erasable state.
  if ( !is_insertable_map || capacity() == 0u || m_scalars((int)erasable_idx) ) {
    return result;
  }

  // Mark the map modified so the host recomputes size(); the write is
  // idempotent, so the unsynchronized check-then-set race is benign.
  if ( !m_scalars((int)modified_idx) ) {
    m_scalars((int)modified_idx) = true;
  }

  int volatile & failed_insert_ref = m_scalars((int)failed_insert_idx) ;

  const size_type hash_value = m_hasher(k);
  const size_type hash_list = hash_value % m_hash_lists.dimension_0();

  size_type * curr_ptr = & m_hash_lists[ hash_list ];
  size_type new_index = invalid_index ;

  // Force integer multiply to long
  // Starting hint for the free-slot search: spread buckets evenly
  // across the capacity range.
  size_type index_hint = static_cast<size_type>( (static_cast<double>(hash_list) * capacity()) / m_hash_lists.dimension_0());

  size_type find_attempts = 0;

  // With bounded insertion, give up after a fixed number of failed
  // attempts to claim a free slot instead of scanning the whole bitset.
  enum { bounded_find_attempts = 32u };
  const size_type max_attempts = (m_bounded_insert && (bounded_find_attempts < m_available_indexes.max_hint()) ) ?
      bounded_find_attempts :
      m_available_indexes.max_hint();

  bool not_done = true ;

#if defined( __MIC__ )
  #pragma noprefetch
#endif
  while ( not_done ) {

    // Continue searching the unordered list for this key,
    // list will only be appended during insert phase.
    // Need volatile_load as other threads may be appending.
    size_type curr = volatile_load(curr_ptr);

    KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
#if defined( __MIC__ )
    #pragma noprefetch
#endif
    while ( curr != invalid_index && ! m_equal_to( volatile_load(&m_keys[curr]), k) ) {
      result.increment_list_position();
      index_hint = curr;
      curr_ptr = &m_next_index[curr];
      curr = volatile_load(curr_ptr);
      KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
    }

    //------------------------------------------------------------
    // If key already present then return that index.
    if ( curr != invalid_index ) {

      const bool free_existing = new_index != invalid_index;
      if ( free_existing ) {
        // Previously claimed an unused entry that was not inserted.
        // Release this unused entry immediately.
        if (!m_available_indexes.reset(new_index) ) {
          printf("Unable to free existing\n");
        }

      }

      result.set_existing(curr, free_existing);
      not_done = false ;
    }
    //------------------------------------------------------------
    // Key is not currently in the map.
    // If the thread has claimed an entry try to insert now.
    else {

      //------------------------------------------------------------
      // If have not already claimed an unused entry then do so now.
      if (new_index == invalid_index) {

        bool found = false;
        // use the hash_list as the flag for the search direction
        Kokkos::tie(found, index_hint) = m_available_indexes.find_any_unset_near( index_hint, hash_list );

        // found and index and this thread set it
        if ( !found && ++find_attempts >= max_attempts ) {
          failed_insert_ref = true;
          not_done = false ;
        }
        else if (m_available_indexes.set(index_hint) ) {
          new_index = index_hint;
          // Set key and value
          KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_keys[new_index]);
          m_keys[new_index] = k ;

          if (!is_set) {
            KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_values[new_index]);
            m_values[new_index] = v ;
          }

          // Do not proceed until key and value are updated in global memory
          memory_fence();
        }
      }
      else if (failed_insert_ref) {
        // Another thread exhausted capacity; stop retrying.
        not_done = false;
      }

      // Attempt to append claimed entry into the list.
      // Another thread may also be trying to append the same list so protect with atomic.
      if ( new_index != invalid_index &&
           curr == atomic_compare_exchange(curr_ptr, static_cast<size_type>(invalid_index), new_index) ) {
        // Succeeded in appending
        result.set_success(new_index);
        not_done = false ;
      }
      // On CAS failure the outer loop re-reads *curr_ptr and rescans
      // the (possibly grown) list.
    }
  } // while ( not_done )

  return result ;
}
615 
/// \brief Mark the entry matching key \c k as erased.
///
/// Only effective while the map is in the erasable state (between
/// begin_erase() and end_erase()); the entry's occupancy bit is reset
/// here and the list is actually unlinked in end_erase().
/// \return true if an entry for \c k was found and marked.
KOKKOS_INLINE_FUNCTION
bool erase(key_type const& k) const
{
  bool result = false;

  if(is_insertable_map && 0u < capacity() && m_scalars((int)erasable_idx)) {

    // Idempotent flag write; benign race (same pattern as insert()).
    if ( ! m_scalars((int)modified_idx) ) {
      m_scalars((int)modified_idx) = true;
    }

    size_type index = find(k);
    if (valid_at(index)) {
      m_available_indexes.reset(index);
      result = true;
    }
  }

  return result;
}
636 
/// \brief Find the entry index for key \c k.
///
/// Walks the singly linked list of the key's bucket.
/// \return the entry index, or invalid_index (== UnorderedMapInvalidIndex
///         truncated to size_type) if the key is absent or capacity is 0.
KOKKOS_INLINE_FUNCTION
size_type find( const key_type & k) const
{
  size_type curr = 0u < capacity() ? m_hash_lists( m_hasher(k) % m_hash_lists.dimension_0() ) : invalid_index ;

  KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
  while (curr != invalid_index && !m_equal_to( m_keys[curr], k) ) {
    KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
    curr = m_next_index[curr];
  }

  return curr;
}
657 
662  KOKKOS_INLINE_FUNCTION
663  bool exists( const key_type & k) const
664  {
665  return valid_at(find(k));
666  }
667 
668 
/// \brief Get the value stored at direct index \c i.
///
/// Sets always use the single dummy slot 0. Out-of-range indices are
/// clamped to capacity(), the extra slot allocated by the constructor
/// so a valid reference can always be returned. Returns by value for
/// sets/const maps, by reference otherwise.
KOKKOS_FORCEINLINE_FUNCTION
typename Impl::if_c< (is_set || has_const_value), impl_value_type, impl_value_type &>::type
value_at(size_type i) const
{
  return m_values[ is_set ? 0 : (i < capacity() ? i : capacity()) ];
}
683 
/// \brief Get the key stored at direct index \c i (by value).
/// Out-of-range indices are clamped to the extra slot at capacity().
KOKKOS_FORCEINLINE_FUNCTION
key_type key_at(size_type i) const
{
  return m_keys[ i < capacity() ? i : capacity() ];
}
695 
// Is direct index i an occupied (non-erased) entry? Tests the
// occupancy bitset.
KOKKOS_FORCEINLINE_FUNCTION
bool valid_at(size_type i) const
{
  return m_available_indexes.test(i);
}
701 
702  template <typename SKey, typename SValue>
704  typename Impl::enable_if< Impl::UnorderedMapCanAssign<declared_key_type,declared_value_type,SKey,SValue>::value,int>::type = 0
705  )
706  : m_bounded_insert(src.m_bounded_insert)
707  , m_hasher(src.m_hasher)
708  , m_equal_to(src.m_equal_to)
709  , m_size(src.m_size)
710  , m_available_indexes(src.m_available_indexes)
711  , m_hash_lists(src.m_hash_lists)
712  , m_next_index(src.m_next_index)
713  , m_keys(src.m_keys)
714  , m_values(src.m_values)
715  , m_scalars(src.m_scalars)
716  {}
717 
718 
719  template <typename SKey, typename SValue>
720  typename Impl::enable_if< Impl::UnorderedMapCanAssign<declared_key_type,declared_value_type,SKey,SValue>::value
721  ,declared_map_type & >::type
723  {
724  m_bounded_insert = src.m_bounded_insert;
725  m_hasher = src.m_hasher;
726  m_equal_to = src.m_equal_to;
727  m_size = src.m_size;
728  m_available_indexes = src.m_available_indexes;
729  m_hash_lists = src.m_hash_lists;
730  m_next_index = src.m_next_index;
731  m_keys = src.m_keys;
732  m_values = src.m_values;
733  m_scalars = src.m_scalars;
734  return *this;
735  }
736 
/// \brief Replace *this with a deep copy of \c src, which may live in a
/// different memory space (same key/value types required by enable_if).
///
/// Allocates fresh storage sized like src's, then performs raw
/// space-to-space copies of every array. No-op when both maps already
/// share the same underlying hash-list storage.
template <typename SKey, typename SValue, typename SDevice>
typename Impl::enable_if< Impl::is_same< typename Impl::remove_const<SKey>::type, key_type>::value &&
                          Impl::is_same< typename Impl::remove_const<SValue>::type, value_type>::value
                        >::type
create_copy_view( UnorderedMap<SKey, SValue, SDevice, Hasher,EqualTo> const& src)
{
  if (m_hash_lists.ptr_on_device() != src.m_hash_lists.ptr_on_device()) {

    insertable_map_type tmp;

    tmp.m_bounded_insert = src.m_bounded_insert;
    tmp.m_hasher = src.m_hasher;
    tmp.m_equal_to = src.m_equal_to;
    tmp.m_size = src.size();
    tmp.m_available_indexes = bitset_type( src.capacity() );
    tmp.m_hash_lists = size_type_view( ViewAllocateWithoutInitializing("UnorderedMap hash list"), src.m_hash_lists.dimension_0() );
    tmp.m_next_index = size_type_view( ViewAllocateWithoutInitializing("UnorderedMap next index"), src.m_next_index.dimension_0() );
    tmp.m_keys = key_type_view( ViewAllocateWithoutInitializing("UnorderedMap keys"), src.m_keys.dimension_0() );
    tmp.m_values = value_type_view( ViewAllocateWithoutInitializing("UnorderedMap values"), src.m_values.dimension_0() );
    tmp.m_scalars = scalars_view("UnorderedMap scalars");

    Kokkos::deep_copy(tmp.m_available_indexes, src.m_available_indexes);

    // Byte-wise copies between the two memory spaces.
    typedef Kokkos::Impl::DeepCopy< typename execution_space::memory_space, typename SDevice::memory_space > raw_deep_copy;

    raw_deep_copy(tmp.m_hash_lists.ptr_on_device(), src.m_hash_lists.ptr_on_device(), sizeof(size_type)*src.m_hash_lists.dimension_0());
    raw_deep_copy(tmp.m_next_index.ptr_on_device(), src.m_next_index.ptr_on_device(), sizeof(size_type)*src.m_next_index.dimension_0());
    raw_deep_copy(tmp.m_keys.ptr_on_device(), src.m_keys.ptr_on_device(), sizeof(key_type)*src.m_keys.dimension_0());
    if (!is_set) {
      // Sets have no real values to copy (single dummy slot).
      raw_deep_copy(tmp.m_values.ptr_on_device(), src.m_values.ptr_on_device(), sizeof(impl_value_type)*src.m_values.dimension_0());
    }
    raw_deep_copy(tmp.m_scalars.ptr_on_device(), src.m_scalars.ptr_on_device(), sizeof(int)*num_scalars );

    *this = tmp;
  }
}
773 
775 private: // private member functions
776 
// Has the map been modified on the device since size() last synced?
bool modified() const
{
  return get_flag(modified_idx);
}
781 
// Host-side: write `true` into the device-resident scalar flag at
// offset `flag` (modified/erasable/failed_insert).
void set_flag(int flag) const
{
  typedef Kokkos::Impl::DeepCopy< typename execution_space::memory_space, Kokkos::HostSpace > raw_deep_copy;
  const int true_ = true;
  raw_deep_copy(m_scalars.ptr_on_device() + flag, &true_, sizeof(int));
}
788 
// Host-side: write `false` into the device-resident scalar flag at
// offset `flag`.
void reset_flag(int flag) const
{
  typedef Kokkos::Impl::DeepCopy< typename execution_space::memory_space, Kokkos::HostSpace > raw_deep_copy;
  const int false_ = false;
  raw_deep_copy(m_scalars.ptr_on_device() + flag, &false_, sizeof(int));
}
795 
// Host-side: read the device-resident scalar flag at offset `flag`.
bool get_flag(int flag) const
{
  typedef Kokkos::Impl::DeepCopy< Kokkos::HostSpace, typename execution_space::memory_space > raw_deep_copy;
  int result = false;
  raw_deep_copy(&result, m_scalars.ptr_on_device() + flag, sizeof(int));
  return result;
}
803 
// Turn a user capacity hint into the actual allocated capacity:
// grow by ~16% (multiply by 7/6 in 64-bit to avoid overflow) and round
// up to the nearest multiple of 128; a zero hint yields the minimum 128.
static uint32_t calculate_capacity(uint32_t capacity_hint)
{
  if (capacity_hint == 0u) {
    return 128u;
  }
  const uint32_t grown = static_cast<uint32_t>((7ull * capacity_hint) / 6u);
  return ((grown + 127u) / 128u) * 128u;
}
809 
private: // private members
  bool m_bounded_insert;                 // limit insert()'s free-slot search (see max_attempts)
  hasher_type m_hasher;                  // key hash functor
  equal_to_type m_equal_to;              // key equality functor
  mutable size_type m_size;              // cached entry count, refreshed lazily by size()
  bitset_type m_available_indexes;       // occupancy bitset: set bit == slot in use
  size_type_view m_hash_lists;           // bucket heads (index of first entry, or invalid_index)
  size_type_view m_next_index;           // per-entry next link of the bucket lists
  key_type_view m_keys;                  // entry keys (capacity()+1 slots)
  value_type_view m_values;              // entry values (1 dummy slot for sets)
  scalars_view m_scalars;                // device flags: modified / erasable / failed_insert
821 
822  template <typename KKey, typename VValue, typename DDevice, typename HHash, typename EEqualTo>
823  friend class UnorderedMap;
824 
825  template <typename UMap>
826  friend struct Impl::UnorderedMapErase;
827 
828  template <typename UMap>
829  friend struct Impl::UnorderedMapHistogram;
830 
831  template <typename UMap>
832  friend struct Impl::UnorderedMapPrint;
833 };
834 
835 // Specialization of deep_copy for two UnorderedMap objects.
836 template < typename DKey, typename DT, typename DDevice
837  , typename SKey, typename ST, typename SDevice
838  , typename Hasher, typename EqualTo >
841 {
842  dst.create_copy_view(src);
843 }
844 
845 
846 } // namespace Kokkos
847 
848 #endif //KOKKOS_UNORDERED_MAP_HPP
KOKKOS_FORCEINLINE_FUNCTION bool success() const
Did the map successfully insert the key/value pair.
A thread safe view to a bitset.
KOKKOS_FORCEINLINE_FUNCTION size_type capacity() const
The maximum number of entries that the table can hold.
void deep_copy(const View< DT, DL, DD, DM, DS > &dst, typename Impl::enable_if<(Impl::is_same< typename ViewTraits< DT, DL, DD, DM >::non_const_value_type, typename ViewTraits< DT, DL, DD, DM >::value_type >::value), typename ViewTraits< DT, DL, DD, DM >::const_value_type >::type &value)
Deep copy a value into a view.
KOKKOS_FORCEINLINE_FUNCTION uint32_t list_position() const
UnorderedMap(size_type capacity_hint, hasher_type hasher=hasher_type(), equal_to_type equal_to=equal_to_type())
Constructor.
KOKKOS_INLINE_FUNCTION insert_result insert(key_type const &k, impl_value_type const &v=impl_value_type()) const
size_type size() const
The number of entries in the table.
KOKKOS_FORCEINLINE_FUNCTION uint32_t index() const
Index where the key can be found as long as the insert did not fail.
void clear()
Clear all entries in the table.
View to an array of data.
KOKKOS_INLINE_FUNCTION size_type hash_capacity() const
The number of hash table "buckets".
Memory space for main process and CPU execution spaces.
First element of the return value of UnorderedMap::insert().
KOKKOS_FORCEINLINE_FUNCTION bool freed_existing() const
bool rehash(size_type requested_capacity=0)
Change the capacity of the map.
KOKKOS_INLINE_FUNCTION bool exists(const key_type &k) const
Does the key exist in the map.
bool failed_insert() const
The current number of failed insert() calls.
KOKKOS_FORCEINLINE_FUNCTION Impl::if_c< (is_set||has_const_value), impl_value_type, impl_value_type & >::type value_at(size_type i) const
Get the value with i as its direct index.
KOKKOS_FORCEINLINE_FUNCTION key_type key_at(size_type i) const
Get the key with i as its direct index.
KOKKOS_INLINE_FUNCTION size_type find(const key_type &k) const
Find the given key k, if it exists in the table.
Thread-safe, performance-portable lookup table.
KOKKOS_FORCEINLINE_FUNCTION bool failed() const
Did the map fail to insert the key due to insufficient capacity.
KOKKOS_FORCEINLINE_FUNCTION bool existing() const
Was the key already present in the map.
KOKKOS_FORCEINLINE_FUNCTION pair< T1 &, T2 & > tie(T1 &x, T2 &y)
Return a pair of references to the input arguments.