#ifndef __TBB_concurrent_hash_map_H
#define __TBB_concurrent_hash_map_H

#include "tbb_stddef.h"

#if !TBB_USE_EXCEPTIONS && _MSC_VER
// Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
#pragma warning (push)
#pragma warning (disable: 4530)
#endif

#include <iterator>
#include <utility>      // Need std::pair
#include <cstring>      // Need std::memset

#if !TBB_USE_EXCEPTIONS && _MSC_VER
#pragma warning (pop)
#endif

#include "cache_aligned_allocator.h"
#include "tbb_allocator.h"
#include "spin_rw_mutex.h"
#include "atomic.h"
#include "aligned_space.h"
#include "tbb_exception.h"
#include "tbb_profiling.h"
#include "internal/_concurrent_unordered_impl.h" // Need tbb_hasher
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#include <typeinfo>
#endif
#if __TBB_STATISTICS
#include <stdio.h>
#endif

namespace tbb {

//! Default comparator: hashes keys via tbb_hasher and compares them with operator==.
template<typename Key>
struct tbb_hash_compare {
    static size_t hash( const Key& a ) { return tbb_hasher(a); }
    static bool equal( const Key& a, const Key& b ) { return a == b; }
};
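
// Illustrative sketch (not part of the header): a user-supplied comparator for
// a case-insensitive std::string key. Any type exposing this hash()/equal()
// interface can be passed as the HashCompare argument of concurrent_hash_map.
// The class name is hypothetical; assumes <string> and <cctype> are included.
//
//   struct ci_string_hash_compare {
//       static size_t hash( const std::string& s ) {
//           size_t h = 0;
//           for( size_t i = 0; i < s.size(); ++i )
//               h = h*31 + std::tolower( (unsigned char)s[i] );
//           return h;
//       }
//       static bool equal( const std::string& a, const std::string& b ) {
//           if( a.size() != b.size() ) return false;
//           for( size_t i = 0; i < a.size(); ++i )
//               if( std::tolower((unsigned char)a[i]) != std::tolower((unsigned char)b[i]) )
//                   return false;
//           return true;
//       }
//   };
//
//   typedef tbb::concurrent_hash_map<std::string,int,ci_string_hash_compare> ci_table;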

namespace interface5 {

template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<Key, T> > >
class concurrent_hash_map;

namespace internal {

//! Type of a hash code.
typedef size_t hashcode_t;

//! Node base type
struct hash_map_node_base : tbb::internal::no_copy {
    //! Mutex type
    typedef spin_rw_mutex mutex_t;
    //! Scoped lock type for mutex
    typedef mutex_t::scoped_lock scoped_t;
    //! Next node in chain
    hash_map_node_base *next;
    mutex_t mutex;
};

//! Incompleteness flag value: marks a bucket that still awaits rehashing from its parent
static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));
//! Rehashed empty bucket flag
static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));

//! Base class of concurrent_hash_map: a segmented, lazily allocated bucket table
class hash_map_base {
public:
    //! Size type
    typedef size_t size_type;
    //! Type of a hash code.
    typedef size_t hashcode_t;
    //! Segment index type
    typedef size_t segment_index_t;
    //! Node base type
    typedef hash_map_node_base node_base;
    //! Bucket type
    struct bucket : tbb::internal::no_copy {
        //! Mutex type for buckets
        typedef spin_rw_mutex mutex_t;
        //! Scoped lock type for mutex
        typedef mutex_t::scoped_lock scoped_t;
        mutex_t mutex;
        node_base *node_list;
    };
    //! Count of segments embedded in the map object itself
    static size_type const embedded_block = 1;
    //! Count of buckets in the embedded segments
    static size_type const embedded_buckets = 1<<embedded_block;
    //! Count of segments allocated together as the first block (includes the embedded ones)
    static size_type const first_block = 8;
    //! Size of the segment pointer table: one segment per bit of the hash code
    static size_type const pointers_per_table = sizeof(segment_index_t) * 8;
    //! Segment pointer
    typedef bucket *segment_ptr_t;
    //! Segment pointers table type
    typedef segment_ptr_t segments_table_t[pointers_per_table];
    //! Hash mask = count of allocated buckets - 1
    atomic<hashcode_t> my_mask;
    //! Segment pointers table. Also prevents false sharing between my_mask and my_size
    segments_table_t my_table;
    //! Size of container in stored items
    atomic<size_type> my_size; // It must be in a separate cache line from my_mask due to performance effects
    //! Zero segment
    bucket my_embedded_segment[embedded_buckets];
#if __TBB_STATISTICS
    atomic<unsigned> my_info_resizes; // concurrent ones
    mutable atomic<unsigned> my_info_restarts; // race collisions
    atomic<unsigned> my_info_rehashes; // invocations of rehash_bucket
#endif

    //! Constructor: zeroes my_mask, my_table, my_size and the embedded buckets in one pass
    hash_map_base() {
        std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t)
            + sizeof(my_size) + sizeof(my_mask)
            + embedded_buckets*sizeof(bucket) );
        for( size_type i = 0; i < embedded_block; i++ ) // fill the table
            my_table[i] = my_embedded_segment + segment_base(i);
        my_mask = embedded_buckets - 1; // 2^embedded_block - 1
        __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
#if __TBB_STATISTICS
        my_info_resizes = 0; // concurrent ones
        my_info_restarts = 0; // race collisions
        my_info_rehashes = 0; // invocations of rehash_bucket
#endif
    }

    //! @return segment index of given index in the array
    static segment_index_t segment_index_of( size_type index ) {
        return segment_index_t( __TBB_Log2( index|1 ) );
    }

    //! @return the first array index of given segment
    static segment_index_t segment_base( segment_index_t k ) {
        return (segment_index_t(1)<<k & ~segment_index_t(1));
    }

    //! @return segment size except for @arg k == 0
    static size_type segment_size( segment_index_t k ) {
        return size_type(1)<<k; // fake value for k == 0
    }

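    // Worked example of the segmented layout (illustrative, derived from the three
    // helpers above): bucket indices map onto lazily allocated segments as
    //     segment 0: indices 0..1 (embedded)
    //     segment k >= 1: indices 2^k .. 2^(k+1)-1, starting at segment_base(k) = 2^k
    // e.g. for index 5: segment_index_of(5) = Log2(5) = 2, segment_base(2) = 4, so
    // index 5 is slot 5-4 = 1 of segment 2. Growing the table only appends segments;
    // existing buckets never move, which is what lets lookups proceed concurrently
    // with resizing.
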
    //! True if ptr is a valid pointer (i.e. not a flag value such as rehash_req)
    static bool is_valid( void *ptr ) {
        return reinterpret_cast<size_t>(ptr) > size_t(63);
    }

    //! Initialize buckets
    static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
        if( is_initial ) std::memset(ptr, 0, sz*sizeof(bucket) );
        else for(size_type i = 0; i < sz; i++, ptr++) { // flag the new buckets as awaiting rehash
            *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
            ptr->node_list = rehash_req;
        }
    }

    //! Add node @arg n to bucket @arg b
    static void add_to_bucket( bucket *b, node_base *n ) {
        __TBB_ASSERT(b->node_list != rehash_req, NULL);
        n->next = b->node_list;
        b->node_list = n; // bucket is locked by the caller
    }

    //! Exception safety helper: resets the segment slot if allocation throws
    struct enable_segment_failsafe {
        segment_ptr_t *my_segment_ptr;
        enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {}
        ~enable_segment_failsafe() {
            if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
        }
    };

    //! Enable segment
    void enable_segment( segment_index_t k, bool is_initial = false ) {
        __TBB_ASSERT( k, "Zero segment must be embedded" );
        enable_segment_failsafe watchdog( my_table, k );
        cache_aligned_allocator<bucket> alloc;
        size_type sz;
        __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
        if( k >= first_block ) { // not the first block
            sz = segment_size( k );
            segment_ptr_t ptr = alloc.allocate( sz );
            init_buckets( ptr, sz, is_initial );
            itt_hide_store_word( my_table[k], ptr );
            sz <<= 1; // double it to get entire capacity of the container
        } else { // the first block
            __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
            sz = segment_size( first_block );
            segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets );
            init_buckets( ptr, sz - embedded_buckets, is_initial );
            ptr -= segment_base(embedded_block);
            for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
                itt_hide_store_word( my_table[i], ptr + segment_base(i) );
        }
        itt_store_word_with_release( my_mask, sz-1 ); // publish the new mask only after the buckets exist
        watchdog.my_segment_ptr = 0; // keep the allocation
    }

    //! Get bucket by (masked) hashcode
    bucket *get_bucket( hashcode_t h ) const throw() {
        segment_index_t s = segment_index_of( h );
        h -= segment_base(s);
        segment_ptr_t seg = my_table[s];
        __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
        return &seg[h];
    }

    //! Internal serial rehashing helper
    void mark_rehashed_levels( hashcode_t h ) throw () {
        segment_index_t s = segment_index_of( h );
        while( segment_ptr_t seg = my_table[++s] )
            if( seg[h].node_list == rehash_req ) {
                seg[h].node_list = empty_rehashed;
                mark_rehashed_levels( h + ((hashcode_t)1<<s) ); // optimized segment_base(s)
            }
    }

    //! Check for a mask race: the mask may have grown between its load and the bucket lookup.
    // Splitting into two functions should help inlining
    inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
        hashcode_t m_now, m_old = m;
        m_now = (hashcode_t) itt_load_word_with_acquire( my_mask );
        if( m_old != m_now )
            return check_rehashing_collision( h, m_old, m = m_now );
        return false;
    }

    //! Process mask race, check for rehashing collision
    bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
        __TBB_ASSERT(m_old != m, NULL);
        if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
            // condition above proves that h has some other bits set besides m_old;
            // find the next applicable mask after m_old
            for( ++m_old; !(h & m_old); m_old <<= 1 ) // at maximum few rounds depending on the first block size
                ;
            m_old = (m_old<<1) - 1; // get full mask from a bit
            __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
            // check whether the old bucket has been rehashed already
            if( itt_load_word_with_acquire(get_bucket(h & m_old)->node_list) != rehash_req )
            {
#if __TBB_STATISTICS
                my_info_restarts++; // race collisions
#endif
                return true;
            }
        }
        return false;
    }

    //! Insert a node and check the load factor. @return segment index to enable, or 0 if nothing to do.
    segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
        size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
        add_to_bucket( b, n );
        // check load factor
        if( sz >= mask ) {
            segment_index_t new_seg = __TBB_Log2( mask+1 ); // optimized segment_index_of
            __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
            if( !itt_hide_load_word(my_table[new_seg])
                && __TBB_CompareAndSwapW(&my_table[new_seg], 2, 0) == 0 )
                return new_seg; // The value must be processed
        }
        return 0;
    }

    //! Prepare enough segments for the given number of buckets
    void reserve(size_type buckets) {
        if( !buckets-- ) return;
        bool is_initial = !my_size;
        for( size_type m = my_mask; buckets > m; m = my_mask )
            enable_segment( segment_index_of( m+1 ), is_initial );
    }

    //! Swap hash_map_bases
    void internal_swap(hash_map_base &table) {
        std::swap(this->my_mask, table.my_mask);
        std::swap(this->my_size, table.my_size);
        for(size_type i = 0; i < embedded_buckets; i++)
            std::swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
        for(size_type i = embedded_block; i < pointers_per_table; i++)
            std::swap(this->my_table[i], table.my_table[i]);
    }
};
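
// Illustrative note (not part of the original header): the lock-free growth
// protocol above. insert_new_node() claims the next segment slot by CAS-ing a
// small sentinel (2) into my_table[new_seg] once my_size reaches the mask; the
// winning caller then runs enable_segment(), which allocates the buckets,
// flags them rehash_req, and only afterwards publishes the enlarged my_mask
// with release semantics. Readers that loaded a stale mask detect the change
// via check_mask_race() and retry, so growth never takes a global lock.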

template<typename Iterator>
class hash_map_range;

//! Meets requirements of a forward iterator for STL
/** Value is either the T or const T type of the container. */
template<typename Container, typename Value>
class hash_map_iterator
    : public std::iterator<std::forward_iterator_tag,Value>
{
    typedef Container map_type;
    typedef typename Container::node node;
    typedef hash_map_base::node_base node_base;
    typedef hash_map_base::bucket bucket;

    template<typename C, typename T, typename U>
    friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename U>
    friend class hash_map_iterator;

    template<typename I>
    friend class hash_map_range;

    void advance_to_next_bucket() {
        size_t k = my_index+1;
        while( my_bucket && k <= my_map->my_mask ) {
            // Following test uses 2's-complement wizardry
            if( k & (k-2) ) // not the beginning of a segment
                ++my_bucket;
            else my_bucket = my_map->get_bucket( k );
            my_node = static_cast<node*>( my_bucket->node_list );
            if( hash_map_base::is_valid(my_node) ) {
                my_index = k; return;
            }
            ++k;
        }
        my_bucket = 0; my_node = 0; my_index = k; // the end
    }
#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
    template<typename Key, typename T, typename HashCompare, typename A>
    friend class interface5::concurrent_hash_map;
#else
public: // workaround
#endif
    //! concurrent_hash_map over which we are iterating.
    const Container *my_map;

    //! Index in hash table for current item
    size_t my_index;

    //! Pointer to bucket
    const bucket *my_bucket;

    //! Pointer to node that has current item
    node *my_node;

    hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );

public:
    //! Construct undefined iterator
    hash_map_iterator() {}
    hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type> &other ) :
        my_map(other.my_map),
        my_index(other.my_index),
        my_bucket(other.my_bucket),
        my_node(other.my_node)
    {}
    Value& operator*() const {
        __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
        return my_node->item;
    }
    Value* operator->() const {return &operator*();}
    hash_map_iterator& operator++();

    //! Post increment
    hash_map_iterator operator++(int) {
        hash_map_iterator old(*this);
        operator++();
        return old;
    }
};

template<typename Container, typename Value>
hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
    my_map(&map),
    my_index(index),
    my_bucket(b),
    my_node( static_cast<node*>(n) )
{
    if( b && !hash_map_base::is_valid(n) )
        advance_to_next_bucket();
}

template<typename Container, typename Value>
hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
    my_node = static_cast<node*>( my_node->next );
    if( !my_node ) advance_to_next_bucket();
    return *this;
}

template<typename Container, typename T, typename U>
bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
    return i.my_node == j.my_node && i.my_map == j.my_map;
}

template<typename Container, typename T, typename U>
bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
    return i.my_node != j.my_node || i.my_map != j.my_map;
}

//! Range class used with concurrent_hash_map
template<typename Iterator>
class hash_map_range {
    typedef typename Iterator::map_type map_type;
    Iterator my_begin;
    Iterator my_end;
    mutable Iterator my_midpoint;
    size_t my_grainsize;
    //! Set my_midpoint to point approximately half way between my_begin and my_end.
    void set_midpoint() const;
    template<typename U> friend class hash_map_range;
public:
    //! Type for size of a range
    typedef std::size_t size_type;
    typedef typename Iterator::value_type value_type;
    typedef typename Iterator::reference reference;
    typedef typename Iterator::difference_type difference_type;
    typedef Iterator iterator;

    //! True if range is empty.
    bool empty() const {return my_begin==my_end;}

    //! True if range can be partitioned into two subranges.
    bool is_divisible() const {
        return my_midpoint!=my_end;
    }
    //! Split range.
    hash_map_range( hash_map_range& r, split ) :
        my_end(r.my_end),
        my_grainsize(r.my_grainsize)
    {
        r.my_end = my_begin = r.my_midpoint;
        __TBB_ASSERT( !empty(), "Splitting an indivisible range" );
        __TBB_ASSERT( !r.empty(), "Splitting an indivisible range" );
        set_midpoint();
        r.set_midpoint();
    }
    //! Type conversion from a non-const to a const iterator range.
    template<typename U>
    hash_map_range( hash_map_range<U>& r) :
        my_begin(r.my_begin),
        my_end(r.my_end),
        my_midpoint(r.my_midpoint),
        my_grainsize(r.my_grainsize)
    {}
#if TBB_DEPRECATED
    //! Init range with iterators and grainsize specified
    hash_map_range( const Iterator& begin_, const Iterator& end_, size_type grainsize_ = 1 ) :
        my_begin(begin_),
        my_end(end_),
        my_grainsize(grainsize_)
    {
        if(!my_end.my_index && !my_end.my_bucket) // end
            my_end.my_index = my_end.my_map->my_mask + 1;
        set_midpoint();
        __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
    }
#endif
    //! Init range with container and grainsize specified
    hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
        my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
        my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
        my_grainsize( grainsize_ )
    {
        __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
        set_midpoint();
    }
    const Iterator& begin() const {return my_begin;}
    const Iterator& end() const {return my_end;}
    //! The grain size for this range.
    size_type grainsize() const {return my_grainsize;}
};
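
// Illustrative sketch (not part of the header): hash_map_range models the TBB
// Range concept, so a map can be traversed in parallel through the range()
// methods of concurrent_hash_map (defined below). A minimal sum over a
// hypothetical std::string -> int table, assuming "tbb/parallel_for.h" is
// included and C++11 lambdas are available:
//
//   typedef tbb::concurrent_hash_map<std::string,int> table_t;
//
//   int total( const table_t &table ) {
//       tbb::atomic<int> sum;
//       sum = 0;
//       tbb::parallel_for( table.range(),
//           [&sum]( const table_t::const_range_type &r ) {
//               for( table_t::const_iterator i = r.begin(); i != r.end(); ++i )
//                   sum += i->second;
//           } );
//       return sum;
//   }
//
// Traversal is only safe while no other thread inserts or erases concurrently.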

template<typename Iterator>
void hash_map_range<Iterator>::set_midpoint() const {
    // Split by groups of nodes
    size_t m = my_end.my_index-my_begin.my_index;
    if( m > my_grainsize ) {
        m = my_begin.my_index + m/2u;
        hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
        my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
    } else {
        my_midpoint = my_end;
    }
    __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
        "my_begin is after my_midpoint" );
    __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
        "my_midpoint is after my_end" );
    __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
        "[my_begin, my_midpoint) range should not be empty" );
}

} // namespace internal

//! Unordered map from Key to T with concurrent access.
/** Supports concurrent insertion, lookup, and erasure. Elements are reached
    through accessor (write-locking) and const_accessor (read-locking) proxies,
    which hold a per-element reader-writer lock for as long as they stay attached.
    HashCompare must supply static hash() and equal() as tbb_hash_compare does.
    @ingroup containers */
template<typename Key, typename T, typename HashCompare, typename Allocator>
class concurrent_hash_map : protected internal::hash_map_base {
    template<typename Container, typename Value>
    friend class internal::hash_map_iterator;

    template<typename I>
    friend class internal::hash_map_range;

public:
    typedef Key key_type;
    typedef T mapped_type;
    typedef std::pair<const Key,T> value_type;
    typedef hash_map_base::size_type size_type;
    typedef ptrdiff_t difference_type;
    typedef value_type *pointer;
    typedef const value_type *const_pointer;
    typedef value_type &reference;
    typedef const value_type &const_reference;
    typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
    typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
    typedef internal::hash_map_range<iterator> range_type;
    typedef internal::hash_map_range<const_iterator> const_range_type;
    typedef Allocator allocator_type;

protected:
    friend class const_accessor;
    struct node;
    typedef typename Allocator::template rebind<node>::other node_allocator_type;
    node_allocator_type my_allocator;
    HashCompare my_hash_compare;

    struct node : public node_base {
        value_type item;
        node( const Key &key ) : item(key, T()) {}
        node( const Key &key, const T &t ) : item(key, t) {}
        // exception-safe allocation, see C++ Standard 2003, clause 5.3.4p17
        void *operator new( size_t, node_allocator_type &a ) {
            void *ptr = a.allocate(1);
            if(!ptr)
                tbb::internal::throw_exception(tbb::internal::eid_bad_alloc);
            return ptr;
        }
        // match the placement-new form above; called if an exception is thrown in the constructor
        void operator delete( void *ptr, node_allocator_type &a ) { return a.deallocate(static_cast<node*>(ptr),1); }
    };

    void delete_node( node_base *n ) {
        my_allocator.destroy( static_cast<node*>(n) );
        my_allocator.deallocate( static_cast<node*>(n), 1);
    }

    node *search_bucket( const key_type &key, bucket *b ) const {
        node *n = static_cast<node*>( b->node_list );
        while( is_valid(n) && !my_hash_compare.equal(key, n->item.first) )
            n = static_cast<node*>( n->next );
        __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
        return n;
    }

    //! bucket accessor: finds a bucket, optionally rehashes it, and acquires its lock
    class bucket_accessor : public bucket::scoped_t {
        bucket *my_b;
    public:
        bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
        //! find a bucket by masked hashcode, optionally rehash it, and acquire the lock
        inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
            my_b = base->get_bucket( h );
            // the acquire-load of node_list doubles as a pre-lock check for a pending rehash
            if( itt_load_word_with_acquire(my_b->node_list) == internal::rehash_req
                && try_acquire( my_b->mutex, /*write=*/true ) )
            {
                if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); // recursive rehashing
            }
            else bucket::scoped_t::acquire( my_b->mutex, writer );
            __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL);
        }
        //! check whether bucket is locked for write
        bool is_writer() { return bucket::scoped_t::is_writer; }
        //! get bucket pointer
        bucket *operator() () { return my_b; }
    };

    //! Rehash the parent bucket's chain into b_new, which must be write-locked.
    void rehash_bucket( bucket *b_new, const hashcode_t h ) {
        __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
        __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
        __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed
        hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
#if __TBB_STATISTICS
        my_info_rehashes++; // invocations of rehash_bucket
#endif
        bucket_accessor b_old( this, h & mask );

        mask = (mask<<1) | 1; // get full mask for new bucket
        __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
    restart:
        for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
            hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->item.first );
#if TBB_USE_ASSERT
            hashcode_t bmask = h & (mask>>1);
            bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
            __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
#endif
            if( (c & mask) == h ) { // the item belongs to the new bucket
                if( !b_old.is_writer() )
                    if( !b_old.upgrade_to_writer() ) {
                        goto restart; // node pointer can be invalid due to concurrent erase
                    }
                *p = n->next; // exclude from b_old
                add_to_bucket( b_new, n );
            } else p = &n->next; // iterate to next item
        }
    }

public:

    class accessor;
    //! Combination of a node pointer and a reader lock on its mutex: allows read access to an element.
    class const_accessor : private node::scoped_t /*which derives from no_copy*/ {
        friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
        friend class accessor;
    public:
        //! Type of value
        typedef const typename concurrent_hash_map::value_type value_type;

        //! True if result is empty, i.e. not attached to an element.
        bool empty() const {return !my_node;}

        //! Detach from the element and release its lock.
        void release() {
            if( my_node ) {
                node::scoped_t::release();
                my_node = 0;
            }
        }

        //! Return reference to associated value in hash table.
        const_reference operator*() const {
            __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
            return my_node->item;
        }

        //! Return pointer to associated value in hash table.
        const_pointer operator->() const {
            return &operator*();
        }

        //! Create empty result
        const_accessor() : my_node(NULL) {}

        //! Destroy result after releasing the underlying reference.
        ~const_accessor() {
            my_node = NULL; // the base scoped_t's destructor releases the lock
        }
    protected:
        bool is_writer() { return node::scoped_t::is_writer; }
        node *my_node;
        hashcode_t my_hash;
    };

    //! Combination of a node pointer and a writer lock on its mutex: allows write access to an element.
    class accessor: public const_accessor {
    public:
        //! Type of value
        typedef typename concurrent_hash_map::value_type value_type;

        //! Return reference to associated value in hash table.
        reference operator*() const {
            __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
            return this->my_node->item;
        }

        //! Return pointer to associated value in hash table.
        pointer operator->() const {
            return &operator*();
        }
    };
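
    // Illustrative sketch (not part of the header): typical accessor usage.
    // An accessor write-locks one element; a const_accessor read-locks it. The
    // lock is held until the accessor is released or destroyed, so keep scopes tight:
    //
    //   typedef tbb::concurrent_hash_map<std::string,int> table_t;
    //   table_t table;
    //   {
    //       table_t::accessor a;
    //       table.insert( a, "key" );  // inserts ("key", 0) if the key is absent
    //       a->second += 1;            // safe: the element is write-locked
    //   }                              // lock released here
    //   {
    //       table_t::const_accessor ca;
    //       if( table.find( ca, "key" ) ) {
    //           int v = ca->second;    // safe: the element is read-locked
    //       }
    //   }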

    //! Construct empty table.
    concurrent_hash_map(const allocator_type &a = allocator_type())
        : internal::hash_map_base(), my_allocator(a)
    {}

    //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
    concurrent_hash_map(size_type n, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        reserve( n );
    }

    //! Copy constructor
    concurrent_hash_map( const concurrent_hash_map& table, const allocator_type &a = allocator_type())
        : internal::hash_map_base(), my_allocator(a)
    {
        internal_copy(table);
    }

    //! Construction with copying iteration range and given allocator instance
    template<typename I>
    concurrent_hash_map(I first, I last, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        reserve( std::distance(first, last) );
        internal_copy(first, last);
    }

    //! Assignment
    concurrent_hash_map& operator=( const concurrent_hash_map& table ) {
        if( this!=&table ) {
            clear();
            internal_copy(table);
        }
        return *this;
    }

    //! Rehashes and optionally resizes the whole table.
    /** Useful to optimize performance before or after concurrent operations.
        Also enables using find() and count() concurrent methods in a serial context. */
    void rehash(size_type n = 0);

    //! Clear table
    void clear();

    //! Clear table and destroy it.
    ~concurrent_hash_map() { clear(); }

    //------------------------------------------------------------------------
    // Parallel algorithm support
    //------------------------------------------------------------------------
    range_type range( size_type grainsize=1 ) {
        return range_type( *this, grainsize );
    }
    const_range_type range( size_type grainsize=1 ) const {
        return const_range_type( *this, grainsize );
    }

    //------------------------------------------------------------------------
    // STL support - not thread-safe methods
    //------------------------------------------------------------------------
    iterator begin() { return iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }
    iterator end() { return iterator( *this, 0, 0, 0 ); }
    const_iterator begin() const { return const_iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }
    const_iterator end() const { return const_iterator( *this, 0, 0, 0 ); }
    std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range( key, end() ); }
    std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range( key, end() ); }

    //! Number of items in table.
    size_type size() const { return my_size; }

    //! True if size()==0.
    bool empty() const { return my_size == 0; }

    //! Upper bound on size.
    size_type max_size() const { return (~size_type(0))/sizeof(node); }

    //! Returns the current number of buckets
    size_type bucket_count() const { return my_mask+1; }

    //! return allocator object
    allocator_type get_allocator() const { return this->my_allocator; }

    //! swap two instances. Iterators are invalidated
    void swap(concurrent_hash_map &table);

    //------------------------------------------------------------------------
    // concurrent map operations
    //------------------------------------------------------------------------

    //! Return count of items (0 or 1)
    size_type count( const Key &key ) const {
        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false );
    }

    //! Find item and acquire a read lock on the item.
    /** Returns true if item is found, false otherwise. */
    bool find( const_accessor &result, const Key &key ) const {
        result.release();
        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false );
    }

    //! Find item and acquire a write lock on the item.
    /** Returns true if item is found, false otherwise. */
    bool find( accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/false, key, NULL, &result, /*write=*/true );
    }

    //! Insert item (if not already present) and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/true, key, NULL, &result, /*write=*/false );
    }

    //! Insert item (if not already present) and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/true, key, NULL, &result, /*write=*/true );
    }

    //! Insert item by copying if there is no such key present already and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const value_type &value ) {
        result.release();
        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false );
    }

    //! Insert item by copying if there is no such key present already and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const value_type &value ) {
        result.release();
        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true );
    }

    //! Insert item by copying if there is no such key present already
    /** Returns true if item is inserted. */
    bool insert( const value_type &value ) {
        return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false );
    }

    //! Insert range [first, last)
    template<typename I>
    void insert(I first, I last) {
        for(; first != last; ++first)
            insert( *first );
    }

    //! Erase item.
    /** Returns true if the item was erased by this call specifically. */
    bool erase( const Key& key );

    //! Erase item by const_accessor.
    /** Returns true if the item was erased by this call specifically. */
    bool erase( const_accessor& item_accessor ) {
        return exclude( item_accessor );
    }

    //! Erase item by accessor.
    /** Returns true if the item was erased by this call specifically. */
    bool erase( accessor& item_accessor ) {
        return exclude( item_accessor );
    }
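
    // Illustrative sketch (not part of the header): the two erase forms. Erasing
    // through an accessor is useful when the decision depends on the value, since
    // the element stays locked while it is examined:
    //
    //   typedef tbb::concurrent_hash_map<std::string,int> table_t;
    //
    //   bool erase_if_zero( table_t &table, const std::string &key ) {
    //       table_t::accessor a;
    //       if( table.find( a, key ) && a->second == 0 )
    //           return table.erase( a );  // releases a; true if this call erased the item
    //       return false;
    //   }
    //
    //   // By key, without inspecting the value:
    //   // table.erase( "key" );  // true iff this call removed the item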

protected:
    //! Insert or find item and optionally acquire a lock on the item.
    bool lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write );

    //! delete item by accessor
    bool exclude( const_accessor &item_accessor );

    //! Returns an iterator pair delimiting the item with the given key, or (end,end) if absent.
    template<typename I>
    std::pair<I, I> internal_equal_range( const Key& key, I end ) const;

    //! Copy "source" to *this, where *this must start out empty
    void internal_copy( const concurrent_hash_map& source );

    template<typename I>
    void internal_copy(I first, I last);

    //! Fast find when no concurrent erasure is used. For internal use inside TBB only!
    /** Returns a pointer to the item with the given key, or NULL if no such item exists.
        Must not be called concurrently with erasure operations. */
    const_pointer internal_fast_find( const Key& key ) const {
        hashcode_t h = my_hash_compare.hash( key );
        hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
        node *n;
    restart:
        __TBB_ASSERT((m&(m+1))==0, NULL);
        bucket *b = get_bucket( h & m );
        // the acquire-load of node_list doubles as a pre-lock check for a pending rehash
        if( itt_load_word_with_acquire(b->node_list) == internal::rehash_req )
        {
            bucket::scoped_t lock;
            if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
                if( b->node_list == internal::rehash_req)
                    const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); // recursive rehashing
            }
            else lock.acquire( b->mutex, /*write=*/false );
            __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL);
        }
        n = search_bucket( key, b );
        if( n )
            return &n->item;
        else if( check_mask_race( h, m ) )
            goto restart;
        return 0;
    }
};

#if _MSC_VER && !defined(__INTEL_COMPILER)
// Suppress "conditional expression is constant" warning.
#pragma warning( push )
#pragma warning( disable: 4127 )
#endif

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write ) {
    __TBB_ASSERT( !result || !result->my_node, NULL );
    bool return_value;
    hashcode_t const h = my_hash_compare.hash( key );
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
    segment_index_t grow_segment = 0;
    node *n, *tmp_n = 0;
restart:
    {//lock scope
        __TBB_ASSERT((m&(m+1))==0, NULL);
        return_value = false;
        // get bucket
        bucket_accessor b( this, h & m );

        // find a node
        n = search_bucket( key, b() );
        if( op_insert ) {
            // [opt] insert a key
            if( !n ) {
                if( !tmp_n ) {
                    if(t) tmp_n = new( my_allocator ) node(key, *t);
                    else  tmp_n = new( my_allocator ) node(key);
                }
                if( !b.is_writer() && !b.upgrade_to_writer() ) { // the lock was released during the upgrade
                    // Rerun search_bucket, in case another thread inserted the item meanwhile.
                    n = search_bucket( key, b() );
                    if( is_valid(n) ) { // unfortunately, it did
                        b.downgrade_to_reader();
                        goto exists;
                    }
                }
                if( check_mask_race(h, m) ) // perhaps, a different bucket was scanned
                    goto restart; // b.release() is done in ~b()
                // insert and set flag to grow the container
                grow_segment = insert_new_node( b(), n = tmp_n, m );
                tmp_n = 0;
                return_value = true;
            }
        } else { // find or count
            if( !n ) {
                if( check_mask_race( h, m ) ) // perhaps, a different bucket was scanned
                    goto restart; // b.release() is done in ~b()
                return false;
            }
            return_value = true;
        }
    exists:
        if( !result ) goto check_growth;
        // acquire the item
        if( !result->try_acquire( n->mutex, write ) ) {
            // we are unlucky, prepare for longer wait
            tbb::internal::atomic_backoff trials;
            do {
                if( !trials.bounded_pause() ) {
                    // the wait takes really long, restart the operation
                    b.release();
                    __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
                    __TBB_Yield();
                    m = (hashcode_t) itt_load_word_with_acquire( my_mask );
                    goto restart;
                }
            } while( !result->try_acquire( n->mutex, write ) );
        }
    }//lock scope
    result->my_node = n;
    result->my_hash = h;
check_growth:
    // [opt] grow the container
    if( grow_segment ) {
#if __TBB_STATISTICS
        my_info_resizes++; // concurrent ones
#endif
        enable_segment( grow_segment );
    }
    if( tmp_n ) // if op_insert only
        delete_node( tmp_n );
    return return_value;
}

template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
    hashcode_t h = my_hash_compare.hash( key );
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, NULL);
    h &= m;
    bucket *b = get_bucket( h );
    while( b->node_list == internal::rehash_req ) {
        m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
        b = get_bucket( h &= m );
    }
    node *n = search_bucket( key, b );
    if( !n )
        return std::make_pair(end_, end_);
    iterator lower(*this, h, b, n), upper(lower);
    return std::make_pair(lower, ++upper);
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor ) {
    __TBB_ASSERT( item_accessor.my_node, NULL );
    node_base *const n = item_accessor.my_node;
    hashcode_t const h = item_accessor.my_hash;
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
    do {
        // get bucket
        bucket_accessor b( this, h & m, /*writer=*/true );
        node_base **p = &b()->node_list;
        while( *p && *p != n )
            p = &(*p)->next;
        if( !*p ) { // someone else was first
            if( check_mask_race( h, m ) )
                continue;
            item_accessor.release();
            return false;
        }
        __TBB_ASSERT( *p == n, NULL );
        *p = n->next; // remove from container
        my_size--;
        break;
    } while(true);
    if( !item_accessor.is_writer() ) // need the exclusive lock
        item_accessor.upgrade_to_writer(); // return value means nothing here
    item_accessor.release();
    delete_node( n ); // Only one thread can delete it due to the write lock on the bucket
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
    node_base *n;
    hashcode_t const h = my_hash_compare.hash( key );
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
restart:
    {//lock scope
        // get bucket
        bucket_accessor b( this, h & m );
    search:
        node_base **p = &b()->node_list;
        n = *p;
        while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->item.first ) ) {
            p = &n->next;
            n = *p;
        }
        if( !n ) { // not found, but the mask could have changed
            if( check_mask_race( h, m ) )
                goto restart;
            return false;
        }
        else if( !b.is_writer() && !b.upgrade_to_writer() ) {
            if( check_mask_race( h, m ) ) // contended upgrade, check mask
                goto restart;
            goto search;
        }
        *p = n->next;
        my_size--;
    }
    {
        typename node::scoped_t item_locker( n->mutex, /*write=*/true );
    }
    // note: there should be no threads pretending to acquire this mutex again; do not try to upgrade const_accessor!
    delete_node( n ); // Only one thread can delete it
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {
    std::swap(this->my_allocator, table.my_allocator);
    std::swap(this->my_hash_compare, table.my_hash_compare);
    internal_swap(table);
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::rehash(size_type sz) {
    reserve( sz );
    hashcode_t mask = my_mask;
    hashcode_t b = (mask+1)>>1; // size or first index of the last segment
    __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2
    bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
    for(; b <= mask; b++, bp++ ) {
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
        if( n == internal::rehash_req ) { // rehash bucket; conditional because rehashing of a previous bucket may affect this one
            hashcode_t h = b; bucket *b_old = bp;
            do {
                __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
                hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
                b_old = get_bucket( h &= m );
            } while( b_old->node_list == internal::rehash_req );
            // now h is the index of the root rehashed bucket b_old
            mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
            for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
                hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->item.first );
                if( (c & mask) != h ) { // should be rehashed
                    *p = q->next; // exclude from b_old
                    bucket *b_new = get_bucket( c & mask );
                    __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
                    add_to_bucket( b_new, q );
                } else p = &q->next; // iterate to next item
            }
        }
    }
#if TBB_USE_PERFORMANCE_WARNINGS
    int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
    static bool reported = false;
#endif
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
    for( b = 0; b <= mask; b++ ) { // re-scan all buckets to verify consistency
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
#if TBB_USE_PERFORMANCE_WARNINGS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n->next ) overpopulated_buckets++;
#endif
#if TBB_USE_ASSERT
        for( ; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first ) & mask;
            __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
#if TBB_USE_PERFORMANCE_WARNINGS
    if( buckets > current_size ) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
            typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, NULL);
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
    int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
    static bool reported = false;
#endif
    bucket *bp = 0;
    // check consistency
    for( segment_index_t b = 0; b <= m; b++ ) {
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n == internal::rehash_req ) buckets--;
        else if( n->next ) overpopulated_buckets++;
#endif
#if __TBB_EXTRA_DEBUG
        for(; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first );
            h &= m;
            __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#if __TBB_STATISTICS
    printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"
        " concurrent: resizes=%u rehashes=%u restarts=%u\n",
        current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,
        unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );
    my_info_resizes = 0; // concurrent ones
    my_info_restarts = 0; // race collisions
    my_info_rehashes = 0; // invocations of rehash_bucket
#endif
    if( buckets > current_size ) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
            typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
    my_size = 0;
    segment_index_t s = segment_index_of( m );
    __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
    cache_aligned_allocator<bucket> alloc;
    do { // retire the segments from the last one down to the first
        __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
        segment_ptr_t buckets_ptr = my_table[s];
        size_type sz = segment_size( s ? s : 1 );
        for( segment_index_t i = 0; i < sz; i++ )
            for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
                buckets_ptr[i].node_list = n->next;
                delete_node( n );
            }
        if( s >= first_block ) // the first segment or the next
            alloc.deallocate( buckets_ptr, sz );
        else if( s == embedded_block && embedded_block != first_block )
            alloc.deallocate( buckets_ptr, segment_size(first_block)-embedded_buckets );
        if( s >= embedded_block ) my_table[s] = 0;
    } while(s-- > 0);
    my_mask = embedded_buckets - 1;
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
    reserve( source.my_size );
    hashcode_t mask = source.my_mask;
    if( my_mask == mask ) { // optimized version: copy bucket by bucket
        bucket *dst = 0, *src = 0;
        bool rehash_required = false;
        for( hashcode_t k = 0; k <= mask; k++ ) {
            if( k & (k-2) ) { ++dst; ++src; } // not the beginning of a segment
            else { dst = get_bucket( k ); src = source.get_bucket( k ); }
            __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
            node *n = static_cast<node*>( src->node_list );
            if( n == internal::rehash_req ) { // source bucket is not rehashed, copy the flag
                rehash_required = true;
                dst->node_list = internal::rehash_req;
            } else for(; n; n = static_cast<node*>( n->next ) ) {
                add_to_bucket( dst, new( my_allocator ) node(n->item.first, n->item.second) );
                ++my_size;
            }
        }
        if( rehash_required ) rehash();
    } else internal_copy( source.begin(), source.end() );
}

template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last) {
    hashcode_t m = my_mask;
    for(; first != last; ++first) {
        hashcode_t h = my_hash_compare.hash( first->first );
        bucket *b = get_bucket( h & m );
        __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
        node *n = new( my_allocator ) node(first->first, first->second);
        add_to_bucket( b, n );
        ++my_size;
    }
}

} // namespace interface5

using interface5::concurrent_hash_map;


template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
    if(a.size() != b.size()) return false;
    typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
    typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
    for(; i != i_end; ++i) {
        j = b.equal_range(i->first).first;
        if( j == j_end || !(i->second == j->second) ) return false;
    }
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
{ return !(a == b); }

template<typename Key, typename T, typename HashCompare, typename A>
inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
{ a.swap( b ); }
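
// Illustrative sketch (not part of the header): putting the pieces together in
// a word-count table filled concurrently. Assumes "tbb/parallel_for.h" and
// "tbb/blocked_range.h" are included and C++11 lambdas are available; the
// function and variable names are hypothetical.
//
//   typedef tbb::concurrent_hash_map<std::string,int> string_table;
//
//   void count_words( const std::vector<std::string> &words, string_table &table ) {
//       tbb::parallel_for( tbb::blocked_range<size_t>( 0, words.size() ),
//           [&]( const tbb::blocked_range<size_t> &r ) {
//               for( size_t i = r.begin(); i != r.end(); ++i ) {
//                   string_table::accessor a;
//                   table.insert( a, words[i] ); // new keys start with value 0
//                   a->second += 1;              // element stays locked while updated
//               }
//           } );
//   }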

#if _MSC_VER && !defined(__INTEL_COMPILER)
#pragma warning( pop )
#endif // warning 4127 is back

} // namespace tbb

#endif /* __TBB_concurrent_hash_map_H */