enumerable_thread_specific.h

00001 /*
00002     Copyright 2005-2011 Intel Corporation.  All Rights Reserved.
00003 
00004     The source code contained or described herein and all documents related
00005     to the source code ("Material") are owned by Intel Corporation or its
00006     suppliers or licensors.  Title to the Material remains with Intel
00007     Corporation or its suppliers and licensors.  The Material is protected
00008     by worldwide copyright laws and treaty provisions.  No part of the
00009     Material may be used, copied, reproduced, modified, published, uploaded,
00010     posted, transmitted, distributed, or disclosed in any way without
00011     Intel's prior express written permission.
00012 
00013     No license under any patent, copyright, trade secret or other
00014     intellectual property right is granted to or conferred upon you by
00015     disclosure or delivery of the Materials, either expressly, by
00016     implication, inducement, estoppel or otherwise.  Any license under such
00017     intellectual property rights must be express and approved by Intel in
00018     writing.
00019 */
00020 
00021 #ifndef __TBB_enumerable_thread_specific_H
00022 #define __TBB_enumerable_thread_specific_H
00023 
00024 #include "concurrent_vector.h"
00025 #include "tbb_thread.h"
00026 #include "tbb_allocator.h"
00027 #include "cache_aligned_allocator.h"
00028 #include "aligned_space.h"
00029 #include <string.h>  // for memcpy
00030 
00031 #if _WIN32||_WIN64
00032 #include "machine/windows_api.h"
00033 #else
00034 #include <pthread.h>
00035 #endif
00036 
00037 namespace tbb {
00038 
//! Trait selecting how enumerable_thread_specific maps threads to elements:
//! ets_key_per_instance uses a native TLS key per container instance; ets_no_key uses only the hash table.
enum ets_key_usage_type { ets_key_per_instance, ets_no_key };
00041 
00042 namespace interface6 {
00043  
00045     namespace internal { 
00046 
00047         template<ets_key_usage_type ETS_key_type>
00048         class ets_base: tbb::internal::no_copy {
00049         protected:
00050 #if _WIN32||_WIN64
00051             typedef DWORD key_type;
00052 #else
00053             typedef pthread_t key_type;
00054 #endif
00055 #if __TBB_GCC_3_3_PROTECTED_BROKEN
00056         public:
00057 #endif
00058             struct slot;
00059 
00060             struct array {
00061                 array* next;
00062                 size_t lg_size;
00063                 slot& at( size_t k ) {
00064                     return ((slot*)(void*)(this+1))[k];
00065                 }
00066                 size_t size() const {return (size_t)1<<lg_size;}
00067                 size_t mask() const {return size()-1;}
00068                 size_t start( size_t h ) const {
00069                     return h>>(8*sizeof(size_t)-lg_size);
00070                 }
00071             };
00072             struct slot {
00073                 key_type key;
00074                 void* ptr;
00075                 bool empty() const {return !key;}
00076                 bool match( key_type k ) const {return key==k;}
00077                 bool claim( key_type k ) {
00078                     __TBB_ASSERT(sizeof(tbb::atomic<key_type>)==sizeof(key_type), NULL);
00079                     return tbb::internal::punned_cast<tbb::atomic<key_type>*>(&key)->compare_and_swap(k,0)==0;
00080                 }
00081             };
00082 #if __TBB_GCC_3_3_PROTECTED_BROKEN
00083         protected:
00084 #endif
00085         
00086             static key_type key_of_current_thread() {
00087                tbb::tbb_thread::id id = tbb::this_tbb_thread::get_id();
00088                key_type k;
00089                memcpy( &k, &id, sizeof(k) );
00090                return k;
00091             }
00092 
00094 
00096             atomic<array*> my_root;
00097             atomic<size_t> my_count;
00098             virtual void* create_local() = 0;
00099             virtual void* create_array(size_t _size) = 0;  // _size in bytes
00100             virtual void free_array(void* ptr, size_t _size) = 0; // _size in bytes
00101             array* allocate( size_t lg_size ) {
00102                 size_t n = 1<<lg_size;  
00103                 array* a = static_cast<array*>(create_array( sizeof(array)+n*sizeof(slot) ));
00104                 a->lg_size = lg_size;
00105                 std::memset( a+1, 0, n*sizeof(slot) );
00106                 return a;
00107             }
00108             void free(array* a) {
00109                 size_t n = 1<<(a->lg_size);  
00110                 free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) );
00111             }
00112             static size_t hash( key_type k ) {
00113                 // Multiplicative hashing.  Client should use *upper* bits.
00114                 // casts required for Mac gcc4.* compiler
00115 #if __TBB_WORDSIZE == 4
00116                 return uintptr_t(k)*0x9E3779B9;
00117 #else
00118                 return uintptr_t(k)*0x9E3779B97F4A7C15;
00119 #endif 
00120             } 
00121         
00122             ets_base() {my_root=NULL; my_count=0;}
00123             virtual ~ets_base();  // g++ complains if this is not virtual...
00124             void* table_lookup( bool& exists );
00125             void table_clear();
00126             slot& table_find( key_type k ) {
00127                 size_t h = hash(k);
00128                 array* r = my_root;
00129                 size_t mask = r->mask();
00130                 for(size_t i = r->start(h);;i=(i+1)&mask) {
00131                     slot& s = r->at(i);
00132                     if( s.empty() || s.match(k) )
00133                         return s;
00134                 }
00135             }
00136             void table_reserve_for_copy( const ets_base& other ) {
00137                 __TBB_ASSERT(!my_root,NULL);
00138                 __TBB_ASSERT(!my_count,NULL);
00139                 if( other.my_root ) {
00140                     array* a = allocate(other.my_root->lg_size);
00141                     a->next = NULL;
00142                     my_root = a;
00143                     my_count = other.my_count;
00144                 }
00145             }
00146         };
00147 
        template<ets_key_usage_type ETS_key_type>
        ets_base<ETS_key_type>::~ets_base() {
            // The derived class must have already released all arrays via table_clear().
            __TBB_ASSERT(!my_root, NULL);
        }
00152 
00153         template<ets_key_usage_type ETS_key_type>
00154         void ets_base<ETS_key_type>::table_clear() {
00155             while( array* r = my_root ) {
00156                 my_root = r->next;
00157                 free(r);
00158             }
00159             my_count = 0;
00160         }
00161                 
        //! Find, or lazily create, the element belonging to the calling thread.
        /** Sets 'exists' to true when the element was already present, false when it
            was created by this call.  Returns a pointer to the element.  Lock-free:
            growth races are resolved with compare_and_swap on my_root, and slot
            ownership races with slot::claim(). */
        template<ets_key_usage_type ETS_key_type>
        void* ets_base<ETS_key_type>::table_lookup( bool& exists ) {
            const key_type k = key_of_current_thread(); 

            __TBB_ASSERT(k!=0,NULL);
            void* found;
            size_t h = hash(k);
            // Search every array in the list, newest first.
            for( array* r=my_root; r; r=r->next ) {
                size_t mask=r->mask();
                // Linear probing: an empty slot terminates the probe chain.
                for(size_t i = r->start(h); ;i=(i+1)&mask) {
                    slot& s = r->at(i);
                    if( s.empty() ) break;
                    if( s.match(k) ) {
                        if( r==my_root ) {
                            // Success at top level
                            exists = true;
                            return s.ptr;
                        } else {
                            // Success at some other level.  Need to insert at top level.
                            exists = true;
                            found = s.ptr;
                            goto insert;
                        }
                    }
                }
            }
            // Key does not yet exist
            exists = false;
            found = create_local();
            {
                size_t c = ++my_count;
                array* r = my_root;
                // Grow (or create) the top-level array when it would be over half full.
                if( !r || c>r->size()/2 ) {
                    size_t s = r ? r->lg_size : 2;
                    while( c>size_t(1)<<(s-1) ) ++s;
                    array* a = allocate(s);
                    for(;;) {
                        a->next = my_root;
                        // Try to publish our array as the new head.
                        array* new_r = my_root.compare_and_swap(a,r);
                        if( new_r==r ) break;
                        if( new_r->lg_size>=s ) {
                            // Another thread inserted an equal or bigger array, so our array is superfluous.
                            free(a);
                            break;
                        }
                        // A smaller array won the race; retry on top of it.
                        r = new_r;
                    }
                }
            }
        insert:
            // Guaranteed to be room for it, and it is not present, so search for empty slot and grab it.
            array* ir = my_root;
            size_t mask = ir->mask();
            for(size_t i = ir->start(h);;i=(i+1)&mask) {
                slot& s = ir->at(i);
                if( s.empty() ) {
                    if( s.claim(k) ) {
                        s.ptr = found;
                        return found;
                    }
                }
            }
        }
00225 
        //! Specialization that consults native thread-local storage before the hash table.
        /** Each instance owns one native TLS key.  The TLS value caches the result of
            the (slower) hash-table lookup for the calling thread. */
        template <>
        class ets_base<ets_key_per_instance>: protected ets_base<ets_no_key> {
            typedef ets_base<ets_no_key> super;
#if _WIN32||_WIN64
            typedef DWORD tls_key_t;
            void create_key() { my_key = TlsAlloc(); }
            void destroy_key() { TlsFree(my_key); }
            void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); }
            void* get_tls() { return (void *)TlsGetValue(my_key); }
#else
            typedef pthread_key_t tls_key_t;
            void create_key() { pthread_key_create(&my_key, NULL); }
            void destroy_key() { pthread_key_delete(my_key); }
            void set_tls( void * value ) const { pthread_setspecific(my_key, value); }
            void* get_tls() const { return pthread_getspecific(my_key); }
#endif
            tls_key_t my_key;   // native TLS key owned by this instance
            virtual void* create_local() = 0;
            virtual void* create_array(size_t _size) = 0;  // _size in bytes
            virtual void free_array(void* ptr, size_t _size) = 0; // size in bytes
        public:
            ets_base() {create_key();}
            ~ets_base() {destroy_key();}
            //! Fast path: native TLS first; on a miss, fall back to the table and cache the result.
            void* table_lookup( bool& exists ) {
                void* found = get_tls();
                if( found ) {
                    exists=true;
                } else {
                    found = super::table_lookup(exists);
                    set_tls(found);
                }
                return found; 
            }
            //! Clear the table; the TLS key is recycled so stale per-thread values are dropped.
            void table_clear() {
                destroy_key();
                create_key(); 
                super::table_clear();
            }
        };
00266 
        //! Random-access iterator over the elements of the underlying concurrent vector.
        /** Value is T or const T.  The address of the current element is cached in
            my_value and invalidated (set to NULL) whenever my_index changes. */
        template< typename Container, typename Value >
        class enumerable_thread_specific_iterator 
#if defined(_WIN64) && defined(_MSC_VER) 
            // Ensure that Microsoft's internal template function _Val_type works correctly.
            : public std::iterator<std::random_access_iterator_tag,Value>
#endif /* defined(_WIN64) && defined(_MSC_VER) */
        {
        
            Container *my_container;
            typename Container::size_type my_index;
            // Cached address of element my_index; NULL means "not computed yet".
            // mutable: refreshed lazily from the const operator*().
            mutable Value *my_value;
        
            template<typename C, typename T>
            friend enumerable_thread_specific_iterator<C,T> operator+( ptrdiff_t offset, 
                                                                       const enumerable_thread_specific_iterator<C,T>& v );
        
            template<typename C, typename T, typename U>
            friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i, 
                                    const enumerable_thread_specific_iterator<C,U>& j );
        
            template<typename C, typename T, typename U>
            friend bool operator<( const enumerable_thread_specific_iterator<C,T>& i, 
                                   const enumerable_thread_specific_iterator<C,U>& j );
        
            template<typename C, typename T, typename U>
            friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i, const enumerable_thread_specific_iterator<C,U>& j );
            
            template<typename C, typename U> 
            friend class enumerable_thread_specific_iterator;
        
            public:
        
            //! Iterator over 'container' positioned at 'index'.
            enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) : 
                my_container(&const_cast<Container &>(container)), my_index(index), my_value(NULL) {}
        
            //! Default-constructed iterator is detached from any container.
            enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {}
        
            //! Converting copy constructor (e.g. iterator -> const_iterator).
            template<typename U>
            enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator<Container, U>& other ) :
                    my_container( other.my_container ), my_index( other.my_index), my_value( const_cast<Value *>(other.my_value) ) {}
        
            enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const {
                return enumerable_thread_specific_iterator(*my_container, my_index + offset);
            }
        
            enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) {
                my_index += offset;
                my_value = NULL;    // position changed: drop the cached address
                return *this;
            }
        
            enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const {
                return enumerable_thread_specific_iterator( *my_container, my_index-offset );
            }
        
            enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) {
                my_index -= offset;
                my_value = NULL;
                return *this;
            }
        
            //! Dereference, refreshing the cached element address if stale.
            Value& operator*() const {
                Value* value = my_value;
                if( !value ) {
                    value = my_value = reinterpret_cast<Value *>(&(*my_container)[my_index].value);
                }
                __TBB_ASSERT( value==reinterpret_cast<Value *>(&(*my_container)[my_index].value), "corrupt cache" );
                return *value;
            }
        
            Value& operator[]( ptrdiff_t k ) const {
               return (*my_container)[my_index + k].value;
            }
        
            Value* operator->() const {return &operator*();}
        
            enumerable_thread_specific_iterator& operator++() {
                ++my_index;
                my_value = NULL;
                return *this;
            }
        
            enumerable_thread_specific_iterator& operator--() {
                --my_index;
                my_value = NULL;
                return *this;
            }
        
            //! Post increment
            enumerable_thread_specific_iterator operator++(int) {
                enumerable_thread_specific_iterator result = *this;
                ++my_index;
                my_value = NULL;
                return result;
            }
        
            //! Post decrement
            enumerable_thread_specific_iterator operator--(int) {
                enumerable_thread_specific_iterator result = *this;
                --my_index;
                my_value = NULL;
                return result;
            }
        
            // STL support
            typedef ptrdiff_t difference_type;
            typedef Value value_type;
            typedef Value* pointer;
            typedef Value& reference;
            typedef std::random_access_iterator_tag iterator_category;
        };
00381         
00382         template<typename Container, typename T>
00383         enumerable_thread_specific_iterator<Container,T> operator+( ptrdiff_t offset, 
00384                                                                     const enumerable_thread_specific_iterator<Container,T>& v ) {
00385             return enumerable_thread_specific_iterator<Container,T>( v.my_container, v.my_index + offset );
00386         }
00387         
00388         template<typename Container, typename T, typename U>
00389         bool operator==( const enumerable_thread_specific_iterator<Container,T>& i, 
00390                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00391             return i.my_index==j.my_index && i.my_container == j.my_container;
00392         }
00393         
00394         template<typename Container, typename T, typename U>
00395         bool operator!=( const enumerable_thread_specific_iterator<Container,T>& i, 
00396                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00397             return !(i==j);
00398         }
00399         
00400         template<typename Container, typename T, typename U>
00401         bool operator<( const enumerable_thread_specific_iterator<Container,T>& i, 
00402                         const enumerable_thread_specific_iterator<Container,U>& j ) {
00403             return i.my_index<j.my_index;
00404         }
00405         
00406         template<typename Container, typename T, typename U>
00407         bool operator>( const enumerable_thread_specific_iterator<Container,T>& i, 
00408                         const enumerable_thread_specific_iterator<Container,U>& j ) {
00409             return j<i;
00410         }
00411         
00412         template<typename Container, typename T, typename U>
00413         bool operator>=( const enumerable_thread_specific_iterator<Container,T>& i, 
00414                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00415             return !(i<j);
00416         }
00417         
00418         template<typename Container, typename T, typename U>
00419         bool operator<=( const enumerable_thread_specific_iterator<Container,T>& i, 
00420                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00421             return !(j<i);
00422         }
00423         
00424         template<typename Container, typename T, typename U>
00425         ptrdiff_t operator-( const enumerable_thread_specific_iterator<Container,T>& i, 
00426                              const enumerable_thread_specific_iterator<Container,U>& j ) {
00427             return i.my_index-j.my_index;
00428         }
00429 
00430     template<typename SegmentedContainer, typename Value >
00431         class segmented_iterator
00432 #if defined(_WIN64) && defined(_MSC_VER)
00433         : public std::iterator<std::input_iterator_tag, Value>
00434 #endif
00435         {
00436             template<typename C, typename T, typename U>
00437             friend bool operator==(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);
00438 
00439             template<typename C, typename T, typename U>
00440             friend bool operator!=(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);
00441             
00442             template<typename C, typename U> 
00443             friend class segmented_iterator;
00444 
00445             public:
00446 
00447                 segmented_iterator() {my_segcont = NULL;}
00448 
00449                 segmented_iterator( const SegmentedContainer& _segmented_container ) : 
00450                     my_segcont(const_cast<SegmentedContainer*>(&_segmented_container)),
00451                     outer_iter(my_segcont->end()) { }
00452 
00453                 ~segmented_iterator() {}
00454 
00455                 typedef typename SegmentedContainer::iterator outer_iterator;
00456                 typedef typename SegmentedContainer::value_type InnerContainer;
00457                 typedef typename InnerContainer::iterator inner_iterator;
00458 
00459                 // STL support
00460                 typedef ptrdiff_t difference_type;
00461                 typedef Value value_type;
00462                 typedef typename SegmentedContainer::size_type size_type;
00463                 typedef Value* pointer;
00464                 typedef Value& reference;
00465                 typedef std::input_iterator_tag iterator_category;
00466 
00467                 // Copy Constructor
00468                 template<typename U>
00469                 segmented_iterator(const segmented_iterator<SegmentedContainer, U>& other) :
00470                     my_segcont(other.my_segcont),
00471                     outer_iter(other.outer_iter),
00472                     // can we assign a default-constructed iterator to inner if we're at the end?
00473                     inner_iter(other.inner_iter)
00474                 {}
00475 
00476                 // assignment
00477                 template<typename U>
00478                 segmented_iterator& operator=( const segmented_iterator<SegmentedContainer, U>& other) {
00479                     if(this != &other) {
00480                         my_segcont = other.my_segcont;
00481                         outer_iter = other.outer_iter;
00482                         if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter;
00483                     }
00484                     return *this;
00485                 }
00486 
00487                 // allow assignment of outer iterator to segmented iterator.  Once it is
00488                 // assigned, move forward until a non-empty inner container is found or
00489                 // the end of the outer container is reached.
00490                 segmented_iterator& operator=(const outer_iterator& new_outer_iter) {
00491                     __TBB_ASSERT(my_segcont != NULL, NULL);
00492                     // check that this iterator points to something inside the segmented container
00493                     for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) {
00494                         if( !outer_iter->empty() ) {
00495                             inner_iter = outer_iter->begin();
00496                             break;
00497                         }
00498                     }
00499                     return *this;
00500                 }
00501 
00502                 // pre-increment
00503                 segmented_iterator& operator++() {
00504                     advance_me();
00505                     return *this;
00506                 }
00507 
00508                 // post-increment
00509                 segmented_iterator operator++(int) {
00510                     segmented_iterator tmp = *this;
00511                     operator++();
00512                     return tmp;
00513                 }
00514 
00515                 bool operator==(const outer_iterator& other_outer) const {
00516                     __TBB_ASSERT(my_segcont != NULL, NULL);
00517                     return (outer_iter == other_outer &&
00518                             (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin()));
00519                 }
00520 
00521                 bool operator!=(const outer_iterator& other_outer) const {
00522                     return !operator==(other_outer);
00523 
00524                 }
00525 
00526                 // (i)* RHS
00527                 reference operator*() const {
00528                     __TBB_ASSERT(my_segcont != NULL, NULL);
00529                     __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container");
00530                     __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // should never happen
00531                     return *inner_iter;
00532                 }
00533 
00534                 // i->
00535                 pointer operator->() const { return &operator*();}
00536 
00537             private:
00538                 SegmentedContainer*             my_segcont;
00539                 outer_iterator outer_iter;
00540                 inner_iterator inner_iter;
00541 
00542                 void advance_me() {
00543                     __TBB_ASSERT(my_segcont != NULL, NULL);
00544                     __TBB_ASSERT(outer_iter != my_segcont->end(), NULL); // not true if there are no inner containers
00545                     __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // not true if the inner containers are all empty.
00546                     ++inner_iter;
00547                     while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) {
00548                         inner_iter = outer_iter->begin();
00549                     }
00550                 }
00551         };    // segmented_iterator
00552 
00553         template<typename SegmentedContainer, typename T, typename U>
00554         bool operator==( const segmented_iterator<SegmentedContainer,T>& i, 
00555                          const segmented_iterator<SegmentedContainer,U>& j ) {
00556             if(i.my_segcont != j.my_segcont) return false;
00557             if(i.my_segcont == NULL) return true;
00558             if(i.outer_iter != j.outer_iter) return false;
00559             if(i.outer_iter == i.my_segcont->end()) return true;
00560             return i.inner_iter == j.inner_iter;
00561         }
00562 
00563         // !=
00564         template<typename SegmentedContainer, typename T, typename U>
00565         bool operator!=( const segmented_iterator<SegmentedContainer,T>& i, 
00566                          const segmented_iterator<SegmentedContainer,U>& j ) {
00567             return !(i==j);
00568         }
00569 
00570         template<typename T>
00571         struct destruct_only: tbb::internal::no_copy {
00572             tbb::aligned_space<T,1> value;
00573             ~destruct_only() {value.begin()[0].~T();}
00574         };
00575 
00576         template<typename T>
00577         struct construct_by_default: tbb::internal::no_assign {
00578             void construct(void*where) {new(where) T();} // C++ note: the () in T() ensure zero initialization.
00579             construct_by_default( int ) {}
00580         };
00581 
00582         template<typename T>
00583         struct construct_by_exemplar: tbb::internal::no_assign {
00584             const T exemplar;
00585             void construct(void*where) {new(where) T(exemplar);}
00586             construct_by_exemplar( const T& t ) : exemplar(t) {}
00587         };
00588 
00589         template<typename T, typename Finit>
00590         struct construct_by_finit: tbb::internal::no_assign {
00591             Finit f;
00592             void construct(void* where) {new(where) T(f());}
00593             construct_by_finit( const Finit& f_ ) : f(f_) {}
00594         };
00595 
        // storage for initialization function pointer
        //! Type-erased interface for constructing T elements; also knows how to
        //! copy and free itself (clone/destroy pair with the derived allocator).
        template<typename T>
        class callback_base {
        public:
            // Clone *this
            virtual callback_base* clone() = 0;
            // Destruct and free *this
            virtual void destroy() = 0;
            // Need virtual destructor to satisfy GCC compiler warning
            virtual ~callback_base() { }
            // Construct T at where
            virtual void construct(void* where) = 0;
        };
00609 
        //! Concrete callback that delegates construction to a Constructor policy.
        /** Instances are created only through make() and released only through
            destroy(); both use tbb_allocator, so clone/destroy stay paired with
            the same allocation scheme.  The constructor is private to enforce this. */
        template <typename T, typename Constructor>
        class callback_leaf: public callback_base<T>, Constructor {
            template<typename X> callback_leaf( const X& x ) : Constructor(x) {}  // private: use make()

            typedef typename tbb::tbb_allocator<callback_leaf> my_allocator_type;

            /*override*/ callback_base<T>* clone() {
                void* where = my_allocator_type().allocate(1);
                return new(where) callback_leaf(*this);
            }

            /*override*/ void destroy() {
                my_allocator_type().destroy(this);
                my_allocator_type().deallocate(this,1);
            }

            /*override*/ void construct(void* where) {
                Constructor::construct(where);
            }  
        public:
            //! Allocate and build a callback_leaf from constructor argument x.
            template<typename X>
            static callback_base<T>* make( const X& x ) {
                void* where = my_allocator_type().allocate(1);
                return new(where) callback_leaf(x);
            }
        };
00636 
00638 
        //! Raw storage for one U, padded so the element size is a multiple of the cache-line size.
        /** ModularSize is sizeof(U) mod NFS_MaxLineSize; when nonzero, the buffer is
            extended by (NFS_MaxLineSize - ModularSize) bytes.  The U is constructed
            into 'value' externally; unconstruct() runs its destructor. */
        template<typename U, size_t ModularSize>
        struct ets_element {
            char value[ModularSize==0 ? sizeof(U) : sizeof(U)+(tbb::internal::NFS_MaxLineSize-ModularSize)];
            //! Invoke U's destructor on the object previously constructed in 'value'.
            void unconstruct() {
                tbb::internal::punned_cast<U*>(&value)->~U();
            }
        };
00650 
00651     } // namespace internal
00653 
00655 
00674     template <typename T, 
00675               typename Allocator=cache_aligned_allocator<T>, 
00676               ets_key_usage_type ETS_key_type=ets_no_key > 
00677     class enumerable_thread_specific: internal::ets_base<ETS_key_type> { 
00678 
00679         template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;
00680     
00681         typedef internal::ets_element<T,sizeof(T)%tbb::internal::NFS_MaxLineSize> padded_element;
00682 
00684         template<typename I>
00685         class generic_range_type: public blocked_range<I> {
00686         public:
00687             typedef T value_type;
00688             typedef T& reference;
00689             typedef const T& const_reference;
00690             typedef I iterator;
00691             typedef ptrdiff_t difference_type;
00692             generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {} 
00693             template<typename U>
00694             generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {} 
00695             generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
00696         };
00697     
00698         typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type;
00699         typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type;
00700         
00701         internal::callback_base<T> *my_construct_callback;
00702 
00703         internal_collection_type my_locals;
00704    
00705         /*override*/ void* create_local() {
00706 #if TBB_DEPRECATED
00707             void* lref = &my_locals[my_locals.push_back(padded_element())];
00708 #else
00709             void* lref = &*my_locals.push_back(padded_element());
00710 #endif
00711             my_construct_callback->construct(lref);
00712             return lref;
00713         } 
00714 
00715         void unconstruct_locals() {
00716             for(typename internal_collection_type::iterator cvi = my_locals.begin(); cvi != my_locals.end(); ++cvi) {
00717                 cvi->unconstruct();
00718             }
00719         }
00720 
        // Allocator rebound to uintptr_t: backing storage for the base class's
        // flat lookup arrays.
        typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type;

        // _size is in bytes; allocation is rounded up to whole uintptr_t words.
        /*override*/ void* create_array(size_t _size) {
            size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
            return array_allocator_type().allocate(nelements);
        }
00728 
00729         /*override*/ void free_array( void* _ptr, size_t _size) {
00730             size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
00731             array_allocator_type().deallocate( reinterpret_cast<uintptr_t *>(_ptr),nelements);
00732         }
00733    
    public:
    
        //! Basic types.
        typedef Allocator allocator_type;
        typedef T value_type;
        typedef T& reference;
        typedef const T& const_reference;
        typedef T* pointer;
        typedef const T* const_pointer;
        typedef typename internal_collection_type::size_type size_type;
        typedef typename internal_collection_type::difference_type difference_type;
    
        // Iterator types over the set of thread-local copies.
        typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator;
        typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator;

        // Parallel range types, suitable for parallel_for / parallel_reduce.
        typedef generic_range_type< iterator > range_type;
        typedef generic_range_type< const_iterator > const_range_type;
00753     
        //! Default constructor.  Each thread-local copy will be default-constructed.
        enumerable_thread_specific() : 
            my_construct_callback( internal::callback_leaf<T,internal::construct_by_default<T> >::make(/*dummy argument*/0) ) 
        {}

        //! Constructor with initializer functor.  Each thread-local copy is
        //! initialized using finit (presumably T(finit()); see construct_by_finit).
        template <typename Finit>
        enumerable_thread_specific( Finit finit ) : 
            my_construct_callback( internal::callback_leaf<T,internal::construct_by_finit<T,Finit> >::make( finit ) ) 
        {}
    
        //! Constructor with exemplar.  Each thread-local copy is copy-constructed
        //! from the exemplar captured here.
        enumerable_thread_specific(const T& exemplar) : 
            my_construct_callback( internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( exemplar ) )
        {}
00769     
        //! Destructor.  Releases the construction callback, then destroys and
        //! deallocates all thread-local copies while this most-derived class is
        //! still intact, so the base class never has to call a destroyed override.
        ~enumerable_thread_specific() { 
            my_construct_callback->destroy();
            this->clear();  // deallocation before the derived class is finished destructing
            // So free(array *) is still accessible
        }
00776       
00778         reference local() {
00779             bool exists;
00780             return local(exists);
00781         }
00782 
00784         reference local(bool& exists)  {
00785             void* ptr = this->table_lookup(exists);
00786             return *(T*)ptr;
00787         }
00788 
        //! Get the number of local copies.
        size_type size() const { return my_locals.size(); }
    
        //! True if there have been no local copies created.
        bool empty() const { return my_locals.empty(); }
    
        //! Begin iterator over all thread-local copies.
        iterator begin() { return iterator( my_locals, 0 ); }
        //! End iterator.
        iterator end() { return iterator(my_locals, my_locals.size() ); }
    
        //! Begin const iterator.
        const_iterator begin() const { return const_iterator(my_locals, 0); }
    
        //! End const iterator.
        const_iterator end() const { return const_iterator(my_locals, my_locals.size()); }

        //! Get range for use with parallel algorithms.
        range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); } 
        
        //! Get const range for use with parallel algorithms.
        const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); }
00811 
        //! Destroys all elements in *this and releases the lookup table.
        //! The construction callback is kept, so the container remains usable.
        void clear() {
            unconstruct_locals();   // run destructors first, while slots still exist
            my_locals.clear();
            this->table_clear();
            // callback is not destroyed
            // exemplar is not destroyed
        }
00820 
    private:

        // Clones other's construction callback and copies every existing
        // thread-local value into *this; defined out of line below the class.
        template<typename U, typename A2, ets_key_usage_type C2>
        void internal_copy( const enumerable_thread_specific<U, A2, C2>& other);

    public:
00827 
        //! Cross-type copy constructor: requires T to be copy-constructible from U
        //! (see internal_copy below).
        template<typename U, typename Alloc, ets_key_usage_type Cachetype>
        enumerable_thread_specific( const enumerable_thread_specific<U, Alloc, Cachetype>& other ) : internal::ets_base<ETS_key_type> ()
        {
            internal_copy(other);
        }

        //! Copy constructor.
        enumerable_thread_specific( const enumerable_thread_specific& other ) : internal::ets_base<ETS_key_type> ()
        {
            internal_copy(other);
        }
00838 
    private:

        // Shared implementation for both operator= overloads: clear current
        // contents, discard the old callback, then copy from other.
        // NOTE(review): my_construct_callback is nulled before internal_copy
        // runs; if the clone() inside internal_copy were to throw, the destructor
        // would call destroy() through a null pointer — confirm the intended
        // exception guarantees here.
        template<typename U, typename A2, ets_key_usage_type C2>
        enumerable_thread_specific &
        internal_assign(const enumerable_thread_specific<U, A2, C2>& other) {
            // void* comparison allows self-assignment detection across the
            // differently-typed instantiations.
            if(static_cast<void *>( this ) != static_cast<const void *>( &other )) {
                this->clear(); 
                my_construct_callback->destroy();
                my_construct_callback = 0;
                internal_copy( other );
            }
            return *this;
        }
00852 
    public:

        //! Assignment operator.
        enumerable_thread_specific& operator=(const enumerable_thread_specific& other) {
            return internal_assign(other);
        }

        //! Cross-type assignment: requires T to be copy-constructible from U.
        template<typename U, typename Alloc, ets_key_usage_type Cachetype>
        enumerable_thread_specific& operator=(const enumerable_thread_specific<U, Alloc, Cachetype>& other)
        {
            return internal_assign(other);
        }
00865 
00866         // combine_func_t has signature T(T,T) or T(const T&, const T&)
00867         template <typename combine_func_t>
00868         T combine(combine_func_t f_combine) {
00869             if(begin() == end()) {
00870                 internal::destruct_only<T> location;
00871                 my_construct_callback->construct(location.value.begin());
00872                 return *location.value.begin();
00873             }
00874             const_iterator ci = begin();
00875             T my_result = *ci;
00876             while(++ci != end()) 
00877                 my_result = f_combine( my_result, *ci );
00878             return my_result;
00879         }
00880 
00881         // combine_func_t has signature void(T) or void(const T&)
00882         template <typename combine_func_t>
00883         void combine_each(combine_func_t f_combine) {
00884             for(const_iterator ci = begin(); ci != end(); ++ci) {
00885                 f_combine( *ci );
00886             }
00887         }
00888 
00889     }; // enumerable_thread_specific
00890 
    // Out-of-line definition: copy construction callback and all existing
    // thread-local values from other into an (assumed empty) *this.
    template <typename T, typename Allocator, ets_key_usage_type ETS_key_type> 
    template<typename U, typename A2, ets_key_usage_type C2>
    void enumerable_thread_specific<T,Allocator,ETS_key_type>::internal_copy( const enumerable_thread_specific<U, A2, C2>& other) {
        // Initialize my_construct_callback first, so that it is valid even if rest of this routine throws an exception.
        my_construct_callback = other.my_construct_callback->clone();

        typedef internal::ets_base<ets_no_key> base;
        __TBB_ASSERT(my_locals.size()==0,NULL);
        this->table_reserve_for_copy( other );
        // Walk every array in other's chain of lookup arrays; a key may appear
        // in more than one array, in which case only the first hit is copied.
        for( base::array* r=other.my_root; r; r=r->next ) {
            for( size_t i=0; i<r->size(); ++i ) {
                base::slot& s1 = r->at(i);
                if( !s1.empty() ) {
                    base::slot& s2 = this->table_find(s1.key);
                    if( s2.empty() ) { 
#if TBB_DEPRECATED
                        void* lref = &my_locals[my_locals.push_back(padded_element())];
#else
                        void* lref = &*my_locals.push_back(padded_element());
#endif
                        // Copy-construct T from other's U directly in the new slot.
                        s2.ptr = new(lref) T(*(U*)s1.ptr);
                        s2.key = s1.key;
                    } else {
                        // Skip the duplicate
                    } 
                }
            }
        }
    }
00920 
00921     template< typename Container >
00922     class flattened2d {
00923 
00924         // This intermediate typedef is to address issues with VC7.1 compilers
00925         typedef typename Container::value_type conval_type;
00926 
00927     public:
00928 
00930         typedef typename conval_type::size_type size_type;
00931         typedef typename conval_type::difference_type difference_type;
00932         typedef typename conval_type::allocator_type allocator_type;
00933         typedef typename conval_type::value_type value_type;
00934         typedef typename conval_type::reference reference;
00935         typedef typename conval_type::const_reference const_reference;
00936         typedef typename conval_type::pointer pointer;
00937         typedef typename conval_type::const_pointer const_pointer;
00938 
00939         typedef typename internal::segmented_iterator<Container, value_type> iterator;
00940         typedef typename internal::segmented_iterator<Container, const value_type> const_iterator;
00941 
00942         flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) : 
00943             my_container(const_cast<Container*>(&c)), my_begin(b), my_end(e) { }
00944 
00945         flattened2d( const Container &c ) : 
00946             my_container(const_cast<Container*>(&c)), my_begin(c.begin()), my_end(c.end()) { }
00947 
00948         iterator begin() { return iterator(*my_container) = my_begin; }
00949         iterator end() { return iterator(*my_container) = my_end; }
00950         const_iterator begin() const { return const_iterator(*my_container) = my_begin; }
00951         const_iterator end() const { return const_iterator(*my_container) = my_end; }
00952 
00953         size_type size() const {
00954             size_type tot_size = 0;
00955             for(typename Container::const_iterator i = my_begin; i != my_end; ++i) {
00956                 tot_size += i->size();
00957             }
00958             return tot_size;
00959         }
00960 
00961     private:
00962 
00963         Container *my_container;
00964         typename Container::const_iterator my_begin;
00965         typename Container::const_iterator my_end;
00966 
00967     };
00968 
00969     template <typename Container>
00970     flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) {
00971         return flattened2d<Container>(c, b, e);
00972     }
00973 
00974     template <typename Container>
00975     flattened2d<Container> flatten2d(const Container &c) {
00976         return flattened2d<Container>(c);
00977     }
00978 
00979 } // interface6
00980 
00981 namespace internal {
00982 using interface6::internal::segmented_iterator;
00983 }
00984 
00985 using interface6::enumerable_thread_specific;
00986 using interface6::flattened2d;
00987 using interface6::flatten2d;
00988 
00989 } // namespace tbb
00990 
00991 #endif

Copyright © 2005-2011 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.