atomic.h

/*
    Copyright 2005-2011 Intel Corporation.  All Rights Reserved.

    The source code contained or described herein and all documents related
    to the source code ("Material") are owned by Intel Corporation or its
    suppliers or licensors.  Title to the Material remains with Intel
    Corporation or its suppliers and licensors.  The Material is protected
    by worldwide copyright laws and treaty provisions.  No part of the
    Material may be used, copied, reproduced, modified, published, uploaded,
    posted, transmitted, distributed, or disclosed in any way without
    Intel's prior express written permission.

    No license under any patent, copyright, trade secret or other
    intellectual property right is granted to or conferred upon you by
    disclosure or delivery of the Materials, either expressly, by
    implication, inducement, estoppel or otherwise.  Any license under such
    intellectual property rights must be express and approved by Intel in
    writing.
*/

#ifndef __TBB_atomic_H
#define __TBB_atomic_H

#include <cstddef>
#include "tbb_stddef.h"

#if _MSC_VER
#define __TBB_LONG_LONG __int64
#else
#define __TBB_LONG_LONG long long
#endif /* _MSC_VER */

#include "tbb_machine.h"

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warnings
    #pragma warning (push)
    #pragma warning (disable: 4244 4267)
#endif

namespace tbb {

//! Specifies memory semantics for atomic operations.
enum memory_semantics {
    //! Sequentially consistent fence.
    full_fence,
    //! Acquire fence.
    acquire,
    //! Release fence.
    release,
    //! No ordering.
    relaxed
};
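
// Illustrative sketch (not part of the original header): how a caller selects
// these semantics explicitly.  It uses only the public interface declared in
// this file: atomic<T>, the free load/store helpers, and fetch_and_add.
//
//     static tbb::atomic<int> flag;                       // zero-initialized, like a POD
//     int v = tbb::load<tbb::acquire>( flag );            // acquire load
//     tbb::store<tbb::release>( flag, v+1 );              // release store
//     flag.fetch_and_add<tbb::relaxed>( 1 );              // unordered read-modify-write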

namespace internal {

#if __TBB_ATTRIBUTE_ALIGNED_PRESENT
    #define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f  __attribute__ ((aligned(a)));
#elif __TBB_DECLSPEC_ALIGN_PRESENT
    #define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f;
#else
    #error Do not know syntax for forcing alignment.
#endif

template<size_t S>
struct atomic_rep;           // Primary template declared, but never defined.

template<>
struct atomic_rep<1> {       // Specialization for 1-byte types
    typedef int8_t word;
    int8_t value;
};
template<>
struct atomic_rep<2> {       // Specialization for 2-byte types
    typedef int16_t word;
    __TBB_DECL_ATOMIC_FIELD(int16_t,value,2)
};
template<>
struct atomic_rep<4> {       // Specialization for 4-byte types
#if _MSC_VER && __TBB_WORDSIZE==4
    // Work-around that avoids spurious /Wp64 warnings
    typedef intptr_t word;
#else
    typedef int32_t word;
#endif
    __TBB_DECL_ATOMIC_FIELD(int32_t,value,4)
};
#if __TBB_64BIT_ATOMICS
template<>
struct atomic_rep<8> {       // Specialization for 8-byte types
    typedef int64_t word;
    __TBB_DECL_ATOMIC_FIELD(int64_t,value,8)
};
#endif

template<size_t Size, memory_semantics M>
struct atomic_traits;        // Primary template declared, but never defined.

#define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M)                                                         \
    template<> struct atomic_traits<S,M> {                                                               \
        typedef atomic_rep<S>::word word;                                                                \
        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \
            return __TBB_machine_cmpswp##S##M(location,new_value,comparand);                             \
        }                                                                                                \
        inline static word fetch_and_add( volatile void* location, word addend ) {                       \
            return __TBB_machine_fetchadd##S##M(location,addend);                                        \
        }                                                                                                \
        inline static word fetch_and_store( volatile void* location, word value ) {                      \
            return __TBB_machine_fetchstore##S##M(location,value);                                       \
        }                                                                                                \
    };

#define __TBB_DECL_ATOMIC_PRIMITIVES(S)                                                                  \
    template<memory_semantics M>                                                                         \
    struct atomic_traits<S,M> {                                                                          \
        typedef atomic_rep<S>::word word;                                                                \
        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \
            return __TBB_machine_cmpswp##S(location,new_value,comparand);                                \
        }                                                                                                \
        inline static word fetch_and_add( volatile void* location, word addend ) {                       \
            return __TBB_machine_fetchadd##S(location,addend);                                           \
        }                                                                                                \
        inline static word fetch_and_store( volatile void* location, word value ) {                      \
            return __TBB_machine_fetchstore##S(location,value);                                          \
        }                                                                                                \
    };

template<memory_semantics M>
struct atomic_load_store_traits;    // Primary template declaration

#define __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(M)                      \
    template<> struct atomic_load_store_traits<M> {                     \
        template <typename T>                                           \
        inline static T load( const volatile T& location ) {            \
            return __TBB_load_##M( location );                          \
        }                                                               \
        template <typename T>                                           \
        inline static void store( volatile T& location, T value ) {     \
            __TBB_store_##M( location, value );                         \
        }                                                               \
    }

#if __TBB_USE_FENCED_ATOMICS
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,relaxed)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,relaxed)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,relaxed)
#if __TBB_64BIT_ATOMICS
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,relaxed)
#endif
#else /* !__TBB_USE_FENCED_ATOMICS */
__TBB_DECL_ATOMIC_PRIMITIVES(1)
__TBB_DECL_ATOMIC_PRIMITIVES(2)
__TBB_DECL_ATOMIC_PRIMITIVES(4)
#if __TBB_64BIT_ATOMICS
__TBB_DECL_ATOMIC_PRIMITIVES(8)
#endif
#endif /* !__TBB_USE_FENCED_ATOMICS */

__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(full_fence);
__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(acquire);
__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(release);
__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(relaxed);

//! Additive inverse of 1 for type T.
/** Various compilers issue various warnings if -1 is used with various integer types.
    The expression below avoids all of them. */
#define __TBB_MINUS_ONE(T) (T(T(0)-T(1)))

//! Base class that provides basic functionality for atomic<T> without fetch_and_add.
/** Works for any type T that has the same size as an integral type, has a trivial
    constructor/destructor, and can be copied/compared by memcpy/memcmp. */
template<typename T>
struct atomic_impl {
protected:
    atomic_rep<sizeof(T)> rep;
private:
    //! Union type used to convert type T to its underlying integral representation.
    union converter {
        T value;
        typename atomic_rep<sizeof(T)>::word bits;
    };
public:
    typedef T value_type;

    template<memory_semantics M>
    value_type fetch_and_store( value_type value ) {
        converter u, w;
        u.value = value;
        w.bits = internal::atomic_traits<sizeof(value_type),M>::fetch_and_store(&rep.value,u.bits);
        return w.value;
    }

    value_type fetch_and_store( value_type value ) {
        return fetch_and_store<full_fence>(value);
    }

    template<memory_semantics M>
    value_type compare_and_swap( value_type value, value_type comparand ) {
        converter u, v, w;
        u.value = value;
        v.value = comparand;
        w.bits = internal::atomic_traits<sizeof(value_type),M>::compare_and_swap(&rep.value,u.bits,v.bits);
        return w.value;
    }

    value_type compare_and_swap( value_type value, value_type comparand ) {
        return compare_and_swap<full_fence>(value,comparand);
    }

    operator value_type() const volatile {                // volatile qualifier here for backwards compatibility
        converter w;
        w.bits = __TBB_load_with_acquire( rep.value );
        return w.value;
    }

    template<memory_semantics M>
    value_type load () const {
        converter u;
        u.bits = internal::atomic_load_store_traits<M>::load( rep.value );
        return u.value;
    }

    value_type load () const {
        return load<acquire>();
    }

    template<memory_semantics M>
    void store ( value_type value ) {
        converter u;
        u.value = value;
        internal::atomic_load_store_traits<M>::store( rep.value, u.bits );
    }

    void store ( value_type value ) {
        store<release>( value );
    }

protected:
    value_type store_with_release( value_type rhs ) {
        converter u;
        u.value = rhs;
        __TBB_store_with_release(rep.value,u.bits);
        return rhs;
    }
};
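
// Illustrative sketch (not part of the original header): the classic
// compare_and_swap retry loop, here atomically raising the stored value to a
// maximum.  update_max is a hypothetical helper; it relies only on the public
// members defined above, which atomic<T> inherits from this base.
//
//     void update_max( tbb::atomic<int>& m, int v ) {
//         int snapshot = m;                                 // implicit acquire load
//         while( snapshot < v ) {
//             int prev = m.compare_and_swap( v, snapshot );
//             if( prev == snapshot ) break;                 // we installed v
//             snapshot = prev;                              // lost the race; retry
//         }
//     }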

//! Base class that provides basic functionality for atomic<T> with fetch_and_add.
/** I is the underlying type.
    D is the difference type.
    StepType should be char if I is an integral type, and T if I is T*. */
template<typename I, typename D, typename StepType>
struct atomic_impl_with_arithmetic: atomic_impl<I> {
public:
    typedef I value_type;

    template<memory_semantics M>
    value_type fetch_and_add( D addend ) {
        return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_add( &this->rep.value, addend*sizeof(StepType) ));
    }

    value_type fetch_and_add( D addend ) {
        return fetch_and_add<full_fence>(addend);
    }

    template<memory_semantics M>
    value_type fetch_and_increment() {
        return fetch_and_add<M>(1);
    }

    value_type fetch_and_increment() {
        return fetch_and_add(1);
    }

    template<memory_semantics M>
    value_type fetch_and_decrement() {
        return fetch_and_add<M>(__TBB_MINUS_ONE(D));
    }

    value_type fetch_and_decrement() {
        return fetch_and_add(__TBB_MINUS_ONE(D));
    }

public:
    value_type operator+=( D addend ) {
        return fetch_and_add(addend)+addend;
    }

    value_type operator-=( D addend ) {
        // Additive inverse of addend computed using binary minus,
        // instead of unary minus, for sake of avoiding compiler warnings.
        return operator+=(D(0)-addend);
    }

    value_type operator++() {
        return fetch_and_add(1)+1;
    }

    value_type operator--() {
        return fetch_and_add(__TBB_MINUS_ONE(D))-1;
    }

    value_type operator++(int) {
        return fetch_and_add(1);
    }

    value_type operator--(int) {
        return fetch_and_add(__TBB_MINUS_ONE(D));
    }
};
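
// Illustrative sketch (not part of the original header): the arithmetic
// members above make atomic<T> usable as a shared counter.  A hypothetical
// use inside a parallel loop body:
//
//     static tbb::atomic<size_t> hits;       // zero-initialized, like a POD
//     ++hits;                                // atomic pre-increment
//     size_t n = hits.fetch_and_add(10);     // returns the value before the add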

} /* internal */

//! Primary template for atomic.
/** See the Reference for details.
    @ingroup synchronization */
template<typename T>
struct atomic: internal::atomic_impl<T> {
    T operator=( T rhs ) {
        // "this" required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<T>& operator=( const atomic<T>& rhs ) {this->store_with_release(rhs); return *this;}
};

#define __TBB_DECL_ATOMIC(T) \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {  \
        T operator=( T rhs ) {return store_with_release(rhs);}  \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}  \
    };

#if __TBB_64BIT_ATOMICS
__TBB_DECL_ATOMIC(__TBB_LONG_LONG)
__TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG)
#else
// test_atomic will verify that sizeof(long long)==8
#endif
__TBB_DECL_ATOMIC(long)
__TBB_DECL_ATOMIC(unsigned long)

#if defined(_MSC_VER) && __TBB_WORDSIZE==4
/* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option.
   It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T)
   with an operator=(U) that explicitly converts the U to a T.  Types T and U should be
   type synonyms on the platform.  Type U should be the wider variant of T from the
   perspective of /Wp64. */
#define __TBB_DECL_ATOMIC_ALT(T,U) \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {  \
        T operator=( U rhs ) {return store_with_release(T(rhs));}  \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}  \
    };
__TBB_DECL_ATOMIC_ALT(unsigned,size_t)
__TBB_DECL_ATOMIC_ALT(int,ptrdiff_t)
#else
__TBB_DECL_ATOMIC(unsigned)
__TBB_DECL_ATOMIC(int)
#endif /* defined(_MSC_VER) && __TBB_WORDSIZE==4 */

__TBB_DECL_ATOMIC(unsigned short)
__TBB_DECL_ATOMIC(short)
__TBB_DECL_ATOMIC(char)
__TBB_DECL_ATOMIC(signed char)
__TBB_DECL_ATOMIC(unsigned char)

#if !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED)
__TBB_DECL_ATOMIC(wchar_t)
#endif /* !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED) */

//! Specialization for atomic<T*> with arithmetic and operator->.
template<typename T> struct atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> {
    T* operator=( T* rhs ) {
        // "this" required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<T*>& operator=( const atomic<T*>& rhs ) {
        this->store_with_release(rhs); return *this;
    }
    T* operator->() const {
        return (*this);
    }
};
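
// Illustrative sketch (not part of the original header): because StepType is T
// here, atomic<T*> steps in units of sizeof(T), so fetch_and_add walks an array
// like ordinary pointer arithmetic.  The names below are hypothetical.
//
//     int buffer[8];
//     tbb::atomic<int*> cursor;
//     cursor = buffer;
//     int* slot = cursor.fetch_and_add(1);   // claims buffer[0]; cursor now points at buffer[1]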

//! Specialization for atomic<void*>, for sake of not allowing pointer arithmetic or operator->.
template<> struct atomic<void*>: internal::atomic_impl<void*> {
    void* operator=( void* rhs ) {
        // "this" required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<void*>& operator=( const atomic<void*>& rhs ) {
        this->store_with_release(rhs); return *this;
    }
};

// Helpers to work around the ugly syntax of calling a template member function of a
// template class with a template argument that depends on the template parameters.

template <memory_semantics M, typename T>
T load ( const atomic<T>& a ) { return a.template load<M>(); }

template <memory_semantics M, typename T>
void store ( atomic<T>& a, T value ) { a.template store<M>(value); }

} // namespace tbb
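
// Illustrative sketch (not part of the original header): the free functions
// above let the memory semantics be spelled at the call site without the
// ".template" noise, e.g. a simple release/acquire handshake:
//
//     static tbb::atomic<int> ready;
//     tbb::store<tbb::release>( ready, 1 );                 // publisher
//     while( tbb::load<tbb::acquire>( ready ) == 0 ) {}     // consumer spins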

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    #pragma warning (pop)
#endif // warnings 4244, 4267 are back

#endif /* __TBB_atomic_H */
