#ifndef __TBB_task_H
#define __TBB_task_H

#include "tbb_stddef.h"
#include "tbb_machine.h"
#include <climits>

// Opaque handle used by the Intel ITT instrumentation API.
typedef struct ___itt_caller *__itt_caller;

namespace tbb {

class task;
class task_list;

#if __TBB_TASK_GROUP_CONTEXT
class task_group_context;
#endif /* __TBB_TASK_GROUP_CONTEXT */

// Some compilers have trouble granting friendship across namespaces,
// so the members of task_base are made public for them.
#if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)
#define __TBB_TASK_BASE_ACCESS public
#else
#define __TBB_TASK_BASE_ACCESS private
#endif

namespace internal {

    // Proxy returned by task::allocate_additional_child_of(); its allocate()
    // also atomically increments the reference count of the designated parent.
    class allocate_additional_child_of_proxy: no_assign {
        task* self;
        task& parent;
    public:
        explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {}
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

}

namespace interface5 {
    namespace internal {

        // Base class of task: holds the static operations (spawn, destroy,
        // allocate_additional_child_of) shared across interface versions.
        class task_base: tbb::internal::no_copy {
        __TBB_TASK_BASE_ACCESS:
            friend class tbb::task;

            // Schedule the task for execution when a worker becomes available.
            static void spawn( task& t );

            // Spawn multiple tasks and clear the list.
            static void spawn( task_list& list );

            // Like allocate_child, but designates another task as the parent.
            static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
                return tbb::internal::allocate_additional_child_of_proxy(t);
            }

            // Destroy a task explicitly; normally a task is destroyed implicitly
            // after its execute() method runs.
            static void __TBB_EXPORTED_FUNC destroy( task& victim );
        };
    } // namespace internal
} // namespace interface5

// Internal machinery; not part of the public API.
namespace internal {

    // Pure-virtual interface to the scheduler that owns a task.
    class scheduler: no_copy {
    public:
        // Schedule a task for execution.
        virtual void spawn( task& first, task*& next ) = 0;

        // Wait until all children of parent (and child, if not NULL) have completed.
        virtual void wait_for_all( task& parent, task* child ) = 0;

        // Spawn a root task and wait for it to complete.
        virtual void spawn_root_and_wait( task& first, task*& next ) = 0;

        // Pure virtual destructor; defined out of line.
        virtual ~scheduler() = 0;

        // Put the task into the starvation-resistant (FIFO) queue rather than the local task pool.
        virtual void enqueue( task& t, void* reserved ) = 0;
    };

    // Integral type used for a task's reference count.
    typedef intptr_t reference_count;

    // Identifier used by the task affinity mechanism; 0 means no affinity.
    typedef unsigned short affinity_id;

#if __TBB_TASK_GROUP_CONTEXT
    class generic_scheduler;

    // Node of the doubly linked list of contexts maintained per thread.
    struct context_list_node_t {
        context_list_node_t *my_prev,
                            *my_next;
    };

    class allocate_root_with_context_proxy: no_assign {
        task_group_context& my_context;
    public:
        allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {}
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };
#endif /* __TBB_TASK_GROUP_CONTEXT */

    class allocate_root_proxy: no_assign {
    public:
        static task& __TBB_EXPORTED_FUNC allocate( size_t size );
        static void __TBB_EXPORTED_FUNC free( task& );
    };

    class allocate_continuation_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

    class allocate_child_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

    // Memory prefix to a task object: the scheduler's bookkeeping data, laid out
    // immediately before the user-visible task in memory.
    class task_prefix {
    private:
        friend class tbb::task;
        friend class tbb::interface5::internal::task_base;
        friend class tbb::task_list;
        friend class internal::scheduler;
        friend class internal::allocate_root_proxy;
        friend class internal::allocate_child_proxy;
        friend class internal::allocate_continuation_proxy;
        friend class internal::allocate_additional_child_of_proxy;

#if __TBB_TASK_GROUP_CONTEXT
        // Shared context that governs cancellation and exception handling for this task.
        task_group_context  *context;
#endif /* __TBB_TASK_GROUP_CONTEXT */

        // The scheduler that allocated the task.
        scheduler* origin;

#if __TBB_TASK_PRIORITY
        union {
#endif /* __TBB_TASK_PRIORITY */

        // The scheduler that owns the task.
        scheduler* owner;

#if __TBB_TASK_PRIORITY
        // Pointer to the next offloaded lower-priority task (overlays owner while offloaded).
        task* next_offloaded;
        };
#endif /* __TBB_TASK_PRIORITY */

        // The task whose reference count includes this task (i.e. its successor).
        tbb::task* parent;

        // Reference count used for synchronization: roughly the number of allocated
        // children plus one while the task is being waited on. Accessed atomically.
        __TBB_atomic reference_count ref_count;

        // Obsolete scheduling depth; retained only for backward binary compatibility.
        int depth;

        // One of the task::state_type values, stored as a byte for compactness.
        unsigned char state;

        // Miscellaneous version/flag bits; bit 0x80 marks a stolen task
        // (see task::is_stolen_task).
        unsigned char extra_state;

        // Hint to the scheduler about the preferred worker for this task.
        affinity_id affinity;

        // Link used by task_list and by the scheduler's internal lists.
        tbb::task* next;

        // The task corresponding to this task_prefix (laid out right after it).
        tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
    };

} // namespace internal

#if __TBB_TASK_GROUP_CONTEXT

#if __TBB_TASK_PRIORITY
namespace internal {
    static const int priority_stride_v4 = INT_MAX / 4;
}

enum priority_t {
    priority_normal = internal::priority_stride_v4 * 2,
    priority_low = priority_normal - internal::priority_stride_v4,
    priority_high = priority_normal + internal::priority_stride_v4
};

#endif /* __TBB_TASK_PRIORITY */

#if TBB_USE_CAPTURED_EXCEPTION
    class tbb_exception;
#else
    namespace internal {
        class tbb_exception_ptr;
    }
#endif /* !TBB_USE_CAPTURED_EXCEPTION */

class task_scheduler_init;

// Used to form groups of tasks that can be cancelled, or have exceptions and
// priority propagated, together. A task's context is set when the task is
// allocated and can be changed afterwards only via task::change_group().
class task_group_context : internal::no_copy {
private:
    friend class internal::generic_scheduler;
    friend class task_scheduler_init;

#if TBB_USE_CAPTURED_EXCEPTION
    typedef tbb_exception exception_container_type;
#else
    typedef internal::tbb_exception_ptr exception_container_type;
#endif

    // Layout of my_version_and_traits: low 16 bits are the version, high bits are the traits.
    enum version_traits_word_layout {
        traits_offset = 16,
        version_mask = 0xFFFF,
        traits_mask = 0xFFFFul << traits_offset
    };

public:
    enum kind_type {
        isolated,
        bound
    };

    enum traits_type {
        exact_exception = 0x0001ul << traits_offset,
        concurrent_wait = 0x0004ul << traits_offset,
#if TBB_USE_CAPTURED_EXCEPTION
        default_traits = 0
#else
        default_traits = exact_exception
#endif /* !TBB_USE_CAPTURED_EXCEPTION */
    };

private:
    enum state {
        may_have_children = 1
    };

    union {
        // Flavor of this context: bound or isolated (overlaid with a word-sized field for alignment).
        kind_type my_kind;
        uintptr_t _my_kind_aligner;
    };

    // Pointer to the context of the parent cancellation group; NULL for isolated contexts.
    task_group_context *my_parent;

    // Node in the thread-specific list of contexts, used to propagate state changes
    // without extra memory allocation.
    internal::context_list_node_t my_node;

    // Stack-stitching point for Intel performance analysis tools (ITT).
    __itt_caller itt_caller;

    // Leading padding protecting the frequently accessed members above from
    // false sharing with the members below.
    char _leading_padding[internal::NFS_MaxLineSize
                          - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)
                          - sizeof(__itt_caller)];

    // Nonzero once cancellation of the group has been requested.
    uintptr_t my_cancellation_requested;

    // Version word combined with the traits_type bits supplied at construction.
    uintptr_t  my_version_and_traits;

    // Pointer to the container storing the exception being propagated across this group.
    exception_container_type *my_exception;

    // Scheduler instance with which this context is registered.
    internal::generic_scheduler *my_owner;

    // Internal state flags (e.g. may_have_children).
    uintptr_t my_state;

#if __TBB_TASK_PRIORITY
    // Priority level of the task group.
    intptr_t my_priority;
#endif /* __TBB_TASK_PRIORITY */

    // Trailing padding rounding the object size up to whole cache lines.
    char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t) - 2 * sizeof(void*)
#if __TBB_TASK_PRIORITY
                            - sizeof(intptr_t)
#endif /* __TBB_TASK_PRIORITY */
                          ];

public:
    // Default and binding constructor. A bound context is attached to the
    // context of its parent task group when the first task associated with it
    // is spawned; an isolated context has no parent.
    task_group_context ( kind_type relation_with_parent = bound,
                         uintptr_t traits = default_traits )
        : my_kind(relation_with_parent)
        , my_version_and_traits(1 | traits)
    {
        init();
    }

    __TBB_EXPORTED_METHOD ~task_group_context ();

    // Forcefully reinitializes the context after the task tree it was associated with is completed.
    void __TBB_EXPORTED_METHOD reset ();

    // Initiates cancellation of all tasks in this group and its subordinate groups.
    // Returns false if cancellation had already been requested.
    bool __TBB_EXPORTED_METHOD cancel_group_execution ();

    // Returns true if the group received a cancellation request.
    bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;

    // Records the exception currently being handled and cancels the group so
    // that the exception can be rethrown at the waiting thread.
    void __TBB_EXPORTED_METHOD register_pending_exception ();

#if __TBB_TASK_PRIORITY
    // Changes priority of the task group.
    void set_priority ( priority_t );

    // Retrieves current priority of the task group.
    priority_t priority () const;
#endif /* __TBB_TASK_PRIORITY */

protected:
    // Out-of-line part of the constructor.
    void __TBB_EXPORTED_METHOD init ();

private:
    friend class task;
    friend class internal::allocate_root_with_context_proxy;

    static const kind_type binding_required = bound;
    static const kind_type binding_completed = kind_type(bound+1);
    static const kind_type detached = kind_type(binding_completed+1);
    static const kind_type dying = kind_type(detached+1);

    // Propagates a state change (such as a cancellation request) from ancestor
    // contexts down to this one.
    template <typename T>
    void propagate_state_from_ancestors ( T task_group_context::*mptr_state, T new_state );

    inline void finish_initialization ( internal::generic_scheduler *local_sched );

    // Binds this context to its parent group and registers it with the local scheduler.
    void bind_to ( internal::generic_scheduler *local_sched );

    void register_with ( internal::generic_scheduler *local_sched );

}; // class task_group_context

#endif /* __TBB_TASK_GROUP_CONTEXT */
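
// A minimal usage sketch (illustrative only; LongRunningTask stands for a
// hypothetical user-defined task type): allocating a root task in an explicit
// context lets any thread cancel the whole task tree through that context.
//
//     tbb::task_group_context ctx;
//     tbb::task& root = *new( tbb::task::allocate_root(ctx) ) LongRunningTask();
//     tbb::task::spawn_root_and_wait( root );    // returns early if the group is cancelled
//
//     // meanwhile, any other thread may request cancellation of the whole tree:
//     ctx.cancel_group_execution();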

// Base class for user-defined tasks.
class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base {

    // Set reference count (out-of-line version used when threading tools or assertions are enabled).
    void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );

    // Decrement reference count and return its new value.
    internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();

protected:
    // Default constructor.
    task() {prefix().extra_state=1;}

public:
    // Destructor.
    virtual ~task() {}

    // Should be overridden by derived task classes; returns the next task to
    // execute immediately (bypassing the scheduler), or NULL.
    virtual task* execute() = 0;

    // Enumeration of task states that the scheduler considers.
    enum state_type {
        // task is running, and will be destroyed after execute() completes
        executing,
        // task to be rescheduled
        reexecute,
        // task is in ready pool, or is going to be put there, or was just taken off
        ready,
        // task object is freshly allocated or recycled
        allocated,
        // task object is on free list, or is going to be put there, or was just taken off
        freed,
        // task to be recycled as continuation
        recycle
    };

    //------------------------------------------------------------------------
    // Allocation
    //------------------------------------------------------------------------

    // Returns proxy for overloaded new that allocates a root task.
    static internal::allocate_root_proxy allocate_root() {
        return internal::allocate_root_proxy();
    }

#if __TBB_TASK_GROUP_CONTEXT
    // Returns proxy for overloaded new that allocates a root task associated with the given context.
    static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
        return internal::allocate_root_with_context_proxy(ctx);
    }
#endif /* __TBB_TASK_GROUP_CONTEXT */

    // Returns proxy for overloaded new that allocates a continuation task of *this.
    // The continuation's parent becomes the parent of *this.
    internal::allocate_continuation_proxy& allocate_continuation() {
        return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
    }

    // Returns proxy for overloaded new that allocates a child task of *this.
    internal::allocate_child_proxy& allocate_child() {
        return *reinterpret_cast<internal::allocate_child_proxy*>(this);
    }

    // Defines how a task is allocated as an additional child of another task;
    // the allocation also atomically increments that parent's reference count.
    using task_base::allocate_additional_child_of;
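
    // Illustrative sketch (ChildTask stands for a hypothetical user task type,
    // and parent for the existing parent task): adding one more child to a
    // parent that is already running or being waited on. The proxy's allocate()
    // bumps the parent's reference count atomically, so no extra
    // set_ref_count() adjustment is needed for this child.
    //
    //     tbb::task& c = *new( tbb::task::allocate_additional_child_of(parent) ) ChildTask();
    //     tbb::task::spawn( c );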

#if __TBB_DEPRECATED_TASK_INTERFACE
    // Destroy a task (deprecated member form). Usually unnecessary, because a
    // task is implicitly destroyed after its execute() runs; use it for tasks
    // that are allocated but never executed, such as a dummy root.
    void __TBB_EXPORTED_METHOD destroy( task& t );
#else /* !__TBB_DEPRECATED_TASK_INTERFACE */
    // Define destroy() as the static member inherited from task_base.
    using task_base::destroy;
#endif /* !__TBB_DEPRECATED_TASK_INTERFACE */

    //------------------------------------------------------------------------
    // Recycling of tasks
    //------------------------------------------------------------------------

    // Change this to be a continuation of its former self.
    // The caller must guarantee that the task's reference count does not become
    // zero until after execute() returns, typically by having execute() return
    // a pointer to one of the children instead of spawning it. If that guarantee
    // cannot be made, use recycle_as_safe_continuation() instead.
    void recycle_as_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = allocated;
    }
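
    // Illustrative sketch of the continuation-passing style this supports
    // (ChildTask stands for a hypothetical user task type). Inside execute():
    //
    //     recycle_as_continuation();       // *this survives as the continuation
    //     set_ref_count(2);                // two children, no extra count for a wait
    //     ChildTask& a = *new( allocate_child() ) ChildTask( /*...*/ );
    //     ChildTask& b = *new( allocate_child() ) ChildTask( /*...*/ );
    //     spawn( b );
    //     return &a;                       // scheduler bypass for the other child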

    // Recycle to be a safe continuation of its former self.
    // Safer variant of recycle_as_continuation(): the recycled task will not be
    // run as the continuation until the currently executing execute() returns.
    void recycle_as_safe_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = recycle;
    }

    // Change this to be a child of new_parent.
    void recycle_as_child_of( task& new_parent ) {
        internal::task_prefix& p = prefix();
        __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
        __TBB_ASSERT( p.parent==NULL, "parent must be null" );
        __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
        __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
        p.state = allocated;
        p.parent = &new_parent;
#if __TBB_TASK_GROUP_CONTEXT
        p.context = new_parent.prefix().context;
#endif /* __TBB_TASK_GROUP_CONTEXT */
    }

    // Schedule this task for reexecution after the current execute() returns.
    void recycle_to_reexecute() {
        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
        prefix().state = reexecute;
    }

    // Obsolete task-depth interface; retained for source compatibility only.
    // These methods have no effect.
    intptr_t depth() const {return 0;}
    void set_depth( intptr_t ) {}
    void add_to_depth( int ) {}

    //------------------------------------------------------------------------
    // Synchronization
    //------------------------------------------------------------------------

    // Set reference count.
    void set_ref_count( int count ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        internal_set_ref_count(count);
#else
        prefix().ref_count = count;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
    }

    // Atomically increment reference count.
    // Has acquire semantics.
    void increment_ref_count() {
        __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
    }

    // Atomically decrement reference count and return its new value.
    // Has release semantics.
    int decrement_ref_count() {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        return int(internal_decrement_ref_count());
#else
        return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
    }

    // Define recommended spawn overloads as members inherited from task_base.
    using task_base::spawn;

    // Similar to spawn followed by wait_for_all, but more efficient.
    void spawn_and_wait_for_all( task& child ) {
        prefix().owner->wait_for_all( *this, &child );
    }

    // Similar to spawn followed by wait_for_all, but more efficient.
    void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );

    // Spawn a root task and wait for it to complete.
    static void spawn_root_and_wait( task& root ) {
        root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
    }

    // Spawn the root tasks on root_list and wait for all of them to finish.
    static void spawn_root_and_wait( task_list& root_list );

    // Wait for reference count to become one, and set reference count to zero.
    // Works on tasks while waiting.
    void wait_for_all() {
        prefix().owner->wait_for_all( *this, NULL );
    }

    // Enqueue task for starvation-resistant execution (roughly FIFO order).
#if __TBB_TASK_PRIORITY
    // When task priorities are enabled, an overload accepting an explicit
    // priority_t is provided below.
#endif /* __TBB_TASK_PRIORITY */
    static void enqueue( task& t ) {
        t.prefix().owner->enqueue( t, NULL );
    }

#if __TBB_TASK_PRIORITY
    // Enqueue task for starvation-resistant execution on the specified priority level.
    static void enqueue( task& t, priority_t p ) {
        __TBB_ASSERT( p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value" );
        t.prefix().owner->enqueue( t, (void*)p );
    }
#endif /* __TBB_TASK_PRIORITY */
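
    // Illustrative sketch (BackgroundTask stands for a hypothetical user task
    // type): enqueued tasks are typically used fire-and-forget, without a
    // parent reference count to wait on.
    //
    //     tbb::task& t = *new( tbb::task::allocate_root() ) BackgroundTask();
    //     tbb::task::enqueue( t );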

    // The innermost task being executed or destroyed by the current thread at the moment.
    static task& __TBB_EXPORTED_FUNC self();

    // Task on whose behalf this task is working, or NULL if this is a root.
    task* parent() const {return prefix().parent;}

    // Sets the parent task pointer to the specified value.
    void set_parent(task* p) {
#if __TBB_TASK_GROUP_CONTEXT
        __TBB_ASSERT(prefix().context == p->prefix().context, "The tasks must be in the same context");
#endif
        prefix().parent = p;
    }

#if __TBB_TASK_GROUP_CONTEXT
    // Deprecated synonym for group().
    task_group_context* context() {return prefix().context;}

    // Pointer to the task group descriptor this task belongs to.
    task_group_context* group () { return prefix().context; }
#endif /* __TBB_TASK_GROUP_CONTEXT */

    // True if task was stolen from the task pool of another thread.
    bool is_stolen_task() const {
        return (prefix().extra_state & 0x80)!=0;
    }

    //------------------------------------------------------------------------
    // Debugging
    //------------------------------------------------------------------------

    // Current execution state.
    state_type state() const {return state_type(prefix().state);}

    // The internal reference count.
    int ref_count() const {
#if TBB_USE_ASSERT
        internal::reference_count ref_count_ = prefix().ref_count;
        __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error");
#endif
        return int(prefix().ref_count);
    }

    // Obsolete, retained only for backward compatibility; always returns true.
    bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;

    //------------------------------------------------------------------------
    // Affinity
    //------------------------------------------------------------------------

    // An id as used for specifying affinity.
    // Guaranteed to be an integral type; a value of 0 means no affinity.
    typedef internal::affinity_id affinity_id;

    // Set affinity for this task.
    void set_affinity( affinity_id id ) {prefix().affinity = id;}

    // Current affinity of this task.
    affinity_id affinity() const {return prefix().affinity;}

    // Invoked by the scheduler to notify the task that it ran on an unexpected thread.
    // The default definition does nothing.
    virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );

#if __TBB_TASK_GROUP_CONTEXT
    // Moves this task from its current group into the one specified by ctx.
    void __TBB_EXPORTED_METHOD change_group ( task_group_context& ctx );

    // Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    // Returns false if cancellation had already been requested.
    bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }

    // Returns true if the group this task belongs to received a cancellation request.
    bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
#endif /* __TBB_TASK_GROUP_CONTEXT */

#if __TBB_TASK_PRIORITY
    // Changes priority of the task group this task belongs to.
    void set_group_priority ( priority_t p ) {  prefix().context->set_priority(p); }

    // Retrieves current priority of the task group this task belongs to.
    priority_t group_priority () const { return prefix().context->priority(); }

#endif /* __TBB_TASK_PRIORITY */

private:
    friend class interface5::internal::task_base;
    friend class task_list;
    friend class internal::scheduler;
    friend class internal::allocate_root_proxy;
#if __TBB_TASK_GROUP_CONTEXT
    friend class internal::allocate_root_with_context_proxy;
#endif /* __TBB_TASK_GROUP_CONTEXT */
    friend class internal::allocate_continuation_proxy;
    friend class internal::allocate_child_proxy;
    friend class internal::allocate_additional_child_of_proxy;

    // The scheduler's bookkeeping data stored immediately before this task object.
    internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
        return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
    }
}; // class task
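
// A minimal blocked-style usage sketch of the allocation and reference-count
// protocol declared above (FibTask and ParallelFib are illustrative, not part
// of the library):
//
//     class FibTask: public tbb::task {
//         const long n;
//         long* const sum;
//     public:
//         FibTask( long n_, long* sum_ ) : n(n_), sum(sum_) {}
//         /*override*/ tbb::task* execute() {
//             if( n<2 ) {
//                 *sum = n;
//             } else {
//                 long x, y;
//                 FibTask& a = *new( allocate_child() ) FibTask(n-1,&x);
//                 FibTask& b = *new( allocate_child() ) FibTask(n-2,&y);
//                 set_ref_count(3);              // two children + one for the wait
//                 spawn( b );
//                 spawn_and_wait_for_all( a );   // run a here, wait for both
//                 *sum = x+y;
//             }
//             return NULL;
//         }
//     };
//
//     long ParallelFib( long n ) {
//         long sum;
//         FibTask& root = *new( tbb::task::allocate_root() ) FibTask(n,&sum);
//         tbb::task::spawn_root_and_wait( root );
//         return sum;
//     }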

// Task that does nothing and completes immediately; useful for synchronization,
// e.g. as a dummy successor that only carries a reference count.
class empty_task: public task {
    /*override*/ task* execute() {
        return NULL;
    }
};
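
// A common pattern empty_task supports (MyTask stands for a hypothetical user
// task type): an explicit dummy root whose reference count is kept above zero
// so it can be waited on and then disposed of by hand.
//
//     tbb::empty_task* root = new( tbb::task::allocate_root() ) tbb::empty_task;
//     root->set_ref_count(2);                 // one child + one for wait_for_all
//     tbb::task& child = *new( root->allocate_child() ) MyTask();
//     tbb::task::spawn( child );
//     root->wait_for_all();                   // returns when the child finishes
//     root->destroy( *root );                 // the dummy root is never freed implicitly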

// A singly linked list of tasks, consumed by task::spawn(task_list&) and
// task::spawn_root_and_wait(task_list&).
class task_list: internal::no_copy {
private:
    task* first;
    task** next_ptr;
    friend class task;
    friend class interface5::internal::task_base;
public:
    // Construct an empty list.
    task_list() : first(NULL), next_ptr(&first) {}

    // Destroys the list, but does not destroy the task objects on it.
    ~task_list() {}

    // True if the list is empty.
    bool empty() const {return !first;}

    // Push task onto the back of the list.
    void push_back( task& task ) {
        task.prefix().next = NULL;
        *next_ptr = &task;
        next_ptr = &task.prefix().next;
    }

    // Pop the front task off the list.
    task& pop_front() {
        __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
        task* result = first;
        first = result->prefix().next;
        if( !first ) next_ptr = &first;
        return *result;
    }

    // Clear the list (the tasks themselves are untouched).
    void clear() {
        first=NULL;
        next_ptr=&first;
    }
};
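
// A minimal usage sketch (RootTask stands for a hypothetical user task type):
// collect several root tasks in a list and run them all with a single call.
//
//     tbb::task_list list;
//     for( int i=0; i<4; ++i )
//         list.push_back( *new( tbb::task::allocate_root() ) RootTask(i) );
//     tbb::task::spawn_root_and_wait( list );  // spawns every task and waits; the list ends up empty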

inline void interface5::internal::task_base::spawn( task& t ) {
    t.prefix().owner->spawn( t, t.prefix().next );
}

inline void interface5::internal::task_base::spawn( task_list& list ) {
    if( task* t = list.first ) {
        t->prefix().owner->spawn( *t, *list.next_ptr );
        list.clear();
    }
}

inline void task::spawn_root_and_wait( task_list& root_list ) {
    if( task* t = root_list.first ) {
        t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
        root_list.clear();
    }
}

} // namespace tbb

// Placement new/delete overloads implementing the task allocation protocol,
// e.g. new( tbb::task::allocate_root() ) T(...). The matching operator delete
// is invoked only if the constructor of T throws.
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
    return &tbb::internal::allocate_root_proxy::allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
    tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
}

#if __TBB_TASK_GROUP_CONTEXT
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
#endif /* __TBB_TASK_GROUP_CONTEXT */

inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

#endif /* __TBB_task_H */