  35 extern generic_scheduler* (*AllocateSchedulerPtr)( market& );
  41 #if __TBB_TASK_GROUP_CONTEXT
  42 context_state_propagation_mutex_type the_context_state_propagation_mutex;
  44 uintptr_t the_context_state_propagation_epoch = 0;
  55 #if __TBB_TASK_GROUP_CONTEXT
  59 #if __TBB_TASK_PRIORITY
  78 #if _MSC_VER && !defined(__INTEL_COMPILER)
  81 #pragma warning(disable:4355)
  88     , my_small_task_count(1)
  90     , my_cilk_state(cs_none)
  97 #if __TBB_PREVIEW_CRITICAL_TASKS
 101 #if __TBB_TASK_PRIORITY
 102     my_ref_top_priority = &m.my_global_top_priority;
 103     my_ref_reload_epoch = &m.my_global_reload_epoch;
 105 #if __TBB_TASK_GROUP_CONTEXT
 107     my_context_state_propagation_epoch = the_context_state_propagation_epoch;
 108     my_context_list_head.my_prev = &my_context_list_head;
 109     my_context_list_head.my_next = &my_context_list_head;
 110     ITT_SYNC_CREATE(&my_context_list_mutex, SyncType_Scheduler, SyncObj_ContextsList);
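Lines 108-109 above make my_context_list_head a one-element circular doubly-linked list: an empty list is a head whose prev and next both point back at itself, so insertion and removal need no NULL checks. A minimal standalone sketch of that idiom follows; context_node and the helper functions are illustrative names, not TBB's.

// Sketch of the circular intrusive-list idiom used for the context list.
struct context_node {
    context_node* my_prev;
    context_node* my_next;
};

// An empty list is a head node that points at itself in both directions.
inline void list_init( context_node& head ) {
    head.my_prev = &head;
    head.my_next = &head;
}

inline void list_push_front( context_node& head, context_node& n ) {
    n.my_prev = &head;
    n.my_next = head.my_next;
    head.my_next->my_prev = &n;
    head.my_next = &n;
}

inline void list_remove( context_node& n ) {
    n.my_prev->my_next = n.my_next;
    n.my_next->my_prev = n.my_prev;
}

// Traversal stops when it wraps around to the head, exactly as the
// cleanup loop over my_context_list_head does.
inline bool list_empty( const context_node& head ) {
    return head.my_next == &head;
}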
 116 #if _MSC_VER && !defined(__INTEL_COMPILER)
 118 #endif // warning 4355 is back
 120 #if TBB_USE_ASSERT > 1
 131     for ( size_t i = 0; i < H; ++i )
 132         __TBB_ASSERT( tp[i] == poisoned_ptr, "Task pool corrupted" );
 133     for ( size_t i = H; i < T; ++i ) {
 137             tp[i]->prefix().extra_state == es_task_proxy, "task in the deque has invalid state" );
 141         __TBB_ASSERT( tp[i] == poisoned_ptr, "Task pool corrupted" );
 152 #if defined(_MSC_VER) && _MSC_VER<1400 && !_WIN64
 154     __asm mov eax, fs:[0x18]
 157     NT_TIB* pteb = (NT_TIB*)NtCurrentTeb();
 159     __TBB_ASSERT( &pteb < pteb->StackBase && &pteb > pteb->StackLimit, "invalid stack info in TEB" );
 160     __TBB_ASSERT( stack_size > 0, "stack_size not initialized?" );
 178     void* stack_base = &stack_size;
 179 #if __linux__ && !__bg__
 183     size_t np_stack_size = 0;
 184     void* stack_limit = NULL;
 185     pthread_attr_t np_attr_stack;
 186     if ( 0 == pthread_getattr_np(pthread_self(), &np_attr_stack) ) {
 187         if ( 0 == pthread_attr_getstack(&np_attr_stack, &stack_limit, &np_stack_size) ) {
 189             pthread_attr_t attr_stack;
 190             if ( 0 == pthread_attr_init(&attr_stack) ) {
 191                 if ( 0 == pthread_attr_getstacksize(&attr_stack, &stack_size) ) {
 192                     if ( np_stack_size < stack_size ) {
 195                         rsb_base = stack_limit;
 196                         stack_size = np_stack_size/2;
 198                         stack_limit = (char*)stack_limit + stack_size;
 204                 pthread_attr_destroy(&attr_stack);
 207             my_rsb_stealing_threshold = (uintptr_t)((char*)rsb_base + stack_size/2);
 210             stack_size = size_t((char*)stack_base - (char*)stack_limit);
 212         pthread_attr_destroy(&np_attr_stack);
 215     __TBB_ASSERT( stack_size > 0, "stack size must be positive" );
 220 #if __TBB_TASK_GROUP_CONTEXT
 226 void generic_scheduler::cleanup_local_context_list () {
 228     bool wait_for_concurrent_destroyers_to_leave = false;
 229     uintptr_t local_count_snapshot = my_context_state_propagation_epoch;
 230     my_local_ctx_list_update.store<relaxed>(1);
 238     if ( my_nonlocal_ctx_list_update.load<relaxed>() || local_count_snapshot != the_context_state_propagation_epoch )
 239         lock.acquire(my_context_list_mutex);
 243     while ( node != &my_context_list_head ) {
 250         wait_for_concurrent_destroyers_to_leave = true;
 253     my_local_ctx_list_update.store<release>(0);
 255     if ( wait_for_concurrent_destroyers_to_leave )
 262 #if __TBB_PREVIEW_CRITICAL_TASKS
 265 #if __TBB_TASK_GROUP_CONTEXT
 266     cleanup_local_context_list();
 268     free_task<small_local_task>( *my_dummy_task );
 270 #if __TBB_HOARD_NONLOCAL_TASKS
 271     while ( task* t = my_nonlocal_free_list ) {
 273         my_nonlocal_free_list = p.next;
 290 #if __TBB_COUNT_TASK_NODES
 291     my_market->update_task_node_count( my_task_node_count );
 305 #if __TBB_HOARD_NONLOCAL_TASKS
 306     if ( (t = my_nonlocal_free_list) ) {
 309         my_nonlocal_free_list = t->prefix().next;
 319         __TBB_ASSERT( t, "another thread emptied the my_return_list" );
 325 #if __TBB_COUNT_TASK_NODES
 326         ++my_task_node_count;
 328         t->prefix().origin = this;
 332 #if __TBB_PREFETCHING
 335 #if __TBB_HOARD_NONLOCAL_TASKS
 351 #if __TBB_COUNT_TASK_NODES
 352     ++my_task_node_count;
 354     t->prefix().origin = NULL;
 357 #if __TBB_TASK_GROUP_CONTEXT
 380     task* old = s.my_return_list;
 386     if ( as_atomic(s.my_return_list).compare_and_swap(&t, old) == old ) {
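The compare_and_swap on line 386 pushes a freed small task back onto its origin scheduler's my_return_list, a lock-free singly-linked stack (the real code also refuses the push when the list has been plugged via plugged_return_list(), listed further below). A minimal sketch of that push with std::atomic; the node type and function name are illustrative.

// Sketch of a lock-free push onto a "return list" with a CAS retry loop.
#include <atomic>

struct node { node* next; };

void return_to_owner( std::atomic<node*>& return_list, node* t ) {
    node* old = return_list.load( std::memory_order_relaxed );
    do {
        t->next = old;    // link the new node on top of the current head
    } while ( !return_list.compare_exchange_weak( old, t,
                  std::memory_order_release, std::memory_order_relaxed ) );
}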
 387 #if __TBB_PREFETCHING
 404     if ( T + num_tasks <= my_arena_slot->my_task_pool_size )
 422     for ( size_t i = H; i < T; ++i )
 431     if ( new_size < 2 * my_arena_slot->my_task_pool_size )
 437     for ( size_t i = H; i < T; ++i )
 459     bool sync_prepare_done = false;
 475     else if ( !sync_prepare_done ) {
 478         sync_prepare_done = true;
 501     task** victim_task_pool;
 502     bool sync_prepare_done = false;
 504         victim_task_pool = victim_arena_slot->task_pool;
 510             if ( sync_prepare_done )
 521         else if ( !sync_prepare_done ) {
 524             sync_prepare_done = true;
 528 #if __TBB_STEALING_ABORT_ON_CONTENTION
 529         if ( !backoff.bounded_pause() ) {
 545                   "not really locked victim's task pool?" );
 546     return victim_task_pool;
 550                                          task** victim_task_pool ) const {
 551     __TBB_ASSERT( victim_arena_slot, "empty victim arena slot pointer" );
 564     __TBB_ASSERT( ref_count >= 0, "attempt to spawn task whose parent has a ref_count<0" );
 565     __TBB_ASSERT( ref_count != 0, "attempt to spawn task whose parent has a ref_count==0 (forgot to set_ref_count?)" );
 571         "backwards compatibility to TBB 2.0 tasks is broken" );
 572 #if __TBB_TASK_ISOLATION
 574     t->prefix().isolation = isolation;
 584 #if __TBB_TASK_PRIORITY
 596 #if __TBB_PREVIEW_CRITICAL_TASKS
 597 bool generic_scheduler::handled_as_critical( task& t ) {
 600 #if __TBB_TASK_ISOLATION
 606     my_arena->my_critical_task_stream.push(
 621     if ( &first->prefix().next == &next ) {
 630 #if __TBB_PREVIEW_CRITICAL_TASKS
 631         if ( !handled_as_critical( *first ) )
 656     for ( task* t = first; ; t = t_next ) {
 661         t_next = t->prefix().next;
 662 #if __TBB_PREVIEW_CRITICAL_TASKS
 663         if ( !handled_as_critical( *t ) )
 669     if ( size_t num_tasks = tasks.size() ) {
 689         t->prefix().parent = &dummy;
 690         if ( &t->prefix().next == &next ) break;
 691 #if __TBB_TASK_GROUP_CONTEXT
 693         "all the root tasks in list must share the same context");
 717 #if __TBB_TASK_PRIORITY
 718 class auto_indicator : no_copy {
 719     volatile bool& my_indicator;
 721     auto_indicator ( volatile bool& indicator ) : my_indicator(indicator) { my_indicator = true; }
 722     ~auto_indicator () { my_indicator = false; }
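auto_indicator is a plain RAII flag guard: the referenced flag is raised for the lifetime of a scope and cleared on any exit path. A hypothetical usage sketch, assuming the constructor is public as it is in the full source; the surrounding function and flag are illustrative.

// Hypothetical caller showing the RAII flag pattern; my_pool_reshuffling_pending
// plays the same role as in the pool-winnowing code below.
volatile bool my_pool_reshuffling_pending = false;

void reshuffle_pool() {
    auto_indicator indicator( my_pool_reshuffling_pending );  // flag raised here
    // ... move lower-priority tasks out of the local pool ...
}                                                             // flag cleared on any exit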
 730 #if __TBB_TASK_ISOLATION
 732     bool tasks_omitted = false;
 733     while ( !t && T > H0 ) {
 734         t = get_task( --T, isolation, tasks_omitted );
 735         if ( !tasks_omitted ) {
 741     if ( t && tasks_omitted ) {
 771 #if __TBB_TASK_ISOLATION
 786     __TBB_ASSERT( my_offloaded_tasks, "At least one task is expected to be already offloaded" );
 793     auto_indicator indicator( my_pool_reshuffling_pending );
 803     for ( size_t src = H0; src < T0; ++src ) {
 807         intptr_t p = priority( *t );
 808         if ( p < *my_ref_top_priority ) {
 809             offload_task( *t, p );
 825 #if __TBB_TASK_ISOLATION
 836     task** link = &offloaded_tasks;
 837     while ( task* t = *link ) {
 838         task** next_ptr = &t->prefix().next_offloaded;
 840         if ( priority(*t) >= top_priority ) {
 844             task* next = *next_ptr;
 853     if ( link == &offloaded_tasks ) {
 854         offloaded_tasks = NULL;
 856         offloaded_task_list_link = NULL;
 863         offloaded_task_list_link = link;
 866     size_t num_tasks = tasks.size();
 879         if ( t ) --num_tasks;
 887     uintptr_t reload_epoch = *my_ref_reload_epoch;
 890         || my_local_reload_epoch - reload_epoch > uintptr_t(-1)/2,
 891         "Reload epoch counter overflow?" );
 892     if ( my_local_reload_epoch == reload_epoch )
 895     intptr_t top_priority = effective_reference_priority();
 897     task* t = reload_tasks( my_offloaded_tasks, my_offloaded_task_list_tail_link, __TBB_ISOLATION_ARG( top_priority, isolation ) );
 914     my_local_reload_epoch = reload_epoch;
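The reload-epoch test around lines 887-914 is a version-counter check: the expensive reload of offloaded tasks is skipped unless the shared epoch has advanced since this thread last looked. A small sketch of the pattern with assumed names.

// Sketch of an epoch (version counter) check used to avoid repeated rescans.
#include <atomic>
#include <cstdint>

std::atomic<std::uintptr_t> global_reload_epoch{ 0 };   // bumped whenever new work may appear

struct worker {
    std::uintptr_t local_reload_epoch = 0;

    bool maybe_reload() {
        std::uintptr_t epoch = global_reload_epoch.load( std::memory_order_acquire );
        if ( local_reload_epoch == epoch )
            return false;                 // nothing changed since the last look: skip the scan
        // ... scan and reload offloaded tasks here ...
        local_reload_epoch = epoch;       // remember what has already been seen
        return true;
    }
};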
 919 #if __TBB_TASK_ISOLATION
 929     __TBB_ASSERT( !is_poisoned( result ), "The poisoned task is going to be processed" );
 930 #if __TBB_TASK_ISOLATION
 935     if ( !omit && !is_proxy( *result ) )
 938         tasks_omitted = true;
 943     if ( !result || !is_proxy( *result ) )
 954 #if __TBB_TASK_ISOLATION
 956     if ( !tasks_omitted )
 966         free_task<small_task>( tp );
 967 #if __TBB_TASK_ISOLATION
 979     size_t H0 = (size_t)-1, T = T0;
 981     bool task_pool_empty = false;
 990         if ( (intptr_t)H0 > (intptr_t)T ) {
 994                 && H0 == T + 1, "victim/thief arbitration algorithm failure" );
 997             task_pool_empty = true;
 999         } else if ( H0 == T ) {
1002            task_pool_empty = true;
1011 #if __TBB_TASK_ISOLATION
1012            result = get_task( T, isolation, tasks_omitted );
1016        } else if ( !tasks_omitted ) {
1024    } while ( !result && !task_pool_empty );
1026 #if __TBB_TASK_ISOLATION
1027    if ( tasks_omitted ) {
1028        if ( task_pool_empty ) {
1091        free_task<no_cache_small_task>(tp);
1099    t->prefix().owner = this;
1110    task* result = NULL;
1113    bool tasks_omitted = false;
1125        result = victim_pool[H-1];
1141            tasks_omitted = true;
1142        } else if ( !tasks_omitted ) {
1148    } while ( !result );
1152    ITT_NOTIFY( sync_acquired, (void*)((uintptr_t)&victim_slot + sizeof(uintptr_t)) );
1154    if ( tasks_omitted ) {
1156        victim_pool[H-1] = NULL;
1161 #if __TBB_PREFETCHING
1165    if ( tasks_omitted )
1171 #if __TBB_PREVIEW_CRITICAL_TASKS
1179    if ( my_arena->my_critical_task_stream.empty(0) )
1181    task* critical_task = NULL;
1184 #if __TBB_TASK_ISOLATION
1186    critical_task = my_arena->my_critical_task_stream.pop_specific( 0, start_lane, isolation );
1192    return critical_task;
1205    free_task<no_cache_small_task>(*tp);
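steal_task() picks its victim slot at random; the reference entries further below note that FastRandom is the generator used for exactly this. A minimal sketch of victim selection with a stand-in xorshift generator; all names here are illustrative.

// Sketch: pick a random victim slot in [0, num_slots), retrying if it names the thief itself.
#include <cstddef>
#include <cstdint>

struct xorshift32 {
    std::uint32_t state;
    explicit xorshift32( std::uint32_t seed ) : state( seed ? seed : 1u ) {}
    std::uint32_t next() {
        state ^= state << 13;
        state ^= state >> 17;
        state ^= state << 5;
        return state;
    }
};

std::size_t choose_victim( xorshift32& rng, std::size_t num_slots, std::size_t my_index ) {
    std::size_t victim = rng.next() % num_slots;
    while ( num_slots > 1 && victim == my_index )   // never steal from ourselves
        victim = rng.next() % num_slots;
    return victim;
}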
1212    __TBB_ASSERT( my_arena_index < my_arena->my_num_slots, "arena slot index is out-of-bound" );
1216        "entering arena without tasks to share" );
1255    t.prefix().ref_count = 1;
1256 #if __TBB_TASK_GROUP_CONTEXT
1259 #if __TBB_FP_CONTEXT
1260    s->default_context()->capture_fp_settings();
1264    context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
1265    s->my_market->my_masters.push_front( *s );
1272    a->my_default_ctx = s->default_context();
1278    s->my_market->register_master( s->master_exec_resource );
1281 #if __TBB_ARENA_OBSERVER
1282    __TBB_ASSERT( !a || a->my_observers.empty(), "Just created arena cannot have any observers associated with it" );
1284 #if __TBB_SCHEDULER_OBSERVER
1285    the_global_observer_list.notify_entry_observers( s->my_last_global_observer, false );
1293 #if __TBB_SCHEDULER_OBSERVER
1295    the_global_observer_list.notify_exit_observers( s.my_last_global_observer, true );
1321 #if __TBB_ARENA_OBSERVER
1323        a->my_observers.notify_exit_observers( my_last_local_observer, false );
1325 #if __TBB_SCHEDULER_OBSERVER
1326    the_global_observer_list.notify_exit_observers( my_last_global_observer, false );
1329    m->unregister_master( master_exec_resource );
1333 #if __TBB_STATISTICS
1338 #if __TBB_TASK_GROUP_CONTEXT
1340    default_context()->~task_group_context();
1343    context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
1353    return m->release( a != NULL, blocking_terminate );
task * get_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Get a task from the local pool.
static const intptr_t mailbox_bit
market * my_market
The market I am in.
generic_scheduler * allocate_scheduler(market &m)
void free_scheduler()
Destroy and deallocate this scheduler object.
bool outermost
Indicates that a scheduler is on outermost level.
scheduler_properties my_properties
atomic< unsigned > my_limit
The maximal number of currently busy slots.
static const intptr_t num_priority_levels
static void cleanup_worker(void *arg, bool worker)
Perform necessary cleanup when a worker thread finishes.
int depth
Obsolete. Used to be scheduling depth before TBB 2.2.
void release_task_pool() const
Unlocks the local task pool.
virtual void local_wait_for_all(task &parent, task *child)=0
T __TBB_load_relaxed(const volatile T &location)
size_t prepare_task_pool(size_t n)
Makes sure that the task pool can accommodate at least n more elements.
static const intptr_t location_mask
static const unsigned ref_external
Reference increment values for externals and workers.
void unlock_task_pool(arena_slot *victim_arena_slot, task **victim_task_pool) const
Unlocks victim's task pool.
void local_spawn_root_and_wait(task *first, task *&next)
void init_stack_info()
Sets up the data necessary for the stealing limiting heuristics.
task_group_context * context()
This method is deprecated and will be removed in the future.
tbb::task * next
"next" field for list of task
task * my_dummy_task
Fake root task created by slave threads.
generic_scheduler * my_scheduler
Scheduler of the thread attached to the slot.
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
bool is_quiescent_local_task_pool_empty() const
task * extract_task()
Returns a pointer to the encapsulated task or NULL, and frees proxy if necessary. ...
void __TBB_store_relaxed(volatile T &location, V value)
void Scheduler_OneTimeInitialization(bool itt_present)
Defined in scheduler.cpp.
Class that implements exponential backoff.
void spin_wait_until_eq(const volatile T &location, const U value)
Spin UNTIL the value of the variable is equal to a given value.
#define __TBB_ISOLATION_EXPR(isolation)
void commit_spawned_tasks(size_t new_tail)
Makes newly spawned tasks visible to thieves.
intptr_t reference_count
A reference count.
static bool is_shared(intptr_t tat)
True if the proxy is stored both in its sender's pool and in the destination mailbox.
task object is freshly allocated or recycled.
static void sign_on(generic_scheduler *s)
Register TBB scheduler instance in thread-local storage.
void deallocate_task(task &t)
Return task object to the memory allocator.
#define ITT_SYNC_CREATE(obj, type, name)
bool is_task_pool_published() const
void copy_memory(T *dst) const
Copies the contents of the vector into the dst array.
task **__TBB_atomic task_pool
mail_outbox & mailbox(affinity_id id)
Get reference to mailbox corresponding to given affinity_id.
#define __TBB_get_object_ref(class_name, member_name, member_addr)
Returns address of the object containing a member with the given name and address.
void reset_task_pool_and_leave()
Resets head and tail indices to 0, and leaves task pool.
void fill_with_canary_pattern(size_t, size_t)
#define __TBB_FetchAndDecrementWrelease(P)
#define __TBB_cl_prefetch(p)
#define GATHER_STATISTIC(x)
static bool is_proxy(const task &t)
True if t is a task_proxy.
void spawn(task &first, task *&next) __TBB_override
For internal use only.
void poison_pointer(T *__TBB_atomic &)
__TBB_atomic size_t tail
Index of the element following the last ready task in the deque.
const size_t task_prefix_reservation_size
Number of bytes reserved for a task prefix.
task * prepare_for_spawning(task *t)
Checks if t is affinitized to another thread, and if so, bundles it as proxy.
static task * plugged_return_list()
Special value used to mark my_return_list as not taking any more entries.
auto first(Container &c) -> decltype(begin(c))
static bool is_version_3_task(task &t)
void push_back(const T &val)
void pause()
Pause for a while.
void acquire_task_pool() const
Locks the local task pool.
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
int my_num_workers_requested
The number of workers that are currently requested from the resource manager.
void enqueue(task &, void *reserved) __TBB_override
For internal use only.
void free_nonlocal_small_task(task &t)
Free a small task t that was allocated by a different scheduler.
static const size_t min_task_pool_size
void attach_arena(arena *, size_t index, bool is_master)
A scheduler with a customized evaluation loop.
void * __TBB_get_bsp()
Retrieves the current RSE backing store pointer. IA64 specific.
size_t worker_stack_size() const
Returns the requested stack size of worker threads.
bool type
Indicates that a scheduler acts as a master or a worker.
void advertise_new_work()
If necessary, raise a flag that there is new job in arena.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
static generic_scheduler * create_master(arena *a)
Initialize a scheduler for a master thread.
unsigned short affinity_id
An id as used for specifying affinity.
static market & global_market(bool is_public, unsigned max_num_workers=0, size_t stack_size=0)
Factory method creating new market object.
task ** lock_task_pool(arena_slot *victim_arena_slot) const
Locks victim's task pool, and returns pointer to it. The pointer can be NULL.
atomic< T > & as_atomic(T &t)
static generic_scheduler * create_worker(market &m, size_t index)
Initialize a scheduler for a worker thread.
task * steal_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Attempts to steal a task from a randomly chosen thread/scheduler.
task & allocate_task(size_t number_of_bytes, __TBB_CONTEXT_ARG(task *parent, task_group_context *context))
Allocate task object, either from the heap or a free list.
scheduler * owner
Obsolete. The scheduler that owns the task.
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
void acquire(spin_mutex &m)
Acquire lock.
static const kind_type binding_required
affinity_id my_affinity_id
The mailbox id assigned to this scheduler.
task_proxy * pop(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Get next piece of mail, or NULL if mailbox is empty.
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
task * my_return_list
List of small tasks that have been returned to this scheduler by other schedulers.
bool is_worker() const
True if running on a worker thread, false otherwise.
task * steal_task_from(__TBB_ISOLATION_ARG(arena_slot &victim_arena_slot, isolation_tag isolation))
Steal task from another scheduler's ready pool.
Memory prefix to a task object.
static const size_t quick_task_size
If sizeof(task) is <=quick_task_size, it is handled on a free list instead of malloc'd.
Represents acquisition of a mutex.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
FastRandom my_random
Random number generator used for picking a random victim from which to steal.
void assert_task_valid(const task *)
size_t my_task_pool_size
Capacity of the primary task pool (number of elements - pointers to task).
void allocate_task_pool(size_t n)
virtual ~scheduler()=0
Pure virtual destructor.
#define __TBB_control_consistency_helper()
state_type state() const
Current execution state.
uintptr_t my_stealing_threshold
Position in the call stack specifying its maximal filling when stealing is still allowed.
#define __TBB_CONTEXT_ARG(arg1, context)
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
void leave_task_pool()
Leave the task pool.
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
void push(task_proxy *t)
Push task_proxy onto the mailbox queue of another thread.
void __TBB_store_with_release(volatile T &location, V value)
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)
task * get_mailbox_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Attempt to get a task from the mailbox.
unsigned char state
A task::state_type, stored as a byte for compactness.
Used to form groups of tasks.
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
#define __TBB_cl_evict(p)
void enqueue_task(task &, intptr_t, FastRandom &)
enqueue a task into starvation-resistance queue
task is in ready pool, or is going to be put there, or was just taken off.
void spawn_root_and_wait(task &first, task *&next) __TBB_override
For internal use only.
virtual void __TBB_EXPORTED_METHOD note_affinity(affinity_id id)
Invoked by scheduler to notify task that it ran on unexpected thread.
static void sign_off(generic_scheduler *s)
Unregister TBB scheduler instance from thread-local storage.
task object is on free list, or is going to be put there, or was just taken off.
#define ITT_NOTIFY(name, obj)
intptr_t my_priority
Priority level of the task group (in normalized representation)
tbb::task * parent
The task whose reference count includes me.
generic_scheduler(market &)
static const kind_type detached
bool is_critical(task &t)
static const kind_type dying
Base class for types that should not be copied or assigned.
Vector that grows without reallocations, and stores items in the reverse order.
static const intptr_t pool_bit
Set if ref_count might be changed by another thread. Used for debugging.
intptr_t isolation_tag
A tag for task isolation.
Base class for user-defined tasks.
Work stealing task scheduler.
arena_slot * my_arena_slot
Pointer to the slot in the arena we own at the moment.
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
isolation_tag isolation
The tag used for task isolation.
task * my_free_list
Free list of small tasks that can be reused.
void publish_task_pool()
Used by workers to enter the task pool.
#define __TBB_ISOLATION_ARG(arg1, isolation)
const isolation_tag no_isolation
context_list_node_t * my_next
generic_scheduler *(* AllocateSchedulerPtr)(market &)
Pointer to the scheduler factory function.
size_t my_arena_index
Index of the arena slot the scheduler occupies now, or occupied last time.
bool is_local_task_pool_quiescent() const
void commit_relocated_tasks(size_t new_tail)
Makes relocated tasks visible to thieves and releases the local task pool.
bool cleanup_master(bool blocking_terminate)
Perform necessary cleanup when a master thread stops using TBB.
void atomic_fence()
Sequentially consistent full memory fence.
__TBB_atomic intptr_t my_small_task_count
Number of small tasks that have been allocated by this scheduler.
T max(const T &val1, const T &val2)
Utility template function returning greater of the two values.
Smart holder for the empty task class with automatic destruction.
task * my_innermost_running_task
Innermost task whose task::execute() is running. A dummy task on the outermost level.
mail_outbox * outbox
Mailbox to which this was mailed.
task **__TBB_atomic task_pool_ptr
void local_spawn(task *first, task *&next)
bool is_quiescent_local_task_pool_reset() const
unsigned short get()
Get a random number.
void on_thread_leaving()
Notification that worker or master leaves its arena.
__TBB_atomic size_t head
Index of the first ready task in the deque.
scheduler * origin
The scheduler that allocated the task, or NULL if the task is big.
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
bool release(bool is_public, bool blocking_terminate)
Decrements market's refcount and destroys it in the end.
uintptr_t my_state
Internal state (combination of state flags, currently only may_have_children).
void assert_task_pool_valid() const
Set if the task has been stolen.
bool recipient_is_idle()
True if thread that owns this mailbox is looking for work.
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.