17 #ifndef _TBB_custom_scheduler_H
18 #define _TBB_custom_scheduler_H
38 #if __TBB_x86_32||__TBB_x86_64
41 static const bool has_slow_atomic = false;
51 template<typename SchedulerTraits>
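The scheduler is parameterized by a SchedulerTraits class whose compile-time constants (itt_possible and has_slow_atomic above) select code paths without any run-time check. A minimal sketch of that idiom, with hypothetical trait and class names rather than TBB's own:

    #include <cstdio>

    struct default_traits {
        static const bool itt_possible   = true;  // emit profiler notifications
        static const bool has_slow_atomic = false; // atomic RMW is cheap on this target
    };

    struct slow_atomic_traits {
        static const bool itt_possible   = false;
        static const bool has_slow_atomic = true;  // prefer a plain check before the atomic op
    };

    template<typename Traits>
    struct sketch_scheduler {
        void note_release() {
            if (Traits::itt_possible)              // resolved at compile time
                std::printf("notify: releasing\n");
        }
    };

    int main() {
        sketch_scheduler<default_traits>().note_release();
        sketch_scheduler<slow_atomic_traits>().note_release(); // prints nothing
        return 0;
    }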
74 if( SchedulerTraits::itt_possible )
76 if( SchedulerTraits::has_slow_atomic && p.ref_count==1 )
86 if( SchedulerTraits::itt_possible )
91 #if __TBB_TASK_ISOLATION
99 #if __TBB_RECYCLE_TO_ENQUEUE
100 if (p.state==task::to_enqueue) {
103 my_arena->enqueue_task(s, 0, my_random );
106 if( bypass_slot==NULL )
108 #if __TBB_PREVIEW_CRITICAL_TASKS
110 local_spawn( bypass_slot, bypass_slot->prefix().next );
115 local_spawn( &s, s.prefix().next );
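The fragment above (tally_completion_of_predecessor) decrements the predecessor's ref_count and, when it reaches zero, makes the predecessor runnable, preferably by handing it back through bypass_slot instead of going through the task pool. A simplified, hypothetical illustration of that decrement-and-bypass idea, with std::atomic standing in for TBB's fetch-and-decrement primitive:

    #include <atomic>
    #include <cassert>

    struct fake_task {
        std::atomic<int> ref_count{0};
    };

    // The last completing child makes the parent runnable, preferably via the bypass slot.
    inline void on_child_completed(fake_task& parent, fake_task*& bypass_slot) {
        if (parent.ref_count.fetch_sub(1, std::memory_order_release) != 1)
            return;                                  // other children are still pending
        std::atomic_thread_fence(std::memory_order_acquire);
        if (bypass_slot == nullptr)
            bypass_slot = &parent;                   // run the parent next, skipping the pool
        // else: the real scheduler would spawn the parent into the local task pool instead
    }

    int main() {
        fake_task parent;
        parent.ref_count = 2;                        // two children outstanding
        fake_task* bypass = nullptr;
        on_child_completed(parent, bypass);
        assert(bypass == nullptr);                   // one child still running
        on_child_completed(parent, bypass);
        assert(bypass == &parent);                   // last child handed the parent back
        return 0;
    }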
121 std::memset(p, 0, sizeof(scheduler_type));
122 scheduler_type* s = new( p ) scheduler_type( m );
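allocate_scheduler obtains raw cache-aligned storage via NFS_Allocate, zeroes it, and then placement-constructs the scheduler object in it. A standalone sketch of the same allocate-zero-construct sequence, using C++17 aligned operator new in place of NFS_Allocate/NFS_Free and an illustrative type:

    #include <cstring>
    #include <new>

    struct sketch_scheduler_type {
        int id;
        explicit sketch_scheduler_type(int i) : id(i) {}
    };

    sketch_scheduler_type* allocate_sketch(int id) {
        void* p = ::operator new(sizeof(sketch_scheduler_type), std::align_val_t(128));
        std::memset(p, 0, sizeof(sketch_scheduler_type));   // start from a known state
        return new (p) sketch_scheduler_type(id);           // placement-construct in place
    }

    void free_sketch(sketch_scheduler_type* s) {
        s->~sketch_scheduler_type();
        ::operator delete(s, std::align_val_t(128));
    }

    int main() {
        sketch_scheduler_type* s = allocate_sketch(42);
        free_sketch(s);
        return 0;
    }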
137 template<typename SchedulerTraits>
140 bool outermost_worker_level = worker_outermost_level();
141 bool outermost_dispatch_level = outermost_worker_level || master_outermost_level();
142 bool can_steal_here = can_steal();
143 my_inbox.set_is_idle( true );
144 #if __TBB_HOARD_NONLOCAL_TASKS
147 #if __TBB_TASK_PRIORITY
148 if ( outermost_dispatch_level ) {
149 if ( intptr_t skipped_priority = my_arena->my_skipped_fifo_priority ) {
153 if ( my_arena->my_skipped_fifo_priority.compare_and_swap(0, skipped_priority) == skipped_priority
154 && skipped_priority > my_arena->my_top_priority )
156 my_market->update_arena_priority( *my_arena, skipped_priority );
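Lines 148-156 use a compare-and-swap to consume the recorded skipped FIFO priority exactly once: only the thread whose CAS succeeds propagates it to the arena. A rough equivalent using std::atomic's compare_exchange_strong instead of TBB's compare_and_swap:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    std::atomic<std::intptr_t> skipped_fifo_priority{2};   // pretend priority 2 was skipped earlier
    std::intptr_t top_priority = 1;

    void maybe_raise_priority() {
        std::intptr_t skipped = skipped_fifo_priority.load();
        if (skipped
            && skipped_fifo_priority.compare_exchange_strong(skipped, 0)   // we consumed it
            && skipped > top_priority) {
            top_priority = skipped;   // the original calls my_market->update_arena_priority here
            std::printf("arena priority raised to %ld\n", (long)top_priority);
        }
    }

    int main() {
        maybe_raise_priority();   // claims the skipped value and raises the priority
        maybe_raise_priority();   // no-op: the value was already consumed
        return 0;
    }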
163 size_t n = my_arena->my_limit-1;
167 for( int failure_count = -static_cast<int>(SchedulerTraits::itt_possible);; ++failure_count) {
170 if( completion_ref_count==1 ) {
171 if( SchedulerTraits::itt_possible ) {
172 if( failure_count!=-1 ) {
173 ITT_NOTIFY(sync_prepare, &completion_ref_count);
177 ITT_NOTIFY(sync_acquired, &completion_ref_count);
187 if ( outermost_worker_level && (my_arena->my_num_workers_allotted < my_arena->num_workers_active()
188 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
189 || my_arena->recall_by_mandatory_request()
192 if( SchedulerTraits::itt_possible && failure_count != -1 )
196 #if __TBB_TASK_PRIORITY
197 const int p = int(my_arena->my_top_priority);
199 static const int p = 0;
203 if ( n && !my_inbox.empty() ) {
205 #if __TBB_TASK_ISOLATION
209 if ( isolation != no_isolation && !t && !my_inbox.empty()
210 && my_inbox.is_idle_state( true ) ) {
213 my_inbox.set_is_idle( false );
223 !my_arena->my_task_stream.empty(p) && (
224 #if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
227 t = my_arena->my_task_stream.pop( p, my_arena_slot->hint_for_pop )
230 ITT_NOTIFY(sync_acquired, &my_arena->my_task_stream);
233 #if __TBB_TASK_PRIORITY
236 __TBB_ASSERT( !is_proxy(*t), "The proxy task cannot be offloaded" );
243 #if __TBB_PREVIEW_CRITICAL_TASKS
246 ITT_NOTIFY(sync_acquired, &my_arena->my_critical_task_stream);
249 #endif // __TBB_PREVIEW_CRITICAL_TASKS
254 #if __TBB_ARENA_OBSERVER
255 my_arena->my_observers.notify_entry_observers( my_last_local_observer, is_worker() );
257 #if __TBB_SCHEDULER_OBSERVER
258 the_global_observer_list.notify_entry_observers( my_last_global_observer, is_worker() );
260 if ( SchedulerTraits::itt_possible && failure_count != -1 ) {
268 if( SchedulerTraits::itt_possible && failure_count==-1 ) {
278 const int failure_threshold = 2*int(n+1);
279 if( failure_count>=failure_threshold ) {
283 failure_count = failure_threshold;
286 #if __TBB_TASK_PRIORITY
288 if ( my_arena->my_orphaned_tasks ) {
290 ++my_arena->my_abandonment_epoch;
291 task* orphans = (task*)__TBB_FetchAndStoreW( &my_arena->my_orphaned_tasks, 0 );
295 my_local_reload_epoch--;
296 t = reload_tasks( orphans, link, __TBB_ISOLATION_ARG( effective_reference_priority(), isolation ) );
298 *link = my_offloaded_tasks;
299 if ( !my_offloaded_tasks )
300 my_offloaded_task_list_tail_link = link;
301 my_offloaded_tasks = orphans;
303 __TBB_ASSERT( !my_offloaded_tasks == !my_offloaded_task_list_tail_link, NULL );
305 if( SchedulerTraits::itt_possible )
307 __TBB_ASSERT( !is_proxy(*t), "The proxy task cannot be offloaded" );
313 const int yield_threshold = 100;
314 if( yield_count++ >= yield_threshold ) {
317 #if __TBB_TASK_PRIORITY
318 if( outermost_worker_level || my_arena->my_top_priority > my_arena->my_bottom_priority ) {
319 if ( my_arena->is_out_of_work() && outermost_worker_level ) {
321 if ( outermost_worker_level && my_arena->is_out_of_work() ) {
323 if( SchedulerTraits::itt_possible )
327 #if __TBB_TASK_PRIORITY
329 if ( my_offloaded_tasks ) {
332 my_local_reload_epoch--;
337 if ( !outermost_worker_level && *my_ref_top_priority > my_arena->my_top_priority ) {
339 my_ref_top_priority = &my_arena->my_top_priority;
341 __TBB_ASSERT(my_ref_reload_epoch == &my_arena->my_reload_epoch, NULL);
347 n = my_arena->my_limit-1;
350 if ( my_inbox.is_idle_state( true ) )
351 my_inbox.set_is_idle( false );
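The retrieval loop above backs off progressively: every failed attempt pauses, once failure_count reaches 2*(n+1) the thread starts yielding, and after roughly 100 yields it gives up so the worker can leave the arena. A compact sketch of that policy, with the steal stubbed out and std::this_thread::yield standing in for prolonged_pause/__TBB_Yield:

    #include <thread>
    #include <cstddef>

    static bool try_steal_once() { return false; }   // stub: pretend there is never any work

    bool wait_for_work(std::size_t n_slots) {
        const int failure_threshold = 2 * static_cast<int>(n_slots + 1);
        const int yield_threshold = 100;
        int yield_count = 0;
        for (int failure_count = 0;; ++failure_count) {
            if (try_steal_once())
                return true;                          // got a task
            std::this_thread::yield();                // stand-in for prolonged_pause()
            if (failure_count >= failure_threshold) {
                failure_count = failure_threshold;    // clamp, as the original does
                std::this_thread::yield();            // stand-in for __TBB_Yield()
                if (++yield_count >= yield_threshold)
                    return false;                     // no work: let the worker be recalled
            }
        }
    }

    int main() { return wait_for_work(4) ? 0 : 1; }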
355 template<typename SchedulerTraits>
360 assert_task_pool_valid();
363 if( SchedulerTraits::itt_possible )
365 #if __TBB_TASK_GROUP_CONTEXT
374 parents_work_done = 1,
378 #if __TBB_TASK_PRIORITY
380 volatile intptr_t *old_ref_top_priority = my_ref_top_priority;
383 volatile uintptr_t *old_ref_reload_epoch = my_ref_reload_epoch;
385 task* old_innermost_running_task = my_innermost_running_task;
388 __TBB_ASSERT( my_properties.outermost || my_innermost_running_task!=my_dummy_task, "The outermost property should be set out of a dispatch loop" );
389 my_properties.outermost &= my_innermost_running_task==my_dummy_task;
390 #if __TBB_TASK_ISOLATION
391 isolation_tag isolation = my_innermost_running_task->prefix().isolation;
393 if( master_outermost_level() ) {
395 quit_point = &parent == my_dummy_task ? all_local_work_done : parents_work_done;
397 quit_point = parents_work_done;
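quit_point controls when the dispatch loop may return: a master waiting on its dummy task only leaves once all local work is done, while any other wait leaves as soon as the given parent's ref_count drops to parents_work_done (1, per line 374). A tiny sketch of that selection; the numeric value of all_local_work_done is not shown in the excerpt, so the one used here is a placeholder:

    #include <cassert>

    enum quit_point_t { parents_work_done = 1, all_local_work_done = 2 /* placeholder value */ };

    quit_point_t choose_quit_point(bool master_outermost, bool parent_is_dummy) {
        if (master_outermost)
            return parent_is_dummy ? all_local_work_done : parents_work_done;
        return parents_work_done;   // workers and nested waits stop at the parent
    }

    int main() {
        assert(choose_quit_point(true,  true)  == all_local_work_done);
        assert(choose_quit_point(true,  false) == parents_work_done);
        assert(choose_quit_point(false, false) == parents_work_done);
        return 0;
    }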
398 #if __TBB_TASK_PRIORITY
399 if ( &parent != my_dummy_task ) {
403 my_ref_top_priority = &parent.prefix().context->my_priority;
404 my_ref_reload_epoch = &my_arena->my_reload_epoch;
405 if(my_ref_reload_epoch != old_ref_reload_epoch)
406 my_local_reload_epoch = *my_ref_reload_epoch-1;
414 #if __TBB_TASK_ISOLATION
418 t->prefix().isolation = isolation;
422 #if TBB_USE_EXCEPTIONS
439 #if __TBB_TASK_ISOLATION
441 "A task from another isolated region is going to be executed" );
444 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_ASSERT
445 assert_context_valid(t->prefix().context);
446 if ( !t->prefix().context->my_cancellation_requested )
450 assert_task_pool_valid();
451 #if __TBB_PREVIEW_CRITICAL_TASKS
457 "Received task must be critical one" );
458 ITT_NOTIFY(sync_acquired, &my_arena->my_critical_task_stream);
460 my_innermost_running_task = t;
461 local_spawn(t, t->prefix().next);
465 #if __TBB_TASK_PRIORITY
466 intptr_t p = priority(*t);
467 if ( p != *my_ref_top_priority
469 assert_priority_valid(p);
470 if ( p != my_arena->my_top_priority ) {
471 my_market->update_arena_priority( *my_arena, p );
473 if ( p < effective_reference_priority() ) {
474 if ( !my_offloaded_tasks ) {
475 my_offloaded_task_list_tail_link = &t->prefix().next_offloaded;
478 *my_offloaded_task_list_tail_link = NULL;
480 offload_task( *t, p );
481 if ( is_task_pool_published() ) {
490 goto stealing_ground;
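When a dequeued task's priority p is below the effective reference priority (lines 473-480), the task is not executed but pushed onto a thread-local offloaded list; the tail link is recorded the first time so the list can later be spliced back cheaply. A hypothetical, minimal version of that bookkeeping:

    #include <cassert>

    struct offload_node { offload_node* next_offloaded; };

    offload_node*  offloaded_tasks     = nullptr;   // head of the offloaded list
    offload_node** offloaded_tail_link = nullptr;   // location of the last 'next' pointer

    void offload(offload_node& t) {
        if (!offloaded_tasks) {
            // First offloaded task: its 'next' field becomes the tail link,
            // and the list is explicitly terminated.
            offloaded_tail_link = &t.next_offloaded;
            t.next_offloaded = nullptr;
            offloaded_tasks = &t;
        } else {
            t.next_offloaded = offloaded_tasks;     // push on front
            offloaded_tasks = &t;
        }
    }

    int main() {
        offload_node a{nullptr}, b{nullptr};
        offload(a);
        offload(b);
        assert(offloaded_tasks == &b && b.next_offloaded == &a);
        assert(offloaded_tail_link == &a.next_offloaded && a.next_offloaded == nullptr);
        return 0;
    }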
494 #if __TBB_PREVIEW_CRITICAL_TASKS
499 my_innermost_running_task = t;
502 #if __TBB_TASK_GROUP_CONTEXT
503 if ( !t->prefix().context->my_cancellation_requested )
507 GATHER_STATISTIC( my_counters.avg_arena_concurrency += my_arena->num_workers_active() );
508 GATHER_STATISTIC( my_counters.avg_assigned_workers += my_arena->my_num_workers_allotted );
509 #if __TBB_TASK_PRIORITY
511 GATHER_STATISTIC( my_counters.avg_market_prio += my_market->my_global_top_priority );
513 ITT_STACK(SchedulerTraits::itt_possible, callee_enter, t->prefix().context->itt_caller);
514 #if __TBB_PREVIEW_CRITICAL_TASKS
515 internal::critical_task_count_guard tc_guard(my_properties, *t);
518 ITT_STACK(SchedulerTraits::itt_possible, callee_leave, t->prefix().context->itt_caller);
521 "if task::execute() returns task, it must be marked as allocated" );
526 if (next_affinity != 0 && next_affinity != my_affinity_id)
531 assert_task_pool_valid();
532 switch( t->state() ) {
534 task* s = t->parent();
536 __TBB_ASSERT( t->prefix().ref_count==0, "Task still has children after it has been executed" );
540 free_task<no_hint>( *t );
542 assert_task_pool_valid();
548 #if __TBB_RECYCLE_TO_ENQUEUE
550 case task::to_enqueue:
552 __TBB_ASSERT( t_next != t, "a task returned from method execute() can not be recycled in another way" );
556 assert_task_pool_valid();
560 __TBB_ASSERT( t_next, "reexecution requires that method execute() return another task" );
561 __TBB_ASSERT( t_next != t, "a task returned from method execute() can not be recycled in another way" );
564 local_spawn( t, t->prefix().next );
565 assert_task_pool_valid();
572 __TBB_ASSERT( false, "task is in READY state upon return from method execute()" );
585 assert_task_pool_valid();
586 if ( parent.prefix().ref_count == quit_point ) {
587 __TBB_ASSERT( quit_point != all_local_work_done, NULL );
592 if ( is_task_pool_published() ) {
595 __TBB_ASSERT( is_quiescent_local_task_pool_reset(), NULL );
598 assert_task_pool_valid();
605 #if __TBB_TASK_PRIORITY
608 #if __TBB_HOARD_NONLOCAL_TASKS
610 for (; my_nonlocal_free_list; my_nonlocal_free_list = t ) {
611 t = my_nonlocal_free_list->prefix().next;
612 free_nonlocal_small_task( *my_nonlocal_free_list );
615 if ( quit_point == all_local_work_done ) {
616 __TBB_ASSERT( !is_task_pool_published() && is_quiescent_local_task_pool_reset(), NULL );
618 my_innermost_running_task = old_innermost_running_task;
619 my_properties = old_properties;
620 #if __TBB_TASK_PRIORITY
621 my_ref_top_priority = old_ref_top_priority;
622 if(my_ref_reload_epoch != old_ref_reload_epoch)
623 my_local_reload_epoch = *old_ref_reload_epoch-1;
624 my_ref_reload_epoch = old_ref_reload_epoch;
637 #if TBB_USE_EXCEPTIONS
640 TbbCatchAll( t->prefix().context );
643 #if __TBB_RECYCLE_TO_ENQUEUE
645 || t->state() == task::to_enqueue
650 if( SchedulerTraits::itt_possible )
653 if( SchedulerTraits::itt_possible )
654 ITT_NOTIFY(sync_acquired, &t->prefix().ref_count);
663 my_innermost_running_task = old_innermost_running_task;
664 my_properties = old_properties;
665 #if __TBB_TASK_PRIORITY
666 my_ref_top_priority = old_ref_top_priority;
667 if(my_ref_reload_epoch != old_ref_reload_epoch)
668 my_local_reload_epoch = *old_ref_reload_epoch-1;
669 my_ref_reload_epoch = old_ref_reload_epoch;
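Both exit paths of the dispatch loop (the normal one around lines 618-624 and the exception path around lines 663-669) restore the previously saved reference-priority and reload-epoch pointers, refreshing the local epoch whenever the binding changed. The original does this with explicit assignments; the same idea expressed as a small RAII guard, with illustrative member names:

    #include <cstdint>

    struct dispatch_state {
        volatile std::intptr_t*  ref_top_priority  = nullptr;
        volatile std::uintptr_t* ref_reload_epoch  = nullptr;
        std::uintptr_t           local_reload_epoch = 0;
    };

    class state_restorer {
        dispatch_state& s_;
        volatile std::intptr_t*  old_prio_;
        volatile std::uintptr_t* old_epoch_;
    public:
        explicit state_restorer(dispatch_state& s)
            : s_(s), old_prio_(s.ref_top_priority), old_epoch_(s.ref_reload_epoch) {}
        ~state_restorer() {                           // runs on return or exception
            s_.ref_top_priority = old_prio_;
            if (s_.ref_reload_epoch != old_epoch_)
                s_.local_reload_epoch = *old_epoch_ - 1;   // force a reload after rebinding
            s_.ref_reload_epoch = old_epoch_;
        }
    };

    int main() {
        dispatch_state st;
        std::uintptr_t outer_epoch = 5, inner_epoch = 9;
        st.ref_reload_epoch = &outer_epoch;
        {
            state_restorer guard(st);
            st.ref_reload_epoch = &inner_epoch;   // a nested wait rebinds the reference
        }                                          // guard restores the outer binding
        return st.ref_reload_epoch == &outer_epoch ? 0 : 1;
    }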
672 if ( parent.prefix().ref_count != parents_work_done ) {
675 "Worker thread exits nested dispatch loop prematurely" );
678 parent.prefix().ref_count = 0;
683 #if __TBB_TASK_GROUP_CONTEXT
686 if ( parent_ctx->my_cancellation_requested ) {
688 if ( master_outermost_level() && parent_ctx == default_context() ) {
691 parent_ctx->my_cancellation_requested = 0;
699 context_guard.restore_default();
700 TbbRethrowException( pe );
703 __TBB_ASSERT(!is_worker() || !CancellationInfoPresent(*my_dummy_task),
704 "Worker's dummy task context modified");
705 __TBB_ASSERT(!master_outermost_level() || !CancellationInfoPresent(*my_dummy_task),
706 "Unexpected exception or cancellation data in the master's dummy task");
708 assert_task_pool_valid();
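For orientation, this dispatch loop is what a blocking call on the classic tbb::task API eventually spins in: wait_for_all() is the entry point noted in the reference section below, and local_wait_for_all() is the loop itself. A minimal example using that classic (now deprecated) API, assuming the old <tbb/task.h> header is available:

    #include <tbb/task.h>
    #include <cstdio>

    struct hello_task : tbb::task {
        tbb::task* execute() {
            std::printf("running inside the scheduler's dispatch loop\n");
            return NULL;                        // nothing to bypass to
        }
    };

    int main() {
        hello_task& t = *new( tbb::task::allocate_root() ) hello_task;
        tbb::task::spawn_root_and_wait(t);      // blocks in local_wait_for_all() until t completes
        return 0;
    }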
static const intptr_t num_priority_levels
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
#define __TBB_ISOLATION_EXPR(isolation)
intptr_t reference_count
A reference count.
task object is freshly allocated or recycled.
#define ITT_SYNC_CREATE(obj, type, name)
void local_wait_for_all(task &parent, task *child) __TBB_override
Scheduler loop that dispatches tasks.
static const bool itt_possible
Traits classes for scheduler.
#define __TBB_FetchAndDecrementWrelease(P)
#define GATHER_STATISTIC(x)
void poison_pointer(T *__TBB_atomic &)
internal::tbb_exception_ptr exception_container_type
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
void wait_for_all(task &parent, task *child) __TBB_override
Entry point from client code to the scheduler loop that dispatches tasks.
task is running, and will be destroyed after method execute() completes.
A scheduler with a customized evaluation loop.
void tally_completion_of_predecessor(task &s, __TBB_ISOLATION_ARG(task *&bypass_slot, isolation_tag isolation))
Decrements ref_count of a predecessor.
unsigned short affinity_id
An id as used for specifying affinity.
#define ITT_STACK(precond, name, obj)
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
virtual task * execute()=0
Should be overridden by derived classes.
custom_scheduler< SchedulerTraits > scheduler_type
#define __TBB_fallthrough
void set_ctx(__TBB_CONTEXT_ARG1(task_group_context *))
Memory prefix to a task object.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
void assert_task_valid(const task *)
bool ConcurrentWaitsEnabled(task &t)
#define __TBB_control_consistency_helper()
state_type state() const
Current execution state.
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
task * receive_or_steal_task(__TBB_ISOLATION_ARG(__TBB_atomic reference_count &completion_ref_count, isolation_tag isolation)) __TBB_override
Try getting a task from the mailbox or stealing from another scheduler.
task to be recycled as continuation
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
Used to form groups of tasks.
unsigned char state
A task::state_type, stored as a byte for compactness.
static const bool has_slow_atomic
Bit-field representing properties of a scheduler.
task is in ready pool, or is going to be put there, or was just taken off.
#define __TBB_CONTEXT_ARG1(context)
#define ITT_NOTIFY(name, obj)
custom_scheduler(market &m)
bool is_critical(task &t)
int ref_count() const
The internal reference count.
static generic_scheduler * allocate_scheduler(market &m)
Set if ref_count might be changed by another thread. Used for debugging.
intptr_t isolation_tag
A tag for task isolation.
Base class for user-defined tasks.
Work stealing task scheduler.
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
isolation_tag isolation
The tag used for task isolation.
#define __TBB_ISOLATION_ARG(arg1, isolation)
const isolation_tag no_isolation
void reset_extra_state(task *t)
void assert_task_pool_valid() const
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.