#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
// ...
#include "../rml/include/rml_tbb.h"
// ...
class task_group_context;
class allocate_root_with_context_proxy;
#if __TBB_TASK_PRIORITY
    volatile intptr_t my_top_priority;
#endif
#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
    // ...
#endif

#if __TBB_PREVIEW_CRITICAL_TASKS
    // ...
#endif

#if __TBB_ARENA_OBSERVER
    //! The list of local observers attached to this arena.
    observer_list my_observers;
#endif
#if __TBB_TASK_PRIORITY
    intptr_t my_bottom_priority;
    // ...
    uintptr_t my_reload_epoch;
    // ...
    task* my_orphaned_tasks;
    // ...
#endif
#if !__TBB_FP_CONTEXT
    // ...
#endif

#if __TBB_TASK_GROUP_CONTEXT
    // ...
#endif

#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
    // Concurrency modes used by the mandatory-concurrency (enqueue) support.
    enum concurrency_mode {
        cm_normal,
        cm_enforced_local,
        cm_enforced_global
    };
    // ...
    concurrency_mode my_concurrency_mode;
#endif
    void restore_priority_if_need();
    //! Constructor
    arena ( market&, unsigned max_num_workers, unsigned num_reserved_slots );

    //! Allocate an instance of arena.
    static arena& allocate_arena( market&, unsigned num_slots, unsigned num_reserved_slots );
    static unsigned num_arena_slots ( unsigned num_slots ) {
        return max(2u, num_slots);
    }
    //! Get reference to mailbox corresponding to given affinity_id.
    mail_outbox& mailbox( affinity_id id ) {
        __TBB_ASSERT( 0 < id, "affinity id must be positive integer" );
        // ...
    }
    //! No tasks to steal since the last snapshot was taken.
    static const pool_state_t SNAPSHOT_EMPTY = 0;

    //! At least one task has been offered for stealing since the last snapshot started.
    static const pool_state_t SNAPSHOT_FULL = pool_state_t(-1);

    //! The number of least significant bits for external references.
    static const unsigned ref_external_bits = 12;

    //! Reference increment values for externals and workers.
    static const unsigned ref_external = 1;
    static const unsigned ref_worker = 1 << ref_external_bits;
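    // ------------------------------------------------------------------------
    // Illustrative sketch (not part of arena.h): how a counter packed with
    // ref_external_bits = 12 keeps master and worker references apart, and how
    // num_workers_active() can recover the worker count with a shift. The class
    // name and the use of std::atomic (requires <atomic>) are assumptions made
    // for this sketch only.
    // ------------------------------------------------------------------------
    class packed_ref_counter_sketch {
        static const unsigned ref_external_bits = 12;
        static const unsigned ref_external = 1;                        // increment for a master
        static const unsigned ref_worker   = 1u << ref_external_bits;  // increment for a worker
        std::atomic<unsigned> my_references;
    public:
        packed_ref_counter_sketch() : my_references(0) {}
        void master_enters() { my_references += ref_external; }
        void worker_enters() { my_references += ref_worker; }
        // Mirrors num_workers_active(): the master bits are shifted away.
        unsigned num_workers_active() const { return my_references.load() >> ref_external_bits; }
        unsigned num_masters_active() const { return my_references.load() & (ref_worker - 1); }
    };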
    //! If necessary, raise a flag that there is new job in arena.
    template<arena::new_work_type work_type> void advertise_new_work();

    //! Check if there is job anywhere in arena.
    bool is_out_of_work();
    //! Notification that worker or master leaves its arena.
    template<unsigned ref_param>
    inline void on_thread_leaving ( );

    // ...
    void dump_arena_statistics ();
#if __TBB_TASK_PRIORITY
    // ...
    inline bool may_have_tasks ( generic_scheduler*, bool& tasks_present, bool& dequeuing_possible );
    // ...
#endif
#if __TBB_COUNT_TASK_NODES
    intptr_t workers_task_node_count();
#endif
    bool has_enqueued_tasks();
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
    bool recall_by_mandatory_request() const {
        return my_market->my_mandatory_num_requested && my_concurrency_mode==cm_normal;
    }

    bool must_have_concurrency() const {
        return my_num_workers_requested &&
               ( my_concurrency_mode==cm_enforced_local || my_concurrency_mode==cm_enforced_global );
    }
#endif
    static const size_t out_of_arena = ~size_t(0);
    template <bool as_worker>
    // ...
    size_t occupy_free_slot_in_range( generic_scheduler& s, size_t lower, size_t upper );
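    // ------------------------------------------------------------------------
    // Hedged sketch of the kind of slot-claiming loop occupy_free_slot_in_range()
    // performs: scan [lower, upper) and claim the first free slot with a CAS,
    // returning out_of_arena (declared above) when every slot is taken.
    // std::atomic pointers stand in for the arena's slot array; the real slot
    // type and claiming protocol differ. Requires <atomic> and <cstddef>.
    // ------------------------------------------------------------------------
    struct scheduler_stub {};                        // stand-in for generic_scheduler
    static size_t occupy_free_slot_in_range_sketch(
            std::atomic<scheduler_stub*>* slots,     // hypothetical slot array
            scheduler_stub& s, size_t lower, size_t upper ) {
        for ( size_t i = lower; i < upper; ++i ) {
            scheduler_stub* expected = nullptr;
            if ( slots[i].compare_exchange_strong( expected, &s ) )
                return i;                            // this thread now owns slot i
        }
        return out_of_arena;                         // no free slot in the range
    }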
// ...

template<unsigned ref_param>
inline void arena::on_thread_leaving ( ) {
    // ...
#if __TBB_STATISTICS_EARLY_DUMP
    // ...
#endif
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
    // ...
    is_out = is_out_of_work();
    // ...
#endif
    // ...
}
template<arena::new_work_type work_type> void arena::advertise_new_work() {
    if( work_type == work_enqueued ) {
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
        if( my_concurrency_mode!=cm_enforced_global ) {
            if( my_market->mandatory_concurrency_enable( this ) ) { /* ... */ }
            // ...
            my_concurrency_mode = cm_enforced_local;
            // ...
        }
#endif
        // ...
    }
    else if( work_type == wakeup ) { /* ... */ }
    // ...
    pool_state_t snapshot = my_pool_state;
    if( is_busy_or_empty(snapshot) ) {
        // Attempt to mark the pool as full.
        if( my_pool_state.compare_and_swap( SNAPSHOT_FULL, snapshot )==SNAPSHOT_EMPTY ) {
            if( snapshot!=SNAPSHOT_EMPTY ) { /* ... */ }
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
            if( work_type == work_spawned ) {
                if( my_concurrency_mode!=cm_normal ) {
                    switch( my_concurrency_mode ) {
                    case cm_enforced_local:
                        // ...
                        my_concurrency_mode = cm_normal;
                        break;
                    case cm_enforced_global:
                        my_market->mandatory_concurrency_disable( this );
                        restore_priority_if_need();
                        break;
                    }
                }
            }
#endif
            // ...
        }
    }
}
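// ----------------------------------------------------------------------------
// Hedged standalone sketch of the empty/full snapshot protocol used above, with
// std::atomic in place of tbb::atomic. Only the thread that makes the
// SNAPSHOT_EMPTY -> SNAPSHOT_FULL transition asks for workers, so concurrent
// advertisers do not flood the market with duplicate requests. All names below
// are illustrative; request_workers() stands in for my_market->adjust_demand().
// Requires <atomic> and <cstdint>.
// ----------------------------------------------------------------------------
typedef std::uintptr_t pool_state_sketch_t;
static const pool_state_sketch_t SKETCH_EMPTY = 0;                        // no stealable tasks
static const pool_state_sketch_t SKETCH_FULL  = pool_state_sketch_t(-1);  // work has been advertised
static std::atomic<pool_state_sketch_t> sketch_pool_state( SKETCH_EMPTY );
static void request_workers() { /* stand-in for my_market->adjust_demand(*this, my_max_num_workers) */ }

void advertise_new_work_sketch() {
    pool_state_sketch_t snapshot = sketch_pool_state.load();
    if ( snapshot == SKETCH_FULL )
        return;                                      // already advertised; nothing to do
    pool_state_sketch_t expected = snapshot;
    if ( sketch_pool_state.compare_exchange_strong( expected, SKETCH_FULL ) ) {
        if ( snapshot == SKETCH_EMPTY )
            request_workers();                       // we made the empty -> full transition
        return;
    }
    // The CAS failed because the state changed underneath us. If it has become
    // empty in the meantime, retry the empty -> full transition so the wake-up
    // is not lost; otherwise some other thread already took care of it.
    expected = SKETCH_EMPTY;
    if ( sketch_pool_state.compare_exchange_strong( expected, SKETCH_FULL ) )
        request_workers();
}
// The real advertise_new_work() also relies on atomic_fence() (see below) and on
// the concurrency-mode transitions shown above; this sketch keeps only the
// snapshot protocol itself.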
Referenced symbols:

concurrent_monitor my_exit_monitors
Waiting object for master threads that cannot join the arena.
atomic< unsigned > my_limit
The maximal number of currently busy slots.
static bool is_busy_or_empty(pool_state_t s)
No tasks to steal or snapshot is being taken.
static const intptr_t num_priority_levels
new_work_type
Types of work advertised by advertise_new_work()
mail_outbox & mailbox(affinity_id id)
Get reference to mailbox corresponding to given affinity_id.
padded< arena_base > base_type
#define GATHER_STATISTIC(x)
static unsigned num_arena_slots(unsigned num_slots)
int my_num_workers_requested
The number of workers that are currently requested from the resource manager.
unsigned my_num_reserved_slots
The number of reserved slots (can be occupied only by masters).
unsigned num_workers_active()
The number of workers active in the arena.
void advertise_new_work()
If necessary, raise a flag that there is new job in arena.
unsigned short affinity_id
An id as used for specifying affinity.
padded< T >
Pads type T to fill out to a multiple of cache line size.
market * my_market
The market that owns this arena.
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
task_stream< num_priority_levels > my_task_stream
Task pool for the tasks scheduled via task::enqueue() method.
unsigned my_num_workers_soft_limit
Current application-imposed limit on the number of workers (see set_active_num_workers()) ...
argument_integer_type modulo_power_of_two(argument_integer_type arg, divisor_integer_type divisor)
A function to compute arg modulo divisor where divisor is a power of 2.
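A minimal sketch of such a helper, assuming the usual bit-mask implementation (an illustration, not the TBB source; requires <cstdint>):

// arg % divisor, for a divisor that is a power of two, computed without division.
inline uintptr_t modulo_power_of_two_sketch( uintptr_t arg, uintptr_t divisor ) {
    // divisor must be a nonzero power of two, i.e. have exactly one bit set
    return arg & (divisor - 1);
}
// For example, with divisor == ref_worker (1 << 12) this extracts the low 12 bits,
// i.e. the external-reference part of a packed counter such as my_references.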
void try_destroy_arena(arena *, uintptr_t aba_epoch)
Removes the arena from the market's list.
task_group_context
Used to form groups of tasks.
FastRandom
A fast random number generator.
tbb::atomic< uintptr_t > my_pool_state
Current task pool state and estimate of available tasks amount.
static int allocation_size(unsigned num_slots)
unsigned my_num_workers_allotted
The number of workers that have been marked out by the resource manager to service the arena...
value_type compare_and_swap(value_type value, value_type comparand)
arena_base
The structure of an arena, except the array of slots.
task
Base class for user-defined tasks.
generic_scheduler
Work stealing task scheduler.
unsigned my_max_num_workers
The number of workers requested by the master thread owning the arena.
void adjust_demand(arena &, int delta)
Request that the arena's demand for workers be adjusted.
cpu_ctl_env my_cpu_ctl_env
FPU control settings of arena's master thread captured at the moment of arena instantiation.
atomic< unsigned > my_references
Reference counter for the arena.
mail_outbox
Class representing where mail is put.
void atomic_fence()
Sequentially consistent full memory fence.
T max(const T &val1, const T &val2)
Utility template function returning the greater of two values.
void on_thread_leaving()
Notification that worker or master leaves its arena.
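A hedged sketch of what this notification amounts to, combining the reference-counter constants above with the my_aba_epoch and try_destroy_arena() entries listed here; all names ending in _stub or _sketch are illustrative (requires <atomic> and <cstdint>):

struct market_stub {
    void try_destroy_arena( void* a, uintptr_t aba_epoch ) { /* remove from the market's list, maybe free */ }
};

struct arena_stub {
    static const unsigned ref_external_bits = 12;
    static const unsigned ref_external = 1;
    static const unsigned ref_worker   = 1u << ref_external_bits;
    std::atomic<unsigned> my_references{0};
    uintptr_t my_aba_epoch = 0;          // ABA prevention marker
    market_stub* my_market = nullptr;

    template<unsigned ref_param>
    void on_thread_leaving_sketch() {
        // Capture the epoch and the market pointer before dropping the reference:
        // once the counter reaches zero, another thread may recycle this arena.
        uintptr_t aba_epoch = my_aba_epoch;
        market_stub* m = my_market;
        if ( my_references.fetch_sub( ref_param ) == ref_param )   // previous value == ref_param
            m->try_destroy_arena( this, aba_epoch );               // last reference is gone
    }
};

Presumably a leaving master instantiates this with ref_external and a leaving worker with ref_worker, matching the reference increments declared earlier.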
task_stream
The container for "fairness-oriented" aka "enqueued" tasks.
uintptr_t my_aba_epoch
ABA prevention marker.
unsigned my_num_slots
The number of slots in the arena.