#include "kmp_error.h"
#include "kmp_stats.h"

#define MAX_MESSAGE 512

if (__kmp_ignore_mppbeg() == FALSE) {
    __kmp_internal_begin();
    KC_TRACE( 10, ("__kmpc_begin: called\n" ) );

if (__kmp_ignore_mppend() == FALSE) {
    KC_TRACE( 10, ("__kmpc_end: called\n" ) );
    KA_TRACE( 30, ("__kmpc_end\n" ));
    __kmp_internal_end_thread( -1 );

kmp_int32 gtid = __kmp_entry_gtid();
KC_TRACE( 10, ("__kmpc_global_thread_num: T#%d\n", gtid ) );

KC_TRACE( 10, ("__kmpc_global_num_threads: num_threads = %d\n", __kmp_nth ) );
return TCR_4(__kmp_nth);

KC_TRACE( 10, ("__kmpc_bound_thread_num: called\n" ) );
return __kmp_tid_from_gtid( __kmp_entry_gtid() );

KC_TRACE( 10, ("__kmpc_bound_num_threads: called\n" ) );
return __kmp_entry_thread() -> th.th_team -> t.t_nproc;
if (__kmp_par_range == 0) {

semi2 = strchr(semi2, ';');
semi2 = strchr(semi2 + 1, ';');
if (__kmp_par_range_filename[0]) {
    const char *name = semi2 - 1;
    while ((name > loc->psource) && (*name != '/') && (*name != ';')) {
    if ((*name == '/') || (*name == ';')) {
    if (strncmp(__kmp_par_range_filename, name, semi2 - name)) {
        return __kmp_par_range < 0;

semi3 = strchr(semi2 + 1, ';');
if (__kmp_par_range_routine[0]) {
    if ((semi3 != NULL) && (semi3 > semi2)
        && (strncmp(__kmp_par_range_routine, semi2 + 1, semi3 - semi2 - 1))) {
        return __kmp_par_range < 0;

if (sscanf(semi3 + 1, "%d", &line_no) == 1) {
    if ((line_no >= __kmp_par_range_lb) && (line_no <= __kmp_par_range_ub)) {
        return __kmp_par_range > 0;
    return __kmp_par_range < 0;
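/*
 * Illustration only (not part of the runtime): the filtering above walks the
 * semicolon-separated fields of ident_t::psource, assumed here to look like
 * ";file;routine;line;...".  A minimal standalone sketch of the same walk,
 * with a hypothetical routine-matching helper; strchr()/strncmp() are the
 * usual <string.h> functions already used in this file.
 */
static int example_match_routine( const char *psource, const char *routine )
{
    const char *semi2 = strchr( psource, ';' );          /* end of the leading field */
    if ( semi2 == NULL ) return 0;
    semi2 = strchr( semi2 + 1, ';' );                    /* end of the file field */
    if ( semi2 == NULL ) return 0;
    const char *semi3 = strchr( semi2 + 1, ';' );        /* end of the routine field */
    if ( semi3 == NULL ) return 0;
    return strncmp( routine, semi2 + 1, (size_t)( semi3 - semi2 - 1 ) ) == 0;
}
/* example_match_routine( ";foo.c;bar;42;45;;", "bar" ) would return 1 */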
return __kmp_entry_thread() -> th.th_root -> r.r_active;

KA_TRACE( 20, ("__kmpc_push_num_threads: enter T#%d num_threads=%d\n",
  global_tid, num_threads ) );
__kmp_push_num_threads( loc, global_tid, num_threads );

__kmpc_pop_num_threads( ident_t *loc, kmp_int32 global_tid )
KA_TRACE( 20, ("__kmpc_pop_num_threads: enter\n" ) );

__kmpc_push_proc_bind( ident_t *loc, kmp_int32 global_tid, kmp_int32 proc_bind )
KA_TRACE( 20, ("__kmpc_push_proc_bind: enter T#%d proc_bind=%d\n",
  global_tid, proc_bind ) );
__kmp_push_proc_bind( loc, global_tid, (kmp_proc_bind_t)proc_bind );

int gtid = __kmp_entry_gtid();
va_start( ap, microtask );
#if INCLUDE_SSC_MARKS
__kmp_fork_call( loc, gtid, fork_context_intel,
    VOLATILE_CAST(microtask_t) microtask,
    VOLATILE_CAST(launch_t)    __kmp_invoke_task_func,
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM) && KMP_OS_LINUX
#if INCLUDE_SSC_MARKS
__kmp_join_call( loc, gtid );
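/*
 * Illustration only (not part of the runtime): a sketch of the code a compiler
 * might emit for "#pragma omp parallel", assuming the kmpc_micro outlined-
 * function convention used by __kmpc_fork_call().  The names example_*,
 * my_ident and shared_n are hypothetical.
 */
static void example_microtask( kmp_int32 *global_tid, kmp_int32 *bound_tid, int *shared_n )
{
    (void)bound_tid;
    /* body of the parallel region; *global_tid identifies the executing thread */
    *shared_n += 1;                                     /* unsynchronized, purely illustrative */
}

static void example_lower_parallel( ident_t *my_ident )
{
    int shared_n = 0;
    /* one trailing argument (argc == 1) is forwarded to the outlined microtask */
    __kmpc_fork_call( my_ident, 1, (kmpc_micro) example_microtask, &shared_n );
}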
KA_TRACE( 20, ("__kmpc_push_num_teams: enter T#%d num_teams=%d num_threads=%d\n",
  global_tid, num_teams, num_threads ) );
__kmp_push_num_teams( loc, global_tid, num_teams, num_threads );

int gtid = __kmp_entry_gtid();
kmp_info_t *this_thr = __kmp_threads[ gtid ];
va_start( ap, microtask );
this_thr->th.th_teams_microtask = microtask;
this_thr->th.th_teams_level = this_thr->th.th_team->t.t_level;
if ( this_thr->th.th_teams_size.nteams == 0 ) {
    __kmp_push_num_teams( loc, gtid, 0, 0 );
KMP_DEBUG_ASSERT(this_thr->th.th_set_nproc >= 1);
KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nteams >= 1);
KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nth >= 1);
__kmp_fork_call( loc, gtid, fork_context_intel,
    VOLATILE_CAST(microtask_t) __kmp_teams_master,
    VOLATILE_CAST(launch_t)    __kmp_invoke_teams_master,
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM) && KMP_OS_LINUX
__kmp_join_call( loc, gtid );
this_thr->th.th_teams_microtask = NULL;
this_thr->th.th_teams_level = 0;
*(kmp_int64*)(&this_thr->th.th_teams_size) = 0L;
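/*
 * Illustration only: a hypothetical lowering of
 * "#pragma omp teams num_teams(4) thread_limit(8)".  The compiler registers
 * the clause values and then forks the league through the outlined microtask,
 * mirroring __kmpc_push_num_teams()/__kmpc_fork_teams() above.  The example_*
 * names and my_ident are assumptions.
 */
static void example_teams_microtask( kmp_int32 *global_tid, kmp_int32 *bound_tid )
{
    (void)global_tid; (void)bound_tid;
    /* body of the teams region */
}

static void example_lower_teams( ident_t *my_ident, kmp_int32 gtid )
{
    __kmpc_push_num_teams( my_ident, gtid, 4 /* num_teams */, 8 /* thread_limit */ );
    __kmpc_fork_teams( my_ident, 0, (kmpc_micro) example_teams_microtask );
}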
__kmpc_invoke_task_func( int gtid )
return __kmp_invoke_task_func( gtid );

__kmp_serialized_parallel(loc, global_tid);

kmp_internal_control_t *top;
kmp_info_t *this_thr;
kmp_team_t *serial_team;

KC_TRACE( 10, ("__kmpc_end_serialized_parallel: called by T#%d\n", global_tid ) );

if( ! TCR_4( __kmp_init_parallel ) )
    __kmp_parallel_initialize();

this_thr    = __kmp_threads[ global_tid ];
serial_team = this_thr->th.th_serial_team;

KMP_DEBUG_ASSERT( serial_team );
KMP_ASSERT( serial_team -> t.t_serialized );
KMP_DEBUG_ASSERT( this_thr -> th.th_team == serial_team );
KMP_DEBUG_ASSERT( serial_team != this_thr->th.th_root->r.r_root_team );
KMP_DEBUG_ASSERT( serial_team -> t.t_threads );
KMP_DEBUG_ASSERT( serial_team -> t.t_threads[0] == this_thr );

top = serial_team -> t.t_control_stack_top;
if ( top && top -> serial_nesting_level == serial_team -> t.t_serialized ) {
    copy_icvs( &serial_team -> t.t_threads[0] -> th.th_current_task -> td_icvs, top );
    serial_team -> t.t_control_stack_top = top -> next;

serial_team -> t.t_level--;

KMP_DEBUG_ASSERT(serial_team->t.t_dispatch->th_disp_buffer);

dispatch_private_info_t * disp_buffer = serial_team->t.t_dispatch->th_disp_buffer;
serial_team->t.t_dispatch->th_disp_buffer =
    serial_team->t.t_dispatch->th_disp_buffer->next;
__kmp_free( disp_buffer );

-- serial_team -> t.t_serialized;
if ( serial_team -> t.t_serialized == 0 ) {

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    if ( __kmp_inherit_fp_control && serial_team->t.t_fp_control_saved ) {
        __kmp_clear_x87_fpu_status_word();
        __kmp_load_x87_fpu_control_word( &serial_team->t.t_x87_fpu_control_word );
        __kmp_load_mxcsr( &serial_team->t.t_mxcsr );

    this_thr -> th.th_team            = serial_team -> t.t_parent;
    this_thr -> th.th_info.ds.ds_tid  = serial_team -> t.t_master_tid;

    this_thr -> th.th_team_nproc      = serial_team -> t.t_parent -> t.t_nproc;
    this_thr -> th.th_team_master     = serial_team -> t.t_parent -> t.t_threads[0];
    this_thr -> th.th_team_serialized = this_thr -> th.th_team -> t.t_serialized;

    this_thr -> th.th_dispatch = & this_thr -> th.th_team ->
        t.t_dispatch[ serial_team -> t.t_master_tid ];

    __kmp_pop_current_task_from_thread( this_thr );

    KMP_ASSERT( this_thr -> th.th_current_task -> td_flags.executing == 0 );
    this_thr -> th.th_current_task -> td_flags.executing = 1;

    if ( __kmp_tasking_mode != tskm_immediate_exec ) {
        if ( ( this_thr -> th.th_task_team = this_thr -> th.th_team -> t.t_task_team ) != NULL ) {
            this_thr -> th.th_task_state = this_thr -> th.th_task_team -> tt.tt_state;
        KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d restoring task_team %p / team %p\n",
          global_tid, this_thr -> th.th_task_team, this_thr -> th.th_team ) );

if ( __kmp_tasking_mode != tskm_immediate_exec ) {
    KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d decreasing nesting depth of serial team %p to %d\n",
      global_tid, serial_team, serial_team -> t.t_serialized ) );

kmp_uint64 cur_time = 0;
if( __itt_get_timestamp_ptr ) {
    cur_time = __itt_get_timestamp();

if( ( __kmp_forkjoin_frames_mode == 1 || __kmp_forkjoin_frames_mode == 3 ) && __itt_frame_submit_v3_ptr ) {
    if( this_thr->th.th_team->t.t_level == 0 ) {
        __kmp_itt_frame_submit( global_tid, this_thr->th.th_frame_time_serialized, cur_time, 0, loc, this_thr->th.th_team_nproc, 0 );

if ( ( __itt_frame_end_v3_ptr && __kmp_forkjoin_frames && ! __kmp_forkjoin_frames_mode ) || KMP_ITT_DEBUG )
    this_thr->th.th_ident = loc;
    __kmp_itt_region_joined( global_tid, 1 );

if ( ( __itt_frame_submit_v3_ptr && __kmp_forkjoin_frames_mode == 3 ) || KMP_ITT_DEBUG )
    this_thr->th.th_ident = loc;
    __kmp_itt_frame_submit( global_tid, serial_team->t.t_region_time, cur_time, 0, loc, this_thr->th.th_team_nproc, 2 );

if ( __kmp_env_consistency_check )
    __kmp_pop_parallel( global_tid, NULL );
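/*
 * Illustration only: when a parallel region runs serially (for example after
 * __kmpc_ok_to_fork() declines to fork), the compiler brackets the inlined
 * body with the serialized-parallel entry points and invokes the microtask
 * directly on the encountering thread.  example_microtask is the hypothetical
 * outlined function from the sketch further above.
 */
static void example_serial_fallback( ident_t *my_ident, int *shared_n )
{
    kmp_int32 gtid = __kmpc_global_thread_num( my_ident );
    kmp_int32 zero = 0;

    __kmpc_serialized_parallel( my_ident, gtid );
    example_microtask( &gtid, &zero, shared_n );        /* region body runs inline */
    __kmpc_end_serialized_parallel( my_ident, gtid );
}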
KC_TRACE( 10, ("__kmpc_flush: called\n" ) );

#if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 )
if ( ! __kmp_cpuinfo.initialized ) {
    __kmp_query_cpuid( & __kmp_cpuinfo );
if ( ! __kmp_cpuinfo.sse2 ) {
#if KMP_COMPILER_ICC || KMP_COMPILER_MSVC
__sync_synchronize();
#endif // KMP_COMPILER_ICC
#error Unknown or unsupported architecture

int explicit_barrier_flag;
KC_TRACE( 10, ("__kmpc_barrier: called T#%d\n", global_tid ) );

if (! TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();

if ( __kmp_env_consistency_check ) {
    KMP_WARNING( ConstructIdentInvalid );
    __kmp_check_barrier( global_tid, ct_barrier, loc );

__kmp_threads[ global_tid ]->th.th_ident = loc;
__kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

KC_TRACE( 10, ("__kmpc_master: called T#%d\n", global_tid ) );

if( ! TCR_4( __kmp_init_parallel ) )
    __kmp_parallel_initialize();

if( KMP_MASTER_GTID( global_tid ))

if ( __kmp_env_consistency_check ) {
    __kmp_push_sync( global_tid, ct_master, loc, NULL );
    __kmp_check_sync( global_tid, ct_master, loc, NULL );

KC_TRACE( 10, ("__kmpc_end_master: called T#%d\n", global_tid ) );

KMP_DEBUG_ASSERT( KMP_MASTER_GTID( global_tid ));

if ( __kmp_env_consistency_check ) {
    KMP_WARNING( ThreadIdentInvalid );

if( KMP_MASTER_GTID( global_tid ))
    __kmp_pop_sync( global_tid, ct_master, loc );
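/*
 * Illustration only: lowering of "#pragma omp master".  __kmpc_master()
 * returns 1 only on the master thread, and only that thread calls
 * __kmpc_end_master(); my_ident is a hypothetical location descriptor.
 */
static void example_lower_master( ident_t *my_ident, kmp_int32 gtid )
{
    if ( __kmpc_master( my_ident, gtid ) ) {
        /* master-only work */
        __kmpc_end_master( my_ident, gtid );
    }
    /* note: the master construct has no implied barrier at its end */
}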
KMP_DEBUG_ASSERT( __kmp_init_serial );

KC_TRACE( 10, ("__kmpc_ordered: called T#%d\n", gtid ));

if (! TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();

__kmp_itt_ordered_prep( gtid );

th = __kmp_threads[ gtid ];

if ( th -> th.th_dispatch -> th_deo_fcn != 0 )
    (*th->th.th_dispatch->th_deo_fcn)( & gtid, & cid, loc );
__kmp_parallel_deo( & gtid, & cid, loc );

__kmp_itt_ordered_start( gtid );

KC_TRACE( 10, ("__kmpc_end_ordered: called T#%d\n", gtid ) );

__kmp_itt_ordered_end( gtid );

th = __kmp_threads[ gtid ];

if ( th -> th.th_dispatch -> th_dxo_fcn != 0 )
    (*th->th.th_dispatch->th_dxo_fcn)( & gtid, & cid, loc );
__kmp_parallel_dxo( & gtid, & cid, loc );
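/*
 * Illustration only: inside a loop carrying the "ordered" clause, each
 * "#pragma omp ordered" block is bracketed by the two entry points above so
 * that iterations emerge in sequential order.
 */
static void example_lower_ordered_block( ident_t *my_ident, kmp_int32 gtid )
{
    __kmpc_ordered( my_ident, gtid );
    /* ordered region body: one iteration at a time, in iteration order */
    __kmpc_end_ordered( my_ident, gtid );
}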
static kmp_user_lock_p
__kmp_get_critical_section_ptr( kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid )
kmp_user_lock_p *lck_pp = (kmp_user_lock_p *)crit;

kmp_user_lock_p lck = (kmp_user_lock_p)TCR_PTR( *lck_pp );

lck = __kmp_user_lock_allocate( &idx, gtid, kmp_lf_critical_section );
__kmp_init_user_lock_with_checks( lck );
__kmp_set_user_lock_location( lck, loc );
__kmp_itt_critical_creating( lck );

int status = KMP_COMPARE_AND_STORE_PTR( lck_pp, 0, lck );

__kmp_itt_critical_destroyed( lck );

__kmp_destroy_user_lock_with_checks( lck );
__kmp_user_lock_free( &idx, gtid, lck );
lck = (kmp_user_lock_p)TCR_PTR( *lck_pp );
KMP_DEBUG_ASSERT( lck != NULL );

KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) );

KMP_CHECK_USER_LOCK_INIT();

if ( ( __kmp_user_lock_kind == lk_tas )
  && ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
    lck = (kmp_user_lock_p)crit;
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
else if ( ( __kmp_user_lock_kind == lk_futex )
  && ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
    lck = (kmp_user_lock_p)crit;
lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );

if ( __kmp_env_consistency_check )
    __kmp_push_sync( global_tid, ct_critical, loc, lck );

__kmp_itt_critical_acquiring( lck );

__kmp_acquire_user_lock_with_checks( lck, global_tid );

__kmp_itt_critical_acquired( lck );

KA_TRACE( 15, ("__kmpc_critical: done T#%d\n", global_tid ));

KC_TRACE( 10, ("__kmpc_end_critical: called T#%d\n", global_tid ));

if ( ( __kmp_user_lock_kind == lk_tas )
  && ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
    lck = (kmp_user_lock_p)crit;
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
else if ( ( __kmp_user_lock_kind == lk_futex )
  && ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
    lck = (kmp_user_lock_p)crit;
lck = (kmp_user_lock_p) TCR_PTR(*((kmp_user_lock_p *)crit));

KMP_ASSERT(lck != NULL);

if ( __kmp_env_consistency_check )
    __kmp_pop_sync( global_tid, ct_critical, loc );

__kmp_itt_critical_releasing( lck );

__kmp_release_user_lock_with_checks( lck, global_tid );

KA_TRACE( 15, ("__kmpc_end_critical: done T#%d\n", global_tid ));
KC_TRACE( 10, ("__kmpc_barrier_master: called T#%d\n", global_tid ) );

if (! TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();

if ( __kmp_env_consistency_check )
    __kmp_check_barrier( global_tid, ct_barrier, loc );

__kmp_threads[global_tid]->th.th_ident = loc;
status = __kmp_barrier( bs_plain_barrier, global_tid, TRUE, 0, NULL, NULL );

return (status != 0) ? 0 : 1;

KC_TRACE( 10, ("__kmpc_end_barrier_master: called T#%d\n", global_tid ));

__kmp_end_split_barrier ( bs_plain_barrier, global_tid );

KC_TRACE( 10, ("__kmpc_barrier_master_nowait: called T#%d\n", global_tid ));

if (! TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();

if ( __kmp_env_consistency_check ) {
    KMP_WARNING( ConstructIdentInvalid );
    __kmp_check_barrier( global_tid, ct_barrier, loc );

__kmp_threads[global_tid]->th.th_ident = loc;
__kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

if ( __kmp_env_consistency_check ) {
    if ( global_tid < 0 ) {
        KMP_WARNING( ThreadIdentInvalid );
    __kmp_pop_sync( global_tid, ct_master, loc );

kmp_int32 rc = __kmp_enter_single( global_tid, loc, TRUE );

__kmp_exit_single( global_tid );
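/*
 * Illustration only: lowering of "#pragma omp single".  __kmpc_single()
 * returns 1 for the one thread that should execute the block; that thread
 * alone calls __kmpc_end_single().  Unless "nowait" was given, the compiler
 * also emits the closing __kmpc_barrier().
 */
static void example_lower_single( ident_t *my_ident, kmp_int32 gtid )
{
    if ( __kmpc_single( my_ident, gtid ) ) {
        /* single-thread work */
        __kmpc_end_single( my_ident, gtid );
    }
    __kmpc_barrier( my_ident, gtid );                    /* omitted when nowait is present */
}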
KE_TRACE( 10, ("__kmpc_for_static_fini called T#%d\n", global_tid));

if ( __kmp_env_consistency_check )
    __kmp_pop_workshare( global_tid, ct_pdo, loc );

ompc_set_num_threads( int arg )
__kmp_set_num_threads( arg, __kmp_entry_gtid() );

ompc_set_dynamic( int flag )
thread = __kmp_entry_thread();
__kmp_save_internal_controls( thread );
set__dynamic( thread, flag ? TRUE : FALSE );

ompc_set_nested( int flag )
thread = __kmp_entry_thread();
__kmp_save_internal_controls( thread );
set__nested( thread, flag ? TRUE : FALSE );

ompc_set_max_active_levels( int max_active_levels )
__kmp_set_max_active_levels( __kmp_entry_gtid(), max_active_levels );

ompc_set_schedule( omp_sched_t kind, int modifier )
__kmp_set_schedule( __kmp_entry_gtid(), ( kmp_sched_t ) kind, modifier );

ompc_get_ancestor_thread_num( int level )
return __kmp_get_ancestor_thread_num( __kmp_entry_gtid(), level );

ompc_get_team_size( int level )
return __kmp_get_team_size( __kmp_entry_gtid(), level );

kmpc_set_stacksize( int arg )
__kmp_aux_set_stacksize( arg );

kmpc_set_stacksize_s( size_t arg )
__kmp_aux_set_stacksize( arg );

kmpc_set_blocktime( int arg )
gtid = __kmp_entry_gtid();
tid = __kmp_tid_from_gtid(gtid);
thread = __kmp_thread_from_gtid(gtid);
__kmp_aux_set_blocktime( arg, thread, tid );

kmpc_set_library( int arg )
__kmp_user_set_library( (enum library_type)arg );

kmpc_set_defaults( char const * str )
__kmp_aux_set_defaults( str, strlen( str ) );

kmpc_set_affinity_mask_proc( int proc, void **mask )
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
if ( ! TCR_4(__kmp_init_middle) ) {
    __kmp_middle_initialize();
return __kmp_aux_set_affinity_mask_proc( proc, mask );

kmpc_unset_affinity_mask_proc( int proc, void **mask )
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
if ( ! TCR_4(__kmp_init_middle) ) {
    __kmp_middle_initialize();
return __kmp_aux_unset_affinity_mask_proc( proc, mask );

kmpc_get_affinity_mask_proc( int proc, void **mask )
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
if ( ! TCR_4(__kmp_init_middle) ) {
    __kmp_middle_initialize();
return __kmp_aux_get_affinity_mask_proc( proc, mask );
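/*
 * Illustration only: the ompc_/kmpc_ wrappers above back the Fortran and
 * kmp_* service entry points, so application code can tune the runtime
 * without touching internal state directly.  The values are arbitrary
 * examples, not recommendations.
 */
static void example_tune_runtime( void )
{
    kmpc_set_blocktime( 0 );                             /* idle worker threads yield immediately */
    kmpc_set_stacksize_s( (size_t)4 * 1024 * 1024 );     /* 4 MiB worker stacks */
    ompc_set_dynamic( 1 );                               /* allow dynamic adjustment of team size */
    ompc_set_nested( 0 );                                /* disable nested parallelism */
}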
KC_TRACE( 10, ("__kmpc_copyprivate: called T#%d\n", gtid ));

data_ptr = & __kmp_team_from_gtid( gtid )->t.t_copypriv_data;

if ( __kmp_env_consistency_check ) {
    KMP_WARNING( ConstructIdentInvalid );

if (didit) *data_ptr = cpy_data;

__kmp_threads[gtid]->th.th_ident = loc;
__kmp_barrier( bs_plain_barrier, gtid, FALSE, 0, NULL, NULL );

if (! didit) (*cpy_func)( cpy_data, *data_ptr );

__kmp_threads[gtid]->th.th_ident = loc;
__kmp_barrier( bs_plain_barrier, gtid, FALSE, 0, NULL, NULL );
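/*
 * Illustration only: lowering of "#pragma omp single copyprivate(x)".  Every
 * thread calls __kmpc_copyprivate(); the thread that executed the single
 * block passes didit == 1, and the runtime broadcasts its data through
 * cpy_func (destination first, source second, matching the call above).
 * example_copy_int and the locals are hypothetical.
 */
static void example_copy_int( void *dst, void *src )
{
    *(int *)dst = *(int *)src;
}

static void example_lower_copyprivate( ident_t *my_ident, kmp_int32 gtid )
{
    int x = 0;
    kmp_int32 didit = 0;

    if ( __kmpc_single( my_ident, gtid ) ) {
        x = 42;                                          /* value produced inside the single block */
        didit = 1;
        __kmpc_end_single( my_ident, gtid );
    }
    __kmpc_copyprivate( my_ident, gtid, sizeof( x ), &x, example_copy_int, didit );
    /* after the call, every thread's x holds the broadcast value */
}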
#define INIT_LOCK                 __kmp_init_user_lock_with_checks
#define INIT_NESTED_LOCK          __kmp_init_nested_user_lock_with_checks
#define ACQUIRE_LOCK              __kmp_acquire_user_lock_with_checks
#define ACQUIRE_LOCK_TIMED        __kmp_acquire_user_lock_with_checks_timed
#define ACQUIRE_NESTED_LOCK       __kmp_acquire_nested_user_lock_with_checks
#define ACQUIRE_NESTED_LOCK_TIMED __kmp_acquire_nested_user_lock_with_checks_timed
#define RELEASE_LOCK              __kmp_release_user_lock_with_checks
#define RELEASE_NESTED_LOCK       __kmp_release_nested_user_lock_with_checks
#define TEST_LOCK                 __kmp_test_user_lock_with_checks
#define TEST_NESTED_LOCK          __kmp_test_nested_user_lock_with_checks
#define DESTROY_LOCK              __kmp_destroy_user_lock_with_checks
#define DESTROY_NESTED_LOCK       __kmp_destroy_nested_user_lock_with_checks
__kmpc_init_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
    static char const * const func = "omp_init_lock";
    kmp_user_lock_p lck;
    KMP_DEBUG_ASSERT( __kmp_init_serial );

    if ( __kmp_env_consistency_check ) {
        if ( user_lock == NULL ) {
            KMP_FATAL( LockIsUninitialized, func );

    KMP_CHECK_USER_LOCK_INIT();

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    lck = __kmp_user_lock_allocate( user_lock, gtid, 0 );

    __kmp_set_user_lock_location( lck, loc );

    __kmp_itt_lock_creating( lck );
__kmpc_init_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
    static char const * const func = "omp_init_nest_lock";
    kmp_user_lock_p lck;
    KMP_DEBUG_ASSERT( __kmp_init_serial );

    if ( __kmp_env_consistency_check ) {
        if ( user_lock == NULL ) {
            KMP_FATAL( LockIsUninitialized, func );

    KMP_CHECK_USER_LOCK_INIT();

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    lck = __kmp_user_lock_allocate( user_lock, gtid, 0 );

    INIT_NESTED_LOCK( lck );
    __kmp_set_user_lock_location( lck, loc );

    __kmp_itt_lock_creating( lck );
__kmpc_destroy_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_lock" );

    __kmp_itt_lock_destroyed( lck );

    DESTROY_LOCK( lck );

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
    __kmp_user_lock_free( user_lock, gtid, lck );
__kmpc_destroy_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_nest_lock" );

    __kmp_itt_lock_destroyed( lck );

    DESTROY_NESTED_LOCK( lck );

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
    __kmp_user_lock_free( user_lock, gtid, lck );
__kmpc_set_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    lck = __kmp_lookup_user_lock( user_lock, "omp_set_lock" );

    __kmp_itt_lock_acquiring( lck );

    ACQUIRE_LOCK( lck, gtid );

    __kmp_itt_lock_acquired( lck );
__kmpc_set_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    lck = __kmp_lookup_user_lock( user_lock, "omp_set_nest_lock" );

    __kmp_itt_lock_acquiring( lck );

    ACQUIRE_NESTED_LOCK( lck, gtid );

    __kmp_itt_lock_acquired( lck );
__kmpc_unset_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
        __kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock );
        TCW_4(((kmp_user_lock_p)user_lock)->tas.lk.poll, 0);
        lck = (kmp_user_lock_p)user_lock;
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    lck = __kmp_lookup_user_lock( user_lock, "omp_unset_lock" );

    __kmp_itt_lock_releasing( lck );

    RELEASE_LOCK( lck, gtid );
__kmpc_unset_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
        kmp_tas_lock_t *tl = (kmp_tas_lock_t*)user_lock;
        __kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock );
        if ( --(tl->lk.depth_locked) == 0 ) {
            TCW_4(tl->lk.poll, 0);
        lck = (kmp_user_lock_p)user_lock;
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    lck = __kmp_lookup_user_lock( user_lock, "omp_unset_nest_lock" );

    __kmp_itt_lock_releasing( lck );

    RELEASE_NESTED_LOCK( lck, gtid );
__kmpc_test_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    lck = __kmp_lookup_user_lock( user_lock, "omp_test_lock" );

    __kmp_itt_lock_acquiring( lck );

    rc = TEST_LOCK( lck, gtid );

    __kmp_itt_lock_acquired( lck );
    __kmp_itt_lock_cancelled( lck );

    return ( rc ? FTN_TRUE : FTN_FALSE );
__kmpc_test_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    lck = __kmp_lookup_user_lock( user_lock, "omp_test_nest_lock" );

    __kmp_itt_lock_acquiring( lck );

    rc = TEST_NESTED_LOCK( lck, gtid );

    __kmp_itt_lock_acquired( lck );
    __kmp_itt_lock_cancelled( lck );
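/*
 * Illustration only: the omp_*_lock() user API is a thin layer over the
 * __kmpc_* lock entry points above.  A hypothetical direct use looks like
 * this; the runtime decides internally whether the lock fits inside the
 * pointer-sized user object (TAS/futex) or needs an indirect allocation.
 */
static void example_use_lock( ident_t *my_ident, kmp_int32 gtid )
{
    void *lock_storage = NULL;                           /* plays the role of an omp_lock_t */

    __kmpc_init_lock( my_ident, gtid, &lock_storage );

    __kmpc_set_lock( my_ident, gtid, &lock_storage );
    /* ... exclusive section ... */
    __kmpc_unset_lock( my_ident, gtid, &lock_storage );

    if ( __kmpc_test_lock( my_ident, gtid, &lock_storage ) ) {
        /* lock acquired without blocking */
        __kmpc_unset_lock( my_ident, gtid, &lock_storage );
    }

    __kmpc_destroy_lock( my_ident, gtid, &lock_storage );
}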
#define __KMP_SET_REDUCTION_METHOD(gtid,rmethod) \
        ( ( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method ) = ( rmethod ) )

#define __KMP_GET_REDUCTION_METHOD(gtid) \
        ( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method )

static __forceinline void
__kmp_enter_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {
    kmp_user_lock_p lck;

    if ( __kmp_base_user_lock_size <= INTEL_CRITICAL_SIZE ) {
        lck = (kmp_user_lock_p)crit;
    lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );
    KMP_DEBUG_ASSERT( lck != NULL );

    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_critical, loc, lck );

    __kmp_acquire_user_lock_with_checks( lck, global_tid );

static __forceinline void
__kmp_end_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {
    kmp_user_lock_p lck;

    if ( __kmp_base_user_lock_size > 32 ) {
        lck = *( (kmp_user_lock_p *) crit );
        KMP_ASSERT( lck != NULL );
    lck = (kmp_user_lock_p) crit;

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_critical, loc );

    __kmp_release_user_lock_with_checks( lck, global_tid );
ident_t *loc, kmp_int32 global_tid,
kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
void (*reduce_func)(void *lhs_data, void *rhs_data),
kmp_critical_name *lck ) {

PACKED_REDUCTION_METHOD_T packed_reduction_method;
int teams_swapped = 0, task_state;

KA_TRACE( 10, ( "__kmpc_reduce_nowait() enter: called T#%d\n", global_tid ) );

if( ! TCR_4( __kmp_init_parallel ) )
    __kmp_parallel_initialize();

if ( __kmp_env_consistency_check )
    __kmp_push_sync( global_tid, ct_reduce, loc, NULL );

th = __kmp_thread_from_gtid(global_tid);
if( th->th.th_teams_microtask ) {
    team = th->th.th_team;
    if( team->t.t_level == th->th.th_teams_level ) {
        KMP_DEBUG_ASSERT(!th->th.th_info.ds.ds_tid);
        th->th.th_info.ds.ds_tid = team->t.t_master_tid;
        th->th.th_team           = team->t.t_parent;
        th->th.th_task_team      = th->th.th_team->t.t_task_team;
        th->th.th_team_nproc     = th->th.th_team->t.t_nproc;
        task_state               = th->th.th_task_state;
        if( th->th.th_task_team )
            th->th.th_task_state = th->th.th_task_team->tt.tt_state;
#endif // OMP_40_ENABLED

packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
__KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );

if( packed_reduction_method == critical_reduce_block ) {

    __kmp_enter_critical_section_reduce_block( loc, global_tid, lck );

} else if( packed_reduction_method == empty_reduce_block ) {

} else if( packed_reduction_method == atomic_reduce_block ) {

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_reduce, loc );

} else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

    __kmp_threads[global_tid]->th.th_ident = loc;
    retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, FALSE, reduce_size, reduce_data, reduce_func );
    retval = ( retval != 0 ) ? ( 0 ) : ( 1 );

if ( __kmp_env_consistency_check ) {
    __kmp_pop_sync( global_tid, ct_reduce, loc );

if( teams_swapped ) {
    th->th.th_info.ds.ds_tid = 0;
    th->th.th_team           = team;
    th->th.th_task_team      = team->t.t_task_team;
    th->th.th_team_nproc     = team->t.t_nproc;
    th->th.th_task_state     = task_state;

KA_TRACE( 10, ( "__kmpc_reduce_nowait() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );
PACKED_REDUCTION_METHOD_T packed_reduction_method;

KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() enter: called T#%d\n", global_tid ) );

packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid );

if( packed_reduction_method == critical_reduce_block ) {

    __kmp_end_critical_section_reduce_block( loc, global_tid, lck );

} else if( packed_reduction_method == empty_reduce_block ) {

} else if( packed_reduction_method == atomic_reduce_block ) {

} else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

if ( __kmp_env_consistency_check )
    __kmp_pop_sync( global_tid, ct_reduce, loc );

KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) );
ident_t *loc, kmp_int32 global_tid,
kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
void (*reduce_func)(void *lhs_data, void *rhs_data),
kmp_critical_name *lck )

PACKED_REDUCTION_METHOD_T packed_reduction_method;

KA_TRACE( 10, ( "__kmpc_reduce() enter: called T#%d\n", global_tid ) );

if( ! TCR_4( __kmp_init_parallel ) )
    __kmp_parallel_initialize();

if ( __kmp_env_consistency_check )
    __kmp_push_sync( global_tid, ct_reduce, loc, NULL );

packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
__KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );

if( packed_reduction_method == critical_reduce_block ) {

    __kmp_enter_critical_section_reduce_block( loc, global_tid, lck );

} else if( packed_reduction_method == empty_reduce_block ) {

} else if( packed_reduction_method == atomic_reduce_block ) {

} else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

    __kmp_threads[global_tid]->th.th_ident = loc;
    retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, TRUE, reduce_size, reduce_data, reduce_func );
    retval = ( retval != 0 ) ? ( 0 ) : ( 1 );

if ( __kmp_env_consistency_check ) {
    __kmp_pop_sync( global_tid, ct_reduce, loc );

KA_TRACE( 10, ( "__kmpc_reduce() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );
PACKED_REDUCTION_METHOD_T packed_reduction_method;

KA_TRACE( 10, ( "__kmpc_end_reduce() enter: called T#%d\n", global_tid ) );

packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid );

if( packed_reduction_method == critical_reduce_block ) {

    __kmp_end_critical_section_reduce_block( loc, global_tid, lck );

    __kmp_threads[global_tid]->th.th_ident = loc;
    __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

} else if( packed_reduction_method == empty_reduce_block ) {

    __kmp_threads[global_tid]->th.th_ident = loc;
    __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

} else if( packed_reduction_method == atomic_reduce_block ) {

    __kmp_threads[global_tid]->th.th_ident = loc;
    __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

} else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

    __kmp_end_split_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid );

if ( __kmp_env_consistency_check )
    __kmp_pop_sync( global_tid, ct_reduce, loc );

KA_TRACE( 10, ( "__kmpc_end_reduce() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) );

#undef __KMP_GET_REDUCTION_METHOD
#undef __KMP_SET_REDUCTION_METHOD
__kmpc_get_taskid() {
    kmp_info_t * thread;

    gtid = __kmp_get_gtid();
    thread = __kmp_thread_from_gtid( gtid );
    return thread->th.th_current_task->td_task_id;

__kmpc_get_parent_taskid() {
    kmp_info_t * thread;
    kmp_taskdata_t * parent_task;

    gtid = __kmp_get_gtid();
    thread = __kmp_thread_from_gtid( gtid );
    parent_task = thread->th.th_current_task->td_parent;
    return ( parent_task == NULL ? 0 : parent_task->td_task_id );

void __kmpc_place_threads( int nC, int nT, int nO )
    if ( ! __kmp_init_serial ) {
        __kmp_serial_initialize();
    __kmp_place_num_cores = nC;
    __kmp_place_num_threads_per_core = nT;
    __kmp_place_core_offset = nO;