47 #include "kmp_error.h"
48 #include "kmp_stats.h"
// i_maxmin< T >: compile-time maximum/minimum representable values for the
// loop-index types used by the scheduling code below. The schedulers use
// i_maxmin< T >::mx / ::mn to clamp a per-thread upper bound when the chunk
// arithmetic over/underflows (see the *pupper clamping further down).
// NOTE(review): this extract is missing lines (the primary template and the
// closing `};` of each specialization are not visible here).
// NOTE(review): initializers such as 0x80000000 for `int` and
// 0x8000000000000000LL for `long long` rely on an implementation-defined
// unsigned-to-signed conversion to produce INT_MIN/LLONG_MIN; <limits> or
// <climits> constants would be the portable spelling — confirm against the
// supported compilers before changing.
52 template<
typename T >
58 struct i_maxmin< int > {
59 static const int mx = 0x7fffffff;
60 static const int mn = 0x80000000;
63 struct i_maxmin< unsigned int > {
64 static const unsigned int mx = 0xffffffff;
65 static const unsigned int mn = 0x00000000;
68 struct i_maxmin< long long > {
69 static const long long mx = 0x7fffffffffffffffLL;
70 static const long long mn = 0x8000000000000000LL;
73 struct i_maxmin< unsigned long long > {
74 static const unsigned long long mx = 0xffffffffffffffffLL;
75 static const unsigned long long mn = 0x0000000000000000LL;
// Out-of-line definitions of traits_t< T >::spec: the printf length/format
// specifier ("d", "u", "lld", "llu") spliced into the KD_TRACE debug format
// strings below so trace output prints each index type correctly.
// NOTE(review): the `template<>` headers for these explicit specializations
// are not visible in this extract.
81 char const * traits_t< int >::spec =
"d";
82 char const * traits_t< unsigned int >::spec =
"u";
83 char const * traits_t< long long >::spec =
"lld";
84 char const * traits_t< unsigned long long >::spec =
"llu";
// __kmp_for_static_init< T >: compute this thread's lower bound (*plower),
// upper bound (*pupper), stride (*pstride) and last-iteration flag
// (*plastiter) for a statically scheduled worksharing loop.
// T is the (signed or unsigned) loop index type; UT/ST are its unsigned and
// signed counterparts from traits_t.
// NOTE(review): this extract is missing many interior lines (numbering gaps);
// comments below describe only what is visible. Return type, some parameters
// and several branch bodies are not shown.
88 template<
typename T >
90 __kmp_for_static_init(
97 typename traits_t< T >::signed_t *pstride,
98 typename traits_t< T >::signed_t incr,
99 typename traits_t< T >::signed_t chunk
102 typedef typename traits_t< T >::unsigned_t UT;
103 typedef typename traits_t< T >::signed_t ST;
105 register kmp_int32 gtid = global_tid;
106 register kmp_uint32 tid;
107 register kmp_uint32 nth;
108 register UT trip_count;
109 register kmp_team_t *team;
110 register kmp_info_t *th = __kmp_threads[ gtid ];
    // All four output pointers must be non-NULL (debug builds only).
112 KMP_DEBUG_ASSERT( plastiter && plower && pupper && pstride );
113 KE_TRACE( 10, (
"__kmpc_for_static_init called (%d)\n", global_tid));
    // Debug trace of the incoming bounds; the format string is built at run
    // time so the per-type specifier (traits_t<T>::spec) can be spliced in.
118 buff = __kmp_str_format(
119 "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s," \
120 " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
121 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
122 traits_t< ST >::spec, traits_t< ST >::spec, traits_t< T >::spec );
123 KD_TRACE(100, ( buff, global_tid, schedtype, *plastiter,
124 *plower, *pupper, *pstride, incr, chunk ) );
125 __kmp_str_free( &buff );
    // Optional consistency checking: register this workshare and reject a
    // zero increment.
129 if ( __kmp_env_consistency_check ) {
130 __kmp_push_workshare( global_tid, ct_pdo, loc );
132 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
    // Zero-trip loop (bounds crossed for the sign of incr): trace and return
    // early. The early-return statements themselves are not visible here.
136 if ( incr > 0 ? (*pupper < *plower) : (*plower < *pupper) ) {
137 if( plastiter != NULL )
147 buff = __kmp_str_format(
148 "__kmpc_for_static_init:(ZERO TRIP) liter=%%d lower=%%%s upper=%%%s stride = %%%s signed?<%s>, loc = %%s\n",
149 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec, traits_t< T >::spec );
150 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride, loc->
psource ) );
151 __kmp_str_free( &buff );
154 KE_TRACE( 10, (
"__kmpc_for_static_init: T#%d return\n", global_tid ) );
    // Pick the tid/team to partition over. One visible branch uses the
    // parent team (presumably the teams-construct case — the guarding
    // condition is not visible), the other the current team.
162 tid = th->th.th_team->t.t_master_tid;
163 team = th->th.th_team->t.t_parent;
167 tid = __kmp_tid_from_gtid( global_tid );
168 team = th->th.th_team;
    // Serialized team: the single thread owns the whole iteration space;
    // stride is set to the full span so a subsequent chunk would be past the
    // end. Trace and return.
172 if ( team -> t.t_serialized ) {
174 if( plastiter != NULL )
177 *pstride = (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
183 buff = __kmp_str_format(
184 "__kmpc_for_static_init: (serial) liter=%%d lower=%%%s upper=%%%s stride = %%%s\n",
185 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec );
186 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
187 __kmp_str_free( &buff );
190 KE_TRACE( 10, (
"__kmpc_for_static_init: T#%d return\n", global_tid ) );
193 nth = team->t.t_nproc;
    // Single-thread team: same full-span result as the serialized case.
195 if( plastiter != NULL )
197 *pstride = (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
202 buff = __kmp_str_format(
203 "__kmpc_for_static_init: (serial) liter=%%d lower=%%%s upper=%%%s stride = %%%s\n",
204 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec );
205 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
206 __kmp_str_free( &buff );
209 KE_TRACE( 10, (
"__kmpc_for_static_init: T#%d return\n", global_tid ) );
    // Trip count, computed in the unsigned type UT to avoid signed overflow.
    // Special-cased for incr == 1 / -1 (the incr == 1 guard is not visible).
215 trip_count = *pupper - *plower + 1;
216 }
else if (incr == -1) {
217 trip_count = *plower - *pupper + 1;
220 trip_count = (*pupper - *plower) / incr + 1;
222 trip_count = (*plower - *pupper) / ( -incr ) + 1;
    // trip_count == 0 with distinct bounds means the UT arithmetic wrapped:
    // the range is too large to represent.
226 if ( __kmp_env_consistency_check ) {
228 if ( trip_count == 0 && *pupper != *plower ) {
229 __kmp_error_construct( kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo, loc );
    // Dispatch on schedule kind. (The case labels for the unchunked static
    // schedule are not visible; only its body is.)
234 switch ( schedtype ) {
    // Fewer iterations than threads: each of the first trip_count threads
    // gets exactly one iteration, the rest get an empty range.
237 if ( trip_count < nth ) {
239 __kmp_static == kmp_sch_static_greedy || \
240 __kmp_static == kmp_sch_static_balanced
242 if ( tid < trip_count ) {
243 *pupper = *plower = *plower + tid * incr;
245 *plower = *pupper + incr;
247 if( plastiter != NULL )
248 *plastiter = ( tid == trip_count - 1 );
    // Balanced static: trip_count/nth iterations each, with the first
    // `extras` threads taking one extra iteration.
250 if ( __kmp_static == kmp_sch_static_balanced ) {
251 register UT small_chunk = trip_count / nth;
252 register UT extras = trip_count % nth;
253 *plower += incr * ( tid * small_chunk + ( tid < extras ? tid : extras ) );
254 *pupper = *plower + small_chunk * incr - ( tid < extras ? 0 : incr );
255 if( plastiter != NULL )
256 *plastiter = ( tid == nth - 1 );
    // Greedy static: ceil(trip_count/nth)-sized contiguous blocks; the last
    // thread's upper bound may overflow and is clamped via i_maxmin.
258 register T big_chunk_inc_count = ( trip_count/nth +
259 ( ( trip_count % nth ) ? 1 : 0) ) * incr;
260 register T old_upper = *pupper;
262 KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
265 *plower += tid * big_chunk_inc_count;
266 *pupper = *plower + big_chunk_inc_count - incr;
    // incr > 0 path: wraparound shows as *pupper < *plower; clamp to max,
    // then cap at the original upper bound.
268 if( *pupper < *plower )
269 *pupper = i_maxmin< T >::mx;
270 if( plastiter != NULL )
271 *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
272 if ( *pupper > old_upper ) *pupper = old_upper;
    // incr < 0 path: mirror of the above, clamping to the type minimum.
274 if( *pupper > *plower )
275 *pupper = i_maxmin< T >::mn;
276 if( plastiter != NULL )
277 *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
278 if ( *pupper < old_upper ) *pupper = old_upper;
    // Chunked static: round-robin blocks of `chunk` iterations; the first
    // chunk for this thread is returned, *pstride advances by nth chunks.
    // (The span computation is not visible in this extract.)
284 case kmp_sch_static_chunked:
291 *pstride = span * nth;
292 *plower = *plower + (span * tid);
293 *pupper = *plower + span - incr;
294 if( plastiter != NULL )
295 *plastiter = (tid == ((trip_count - 1)/( UT )chunk) % nth);
299 KMP_ASSERT2( 0,
"__kmpc_for_static_init: unknown scheduling type" );
    // ITT (Intel tracing) loop metadata, emitted by the master thread only
    // when frame mode 3 is active and this is a top-level non-teams region.
305 if ( KMP_MASTER_TID(tid) && __itt_metadata_add_ptr && __kmp_forkjoin_frames_mode == 3 &&
307 th->th.th_teams_microtask == NULL &&
309 team->t.t_active_level == 1 )
311 kmp_uint64 cur_chunk = chunk;
    // presumably executed only for the unchunked schedule, where the
    // effective chunk is ceil(trip_count/nth) — guard not visible.
314 cur_chunk = trip_count / nth + ( ( trip_count % nth ) ? 1 : 0);
317 __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
    // Exit trace of the computed per-thread bounds.
324 buff = __kmp_str_format(
325 "__kmpc_for_static_init: liter=%%d lower=%%%s upper=%%%s stride = %%%s signed?<%s>\n",
326 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec, traits_t< T >::spec );
327 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
328 __kmp_str_free( &buff );
331 KE_TRACE( 10, (
"__kmpc_for_static_init: T#%d return\n", global_tid ) );
// __kmp_dist_for_static_init< T >: two-level partitioning for
// `distribute parallel for`. First splits the iteration space across the
// `nteams` teams of a teams construct (producing *pupperDist, the team's
// upper bound), then splits the team's sub-range across the team's `nth`
// threads (producing *plower/*pupper for the calling thread).
// NOTE(review): this extract is missing many interior lines (numbering
// gaps); comments describe only what is visible.
335 template<
typename T >
337 __kmp_dist_for_static_init(
341 kmp_int32 *plastiter,
345 typename traits_t< T >::signed_t *pstride,
346 typename traits_t< T >::signed_t incr,
347 typename traits_t< T >::signed_t chunk
350 typedef typename traits_t< T >::unsigned_t UT;
351 typedef typename traits_t< T >::signed_t ST;
352 register kmp_uint32 tid;
353 register kmp_uint32 nth;
354 register kmp_uint32 team_id;
355 register kmp_uint32 nteams;
356 register UT trip_count;
357 register kmp_team_t *team;
360 KMP_DEBUG_ASSERT( plastiter && plower && pupper && pupperDist && pstride );
361 KE_TRACE( 10, (
"__kmpc_dist_for_static_init called (%d)\n", gtid));
    // Entry trace with per-type format specifiers spliced in at run time.
366 buff = __kmp_str_format(
367 "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d "\
368 "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
369 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
370 traits_t< ST >::spec, traits_t< T >::spec );
371 KD_TRACE(100, ( buff, gtid, schedule, *plastiter,
372 *plower, *pupper, incr, chunk ) );
373 __kmp_str_free( &buff );
    // Optional consistency checks: zero increment, zero-trip range, and an
    // illegal increment (the bodies of the last two checks are not visible).
377 if( __kmp_env_consistency_check ) {
378 __kmp_push_workshare( gtid, ct_pdo, loc );
380 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
382 if( incr > 0 ? (*pupper < *plower) : (*plower < *pupper) ) {
392 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc );
    // Must be inside a teams construct (th_teams_microtask set). team_id is
    // this team's index among its siblings (master tid in the parent team).
395 tid = __kmp_tid_from_gtid( gtid );
396 th = __kmp_threads[gtid];
397 KMP_DEBUG_ASSERT(th->th.th_teams_microtask);
398 nth = th->th.th_team_nproc;
399 team = th->th.th_team;
401 nteams = th->th.th_teams_size.nteams;
403 team_id = team->t.t_master_tid;
404 KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);
    // Full trip count in UT; special-cased for incr == +/-1 (the incr == 1
    // guard line is not visible).
408 trip_count = *pupper - *plower + 1;
409 }
else if(incr == -1) {
410 trip_count = *plower - *pupper + 1;
412 trip_count = (ST)(*pupper - *plower) / incr + 1;
    // Stride spans the whole space; level 1: distribute across teams.
414 *pstride = *pupper - *plower;
    // Fewer iterations than teams: one iteration per team (thread 0 only),
    // empty range otherwise.
415 if( trip_count <= nteams ) {
417 __kmp_static == kmp_sch_static_greedy || \
418 __kmp_static == kmp_sch_static_balanced
421 if( team_id < trip_count && tid == 0 ) {
422 *pupper = *pupperDist = *plower = *plower + team_id * incr;
424 *pupperDist = *pupper;
425 *plower = *pupper + incr;
427 if( plastiter != NULL )
428 *plastiter = ( tid == 0 && team_id == trip_count - 1 );
    // Balanced split across teams: chunkD iterations each plus one extra
    // for the first `extras` teams.
431 if( __kmp_static == kmp_sch_static_balanced ) {
432 register UT chunkD = trip_count / nteams;
433 register UT extras = trip_count % nteams;
434 *plower += incr * ( team_id * chunkD + ( team_id < extras ? team_id : extras ) );
435 *pupperDist = *plower + chunkD * incr - ( team_id < extras ? 0 : incr );
436 if( plastiter != NULL )
437 *plastiter = ( team_id == nteams - 1 );
    // Greedy split across teams: ceil(trip_count/nteams) blocks, with the
    // last team's bound clamped via i_maxmin on wraparound.
439 register T chunk_inc_count =
440 ( trip_count / nteams + ( ( trip_count % nteams ) ? 1 : 0) ) * incr;
441 register T upper = *pupper;
442 KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
444 *plower += team_id * chunk_inc_count;
445 *pupperDist = *plower + chunk_inc_count - incr;
    // incr > 0: detect overflow, clamp, cap at original upper bound.
448 if( *pupperDist < *plower )
449 *pupperDist = i_maxmin< T >::mx;
450 if( plastiter != NULL )
451 *plastiter = *plower <= upper && *pupperDist > upper - incr;
452 if( *pupperDist > upper )
454 if( *plower > *pupperDist ) {
455 *pupper = *pupperDist;
    // incr < 0: mirror path clamping to the type minimum.
459 if( *pupperDist > *plower )
460 *pupperDist = i_maxmin< T >::mn;
461 if( plastiter != NULL )
462 *plastiter = *plower >= upper && *pupperDist < upper - incr;
463 if( *pupperDist < upper )
465 if( *plower < *pupperDist ) {
466 *pupper = *pupperDist;
    // Level 2: trip count of this TEAM's sub-range [*plower, *pupperDist],
    // then split it across the team's threads.
474 trip_count = *pupperDist - *plower + 1;
475 }
else if(incr == -1) {
476 trip_count = *plower - *pupperDist + 1;
478 trip_count = (ST)(*pupperDist - *plower) / incr + 1;
480 KMP_DEBUG_ASSERT( trip_count );
    // Fewer iterations than threads: one per thread, empty otherwise.
    // Note: *plastiter is only cleared here (set-to-0 lines not visible),
    // never re-set — it keeps the value computed in the team-level split.
484 if( trip_count <= nth ) {
486 __kmp_static == kmp_sch_static_greedy || \
487 __kmp_static == kmp_sch_static_balanced
489 if( tid < trip_count )
490 *pupper = *plower = *plower + tid * incr;
492 *plower = *pupper + incr;
493 if( plastiter != NULL )
494 if( *plastiter != 0 && !( tid == trip_count - 1 ) )
    // Balanced split across threads.
497 if( __kmp_static == kmp_sch_static_balanced ) {
498 register UT chunkL = trip_count / nth;
499 register UT extras = trip_count % nth;
500 *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
501 *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
502 if( plastiter != NULL )
503 if( *plastiter != 0 && !( tid == nth - 1 ) )
    // Greedy split across threads, clamped against the TEAM bound
    // (*pupperDist), not the global bound.
506 register T chunk_inc_count =
507 ( trip_count / nth + ( ( trip_count % nth ) ? 1 : 0) ) * incr;
508 register T upper = *pupperDist;
509 KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
511 *plower += tid * chunk_inc_count;
512 *pupper = *plower + chunk_inc_count - incr;
514 if( *pupper < *plower )
515 *pupper = i_maxmin< T >::mx;
516 if( plastiter != NULL )
517 if( *plastiter != 0 && !(*plower <= upper && *pupper > upper - incr) )
519 if( *pupper > upper )
522 if( *pupper > *plower )
523 *pupper = i_maxmin< T >::mn;
524 if( plastiter != NULL )
525 if( *plastiter != 0 && !(*plower >= upper && *pupper < upper - incr) )
527 if( *pupper < upper )
    // Chunked static within the team (span computation not visible).
534 case kmp_sch_static_chunked:
540 *pstride = span * nth;
541 *plower = *plower + (span * tid);
542 *pupper = *plower + span - incr;
543 if( plastiter != NULL )
544 if( *plastiter != 0 && !(tid == ((trip_count - 1) / ( UT )chunk) % nth) )
549 KMP_ASSERT2( 0,
"__kmpc_dist_for_static_init: unknown loop scheduling type" );
    // Exit trace of the computed thread and team bounds.
558 buff = __kmp_str_format(
559 "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s "\
560 "stride=%%%s signed?<%s>\n",
561 traits_t< T >::spec, traits_t< T >::spec, traits_t< T >::spec,
562 traits_t< ST >::spec, traits_t< T >::spec );
563 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pupperDist, *pstride ) );
564 __kmp_str_free( &buff );
567 KE_TRACE( 10, (
"__kmpc_dist_for_static_init: T#%d return\n", gtid ) );
// __kmp_team_static_init< T >: compute the calling TEAM's chunk of a loop
// scheduled across the teams of a teams construct (chunked round-robin by
// team_id). Writes the team's bounds to *p_lb/*p_ub, the inter-team stride
// to *p_st, and the last-chunk flag to *p_last.
// NOTE(review): this extract is missing many interior lines (numbering
// gaps); local declarations (th, team, nteams, team_id, trip_count, span,
// lower, upper) and several guards are not visible.
571 template<
typename T >
573 __kmp_team_static_init(
579 typename traits_t< T >::signed_t *p_st,
580 typename traits_t< T >::signed_t incr,
581 typename traits_t< T >::signed_t chunk
588 typedef typename traits_t< T >::unsigned_t UT;
589 typedef typename traits_t< T >::signed_t ST;
599 KMP_DEBUG_ASSERT( p_last && p_lb && p_ub && p_st );
600 KE_TRACE( 10, (
"__kmp_team_static_init called (%d)\n", gtid));
    // Entry trace with per-type format specifiers.
605 buff = __kmp_str_format(
"__kmp_team_static_init enter: T#%%d liter=%%d "\
606 "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
607 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
608 traits_t< ST >::spec, traits_t< T >::spec );
609 KD_TRACE(100, ( buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk ) );
610 __kmp_str_free( &buff );
    // Optional consistency checks on the (by-value) lower/upper/incr.
616 if( __kmp_env_consistency_check ) {
618 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
620 if( incr > 0 ? (upper < lower) : (lower < upper) ) {
630 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc );
    // Must be inside a teams construct; team_id is this team's index.
633 th = __kmp_threads[gtid];
634 KMP_DEBUG_ASSERT(th->th.th_teams_microtask);
635 team = th->th.th_team;
637 nteams = th->th.th_teams_size.nteams;
639 team_id = team->t.t_master_tid;
640 KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);
    // Trip count in UT, special-cased for incr == +/-1 (incr == 1 guard
    // not visible).
644 trip_count = upper - lower + 1;
645 }
else if(incr == -1) {
646 trip_count = lower - upper + 1;
648 trip_count = (ST)(upper - lower) / incr + 1;
    // Chunked round-robin across teams: this team takes chunk `team_id`,
    // then every nteams-th chunk (stride = span * nteams).
653 *p_st = span * nteams;
654 *p_lb = lower + (span * team_id);
655 *p_ub = *p_lb + span - incr;
656 if ( p_last != NULL )
657 *p_last = (team_id == ((trip_count - 1)/(UT)chunk) % nteams);
    // Clamp *p_ub on arithmetic wraparound (guards not visible; presumably
    // the incr > 0 / incr < 0 overflow checks, as in the functions above).
661 *p_ub = i_maxmin< T >::mx;
666 *p_ub = i_maxmin< T >::mn;
    // Exit trace of the computed team bounds.
674 buff = __kmp_str_format(
"__kmp_team_static_init exit: T#%%d team%%u liter=%%d "\
675 "iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
676 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
677 traits_t< ST >::spec );
678 KD_TRACE(100, ( buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk ) );
679 __kmp_str_free( &buff );
// Extern entry points __kmpc_for_static_init_{4,4u,8,8u}: thin wrappers the
// compiler calls for each index width/signedness, each forwarding to the
// __kmp_for_static_init< T > template with the matching T.
// NOTE(review): the function headers (return type, name, leading
// parameters) are not visible in this extract; only parameter tails and
// forwarding calls are shown.
709 kmp_int32 *plower, kmp_int32 *pupper,
710 kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
712 __kmp_for_static_init< kmp_int32 >(
713 loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
721 kmp_uint32 *plower, kmp_uint32 *pupper,
722 kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
724 __kmp_for_static_init< kmp_uint32 >(
725 loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
733 kmp_int64 *plower, kmp_int64 *pupper,
734 kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
736 __kmp_for_static_init< kmp_int64 >(
737 loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
745 kmp_uint64 *plower, kmp_uint64 *pupper,
746 kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
748 __kmp_for_static_init< kmp_uint64 >(
749 loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
// Extern entry points __kmpc_dist_for_static_init_{4,4u,8,8u}: per-width
// wrappers forwarding to __kmp_dist_for_static_init< T >.
// NOTE(review): return types, function names, and closing braces are not
// visible in this extract.
779 ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
780 kmp_int32 *plower, kmp_int32 *pupper, kmp_int32 *pupperD,
781 kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
783 __kmp_dist_for_static_init< kmp_int32 >(
784 loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
792 ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
793 kmp_uint32 *plower, kmp_uint32 *pupper, kmp_uint32 *pupperD,
794 kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
796 __kmp_dist_for_static_init< kmp_uint32 >(
797 loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
805 ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
806 kmp_int64 *plower, kmp_int64 *pupper, kmp_int64 *pupperD,
807 kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
809 __kmp_dist_for_static_init< kmp_int64 >(
810 loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
818 ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
819 kmp_uint64 *plower, kmp_uint64 *pupper, kmp_uint64 *pupperD,
820 kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
822 __kmp_dist_for_static_init< kmp_uint64 >(
823 loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
// Extern entry points __kmpc_team_static_init_{4,4u,8,8u}: per-width
// wrappers forwarding to __kmp_team_static_init< T >. Each first asserts
// the runtime's serial initialization has completed.
// NOTE(review): return types, function names, and closing braces are not
// visible in this extract.
856 ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
857 kmp_int32 *p_lb, kmp_int32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk )
859 KMP_DEBUG_ASSERT( __kmp_init_serial );
860 __kmp_team_static_init< kmp_int32 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
868 ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
869 kmp_uint32 *p_lb, kmp_uint32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk )
871 KMP_DEBUG_ASSERT( __kmp_init_serial );
872 __kmp_team_static_init< kmp_uint32 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
880 ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
881 kmp_int64 *p_lb, kmp_int64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk )
883 KMP_DEBUG_ASSERT( __kmp_init_serial );
884 __kmp_team_static_init< kmp_int64 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
892 ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
893 kmp_uint64 *p_lb, kmp_uint64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk )
895 KMP_DEBUG_ASSERT( __kmp_init_serial );
896 __kmp_team_static_init< kmp_uint64 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
void __kmpc_team_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_int64 *p_lb, kmp_int64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_dist_for_static_init_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_uint32 *plower, kmp_uint32 *pupper, kmp_uint32 *pupperD, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_team_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_int32 *p_lb, kmp_int32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_for_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_int64 *plower, kmp_int64 *pupper, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)
#define KMP_COUNT_BLOCK(name)
Increments the specified counter (name).
void __kmpc_team_static_init_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_uint32 *p_lb, kmp_uint32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_for_static_init_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_uint32 *plower, kmp_uint32 *pupper, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_dist_for_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_int32 *plower, kmp_int32 *pupper, kmp_int32 *pupperD, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_team_static_init_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_uint64 *p_lb, kmp_uint64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_dist_for_static_init_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_uint64 *plower, kmp_uint64 *pupper, kmp_uint64 *pupperD, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_int32 *plower, kmp_int32 *pupper, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_dist_for_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_int64 *plower, kmp_int64 *pupper, kmp_int64 *pupperD, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_for_static_init_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_uint64 *plower, kmp_uint64 *pupper, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)