#include "kmp.h"
#include "kmp_itt.h"
#include "kmp_str.h"
#include "kmp_error.h"
#include "kmp_stats.h"
// Integer max/min limits, used below to clamp computed bounds when the
// greedy schedule arithmetic overflows.
template< typename T >
struct i_maxmin {
    static const T mx;
    static const T mn;
};
template<>
struct i_maxmin< int > {
    static const int mx = 0x7fffffff;
    static const int mn = 0x80000000;
};
template<>
struct i_maxmin< unsigned int > {
    static const unsigned int mx = 0xffffffff;
    static const unsigned int mn = 0x00000000;
};
template<>
struct i_maxmin< long long > {
    static const long long mx = 0x7fffffffffffffffLL;
    static const long long mn = 0x8000000000000000LL;
};
template<>
struct i_maxmin< unsigned long long > {
    static const unsigned long long mx = 0xffffffffffffffffLL;
    static const unsigned long long mn = 0x0000000000000000LL;
};
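/*
   Side note (illustrative, not from the original source): these constants
   mirror std::numeric_limits for each specialization. Under a C++11 build
   one could verify, e.g.:

       #include <limits>
       static_assert( i_maxmin< int >::mx == std::numeric_limits< int >::max(),
                      "i_maxmin<int>::mx must equal INT_MAX" );
*/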
char const * traits_t< int >::spec = "d";
char const * traits_t< unsigned int >::spec = "u";
char const * traits_t< long long >::spec = "lld";
char const * traits_t< unsigned long long >::spec = "llu";
template< typename T >
static void
__kmp_for_static_init(
    ident_t                          *loc,
    kmp_int32                         global_tid,
    kmp_int32                         schedtype,
    kmp_int32                        *plastiter,
    T                                *plower,
    T                                *pupper,
    typename traits_t< T >::signed_t *pstride,
    typename traits_t< T >::signed_t  incr,
    typename traits_t< T >::signed_t  chunk
) {
    KMP_COUNT_BLOCK(OMP_FOR_static);
    typedef typename traits_t< T >::unsigned_t  UT;
    typedef typename traits_t< T >::signed_t    ST;

    register kmp_int32   gtid = global_tid;
    register kmp_uint32  tid;
    register kmp_uint32  nth;
    register UT          trip_count;
    register kmp_team_t *team;
    KMP_DEBUG_ASSERT( plastiter && plower && pupper && pstride );
    KE_TRACE( 10, ("__kmpc_for_static_init called (%d)\n", global_tid));
    #ifdef KMP_DEBUG
    {
        char *buff;
        // create format specifiers before the debug output
        buff = __kmp_str_format(
            "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s," \
            " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
            traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
            traits_t< ST >::spec, traits_t< ST >::spec, traits_t< T >::spec );
        KD_TRACE(100, ( buff, global_tid, schedtype, *plastiter,
            *plower, *pupper, *pstride, incr, chunk ) );
        __kmp_str_free( &buff );
    }
    #endif
    if ( __kmp_env_consistency_check ) {
        __kmp_push_workshare( global_tid, ct_pdo, loc );
        if ( incr == 0 ) {
            __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
        }
    }
    /* special handling for zero-trip loops */
    if ( incr > 0 ? (*pupper < *plower) : (*plower < *pupper) ) {
        if( plastiter != NULL )
            *plastiter = FALSE;
        /* leave pupper and plower set to entire iteration space */
        *pstride = incr;   /* value should never be used */
        #ifdef KMP_DEBUG
        {
            char *buff;
            buff = __kmp_str_format(
                "__kmpc_for_static_init:(ZERO TRIP) liter=%%d lower=%%%s upper=%%%s stride = %%%s signed?<%s>, loc = %%s\n",
                traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec, traits_t< T >::spec );
            KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride, loc->psource ) );
            __kmp_str_free( &buff );
        }
        #endif
        KE_TRACE( 10, ("__kmpc_for_static_init: T#%d return\n", global_tid ) );
        return;
    }
    if ( schedtype > kmp_ord_upper ) {
        // we are in DISTRIBUTE construct
        schedtype += kmp_sch_static - kmp_distribute_static;
        tid  = __kmp_threads[ gtid ]->th.th_team->t.t_master_tid;
        team = __kmp_threads[ gtid ]->th.th_team->t.t_parent;
    } else {
        tid  = __kmp_tid_from_gtid( global_tid );
        team = __kmp_threads[ gtid ]->th.th_team;
    }
    /* determine if "for" loop is an active worksharing construct */
    if ( team -> t.t_serialized ) {
        /* serialized parallel, each thread executes whole iteration space */
        if( plastiter != NULL )
            *plastiter = TRUE;
        /* leave pupper and plower set to entire iteration space */
        *pstride = (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
        #ifdef KMP_DEBUG
        {
            char *buff;
            buff = __kmp_str_format(
                "__kmpc_for_static_init: (serial) liter=%%d lower=%%%s upper=%%%s stride = %%%s\n",
                traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec );
            KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
            __kmp_str_free( &buff );
        }
        #endif
        KE_TRACE( 10, ("__kmpc_for_static_init: T#%d return\n", global_tid ) );
        return;
    }
    nth = team->t.t_nproc;
    if ( nth == 1 ) {
        /* only one thread: it owns the entire iteration space */
        if( plastiter != NULL )
            *plastiter = TRUE;
        *pstride = (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
        #ifdef KMP_DEBUG
        {
            char *buff;
            buff = __kmp_str_format(
                "__kmpc_for_static_init: (serial) liter=%%d lower=%%%s upper=%%%s stride = %%%s\n",
                traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec );
            KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
            __kmp_str_free( &buff );
        }
        #endif
        KE_TRACE( 10, ("__kmpc_for_static_init: T#%d return\n", global_tid ) );
        return;
    }
    /* compute trip count */
    if ( incr == 1 ) {
        trip_count = *pupper - *plower + 1;
    } else if (incr == -1) {
        trip_count = *plower - *pupper + 1;
    } else {
        if ( incr > 1 )  // the check is needed for unsigned division when incr < 0
            trip_count = (*pupper - *plower) / incr + 1;
        else
            trip_count = (*plower - *pupper) / ( -incr ) + 1;
    }
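    /*
       Example: lower = 0, upper = 9, incr = 2 gives
       trip_count = (9 - 0) / 2 + 1 = 5 (iterations 0, 2, 4, 6, 8).
    */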
    if ( __kmp_env_consistency_check ) {
        /* tripcount overflow? */
        if ( trip_count == 0 && *pupper != *plower ) {
            __kmp_error_construct( kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo, loc );
        }
    }
    /* compute remaining parameters */
    switch ( schedtype ) {
    case kmp_sch_static:
    {
        if ( trip_count < nth ) {
            KMP_DEBUG_ASSERT(
                __kmp_static == kmp_sch_static_greedy || \
                __kmp_static == kmp_sch_static_balanced
            ); // Unknown static scheduling type.
            if ( tid < trip_count ) {
                *pupper = *plower = *plower + tid * incr;
            } else {
                *plower = *pupper + incr;  // no iterations for this thread
            }
            if( plastiter != NULL )
                *plastiter = ( tid == trip_count - 1 );
        } else {
            if ( __kmp_static == kmp_sch_static_balanced ) {
                register UT small_chunk = trip_count / nth;
                register UT extras = trip_count % nth;
                *plower += incr * ( tid * small_chunk + ( tid < extras ? tid : extras ) );
                *pupper = *plower + small_chunk * incr - ( tid < extras ? 0 : incr );
                if( plastiter != NULL )
                    *plastiter = ( tid == nth - 1 );
            } else {
                register T big_chunk_inc_count = ( trip_count/nth +
                    ( ( trip_count % nth ) ? 1 : 0) ) * incr;
                register T old_upper = *pupper;
                KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
                *plower += tid * big_chunk_inc_count;
                *pupper = *plower + big_chunk_inc_count - incr;
                if ( incr > 0 ) {
                    if( *pupper < *plower )  // overflow?
                        *pupper = i_maxmin< T >::mx;
                    if( plastiter != NULL )
                        *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
                    if ( *pupper > old_upper ) *pupper = old_upper; // tracker C73258
                } else {
                    if( *pupper > *plower )  // underflow?
                        *pupper = i_maxmin< T >::mn;
                    if( plastiter != NULL )
                        *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
                    if ( *pupper < old_upper ) *pupper = old_upper; // tracker C73258
                }
            }
        }
        break;
    }
    case kmp_sch_static_chunked:
    {
        register ST span;
        if ( chunk < 1 )
            chunk = 1;
        span = chunk * incr;
        *pstride = span * nth;
        *plower = *plower + (span * tid);
        *pupper = *plower + span - incr;
        if( plastiter != NULL )
            *plastiter = (tid == ((trip_count - 1)/( UT )chunk) % nth);
        break;
    }
    default:
        KMP_ASSERT2( 0, "__kmpc_for_static_init: unknown scheduling type" );
        break;
    }
#if USE_ITT_BUILD
    if ( KMP_MASTER_TID(tid) && __itt_metadata_add_ptr && __kmp_forkjoin_frames_mode == 3 ) {
        kmp_uint64 cur_chunk = chunk;
        if ( schedtype == kmp_sch_static )  // chunk was not specified, calculate it
            cur_chunk = trip_count / nth + ( ( trip_count % nth ) ? 1 : 0);
        __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);  // 0 - "static" schedule
    }
#endif
    #ifdef KMP_DEBUG
    {
        char *buff;
        buff = __kmp_str_format(
            "__kmpc_for_static_init: liter=%%d lower=%%%s upper=%%%s stride = %%%s signed?<%s>\n",
            traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec, traits_t< T >::spec );
        KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
        __kmp_str_free( &buff );
    }
    #endif
    KE_TRACE( 10, ("__kmpc_for_static_init: T#%d return\n", global_tid ) );
    return;
}
template< typename T >
static void
__kmp_dist_for_static_init(
    ident_t                          *loc,
    kmp_int32                         gtid,
    kmp_int32                         schedule,
    kmp_int32                        *plastiter,
    T                                *plower,
    T                                *pupper,
    T                                *pupperDist,
    typename traits_t< T >::signed_t *pstride,
    typename traits_t< T >::signed_t  incr,
    typename traits_t< T >::signed_t  chunk
) {
    KMP_COUNT_BLOCK(OMP_DISTRIBUTE);
    typedef typename traits_t< T >::unsigned_t  UT;
    typedef typename traits_t< T >::signed_t    ST;
    register kmp_uint32  tid;
    register kmp_uint32  nth;
    register kmp_uint32  team_id;
    register kmp_uint32  nteams;
    register UT          trip_count;
    register kmp_team_t *team;
    kmp_info_t          *th;

    KMP_DEBUG_ASSERT( plastiter && plower && pupper && pupperDist && pstride );
    KE_TRACE( 10, ("__kmpc_dist_for_static_init called (%d)\n", gtid));
    #ifdef KMP_DEBUG
    {
        char *buff;
        // create format specifiers before the debug output
        buff = __kmp_str_format(
            "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d "\
            "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
            traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
            traits_t< ST >::spec, traits_t< T >::spec );
        KD_TRACE(100, ( buff, gtid, schedule, *plastiter,
            *plower, *pupper, incr, chunk ) );
        __kmp_str_free( &buff );
    }
    #endif
    if( __kmp_env_consistency_check ) {
        __kmp_push_workshare( gtid, ct_pdo, loc );
        if( incr == 0 )
            __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
        if( incr > 0 ? (*pupper < *plower) : (*plower < *pupper) )
            // The loop is illegal; zero-trip loops are filtered out by the compiler.
            __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc );
    }
    tid = __kmp_tid_from_gtid( gtid ); // the thread ID within the team
    th = __kmp_threads[gtid];
    KMP_DEBUG_ASSERT(th->th.th_teams_microtask);   // we are in the teams construct
    nth = th->th.th_team_nproc;
    team = th->th.th_team;
    #if OMP_40_ENABLED
    nteams = th->th.th_teams_size.nteams;
    #endif
    team_id = team->t.t_master_tid;
    KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);
    // compute global trip count
    if( incr == 1 ) {
        trip_count = *pupper - *plower + 1;
    } else if(incr == -1) {
        trip_count = *plower - *pupper + 1;
    } else {
        trip_count = (ST)(*pupper - *plower) / incr + 1; // the cast is needed for unsigned
    }
    *pstride = *pupper - *plower;  // just in case (can be unused)
    if( trip_count <= nteams ) {
        KMP_DEBUG_ASSERT(
            __kmp_static == kmp_sch_static_greedy || \
            __kmp_static == kmp_sch_static_balanced
        ); // Unknown static scheduling type.
        // only masters of some teams get single iteration, other threads get nothing
        if( team_id < trip_count && tid == 0 ) {
            *pupper = *pupperDist = *plower = *plower + team_id * incr;
        } else {
            *pupperDist = *pupper;
            *plower = *pupper + incr; // compiler should skip the loop body
        }
        if( plastiter != NULL )
            *plastiter = ( tid == 0 && team_id == trip_count - 1 );
    } else {
        // Get the team's chunk first (each team gets at most one chunk)
        if( __kmp_static == kmp_sch_static_balanced ) {
            register UT chunkD = trip_count / nteams;
            register UT extras = trip_count % nteams;
            *plower += incr * ( team_id * chunkD + ( team_id < extras ? team_id : extras ) );
            *pupperDist = *plower + chunkD * incr - ( team_id < extras ? 0 : incr );
            if( plastiter != NULL )
                *plastiter = ( team_id == nteams - 1 );
        } else {
            register T chunk_inc_count =
                ( trip_count / nteams + ( ( trip_count % nteams ) ? 1 : 0) ) * incr;
            register T upper = *pupper;
            KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
            *plower += team_id * chunk_inc_count;
            *pupperDist = *plower + chunk_inc_count - incr;
            // check/correct bounds if needed
            if( incr > 0 ) {
                if( *pupperDist < *plower )  // overflow?
                    *pupperDist = i_maxmin< T >::mx;
                if( plastiter != NULL )
                    *plastiter = *plower <= upper && *pupperDist > upper - incr;
                if( *pupperDist > upper )
                    *pupperDist = upper; // tracker C73258
                if( *plower > *pupperDist ) {
                    *pupper = *pupperDist;  // no iterations available
                    goto end;
                }
            } else {
                if( *pupperDist > *plower )  // underflow?
                    *pupperDist = i_maxmin< T >::mn;
                if( plastiter != NULL )
                    *plastiter = *plower >= upper && *pupperDist < upper - incr;
                if( *pupperDist < upper )
                    *pupperDist = upper; // tracker C73258
                if( *plower < *pupperDist ) {
                    *pupper = *pupperDist;  // no iterations available
                    goto end;
                }
            }
        }
        // compute trip count for the team's chunk
        if( incr == 1 ) {
            trip_count = *pupperDist - *plower + 1;
        } else if(incr == -1) {
            trip_count = *plower - *pupperDist + 1;
        } else {
            trip_count = (ST)(*pupperDist - *plower) / incr + 1;
        }
        KMP_DEBUG_ASSERT( trip_count );
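        // The team's sub-range [*plower, *pupperDist] is now divided among
        // the nth threads of this team, using the same static logic as
        // __kmp_for_static_init above.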
        switch( schedule ) {
        case kmp_sch_static:
        {
            if( trip_count <= nth ) {
                KMP_DEBUG_ASSERT(
                    __kmp_static == kmp_sch_static_greedy || \
                    __kmp_static == kmp_sch_static_balanced
                ); // Unknown static scheduling type.
                if( tid < trip_count )
                    *pupper = *plower = *plower + tid * incr;
                else
                    *plower = *pupper + incr; // no iterations for this thread
                if( plastiter != NULL )
                    if( *plastiter != 0 && !( tid == trip_count - 1 ) )
                        *plastiter = 0;
            } else {
                if( __kmp_static == kmp_sch_static_balanced ) {
                    register UT chunkL = trip_count / nth;
                    register UT extras = trip_count % nth;
                    *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
                    *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
                    if( plastiter != NULL )
                        if( *plastiter != 0 && !( tid == nth - 1 ) )
                            *plastiter = 0;
                } else {
                    register T chunk_inc_count =
                        ( trip_count / nth + ( ( trip_count % nth ) ? 1 : 0) ) * incr;
                    register T upper = *pupperDist;
                    KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
                    *plower += tid * chunk_inc_count;
                    *pupper = *plower + chunk_inc_count - incr;
                    if( incr > 0 ) {
                        if( *pupper < *plower )  // overflow?
                            *pupper = i_maxmin< T >::mx;
                        if( plastiter != NULL )
                            if( *plastiter != 0 && !(*plower <= upper && *pupper > upper - incr) )
                                *plastiter = 0;
                        if( *pupper > upper )
                            *pupper = upper; // tracker C73258
                    } else {
                        if( *pupper > *plower )  // underflow?
                            *pupper = i_maxmin< T >::mn;
                        if( plastiter != NULL )
                            if( *plastiter != 0 && !(*plower >= upper && *pupper < upper - incr) )
                                *plastiter = 0;
                        if( *pupper < upper )
                            *pupper = upper; // tracker C73258
                    }
                }
            }
            break;
        }
        case kmp_sch_static_chunked:
        {
            register ST span;
            if( chunk < 1 )
                chunk = 1;
            span = chunk * incr;
            *pstride = span * nth;
            *plower = *plower + (span * tid);
            *pupper = *plower + span - incr;
            if( plastiter != NULL )
                if( *plastiter != 0 && !(tid == ((trip_count - 1) / ( UT )chunk) % nth) )
                    *plastiter = 0;
            break;
        }
        default:
            KMP_ASSERT2( 0, "__kmpc_dist_for_static_init: unknown loop scheduling type" );
            break;
        }
    }
    end:;
    #ifdef KMP_DEBUG
    {
        char *buff;
        buff = __kmp_str_format(
            "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s "\
            "stride=%%%s signed?<%s>\n",
            traits_t< T >::spec, traits_t< T >::spec, traits_t< T >::spec,
            traits_t< ST >::spec, traits_t< T >::spec );
        KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pupperDist, *pstride ) );
        __kmp_str_free( &buff );
    }
    #endif
    KE_TRACE( 10, ("__kmpc_dist_for_static_init: T#%d return\n", gtid ) );
    return;
}
template< typename T >
static void
__kmp_team_static_init(
    ident_t                          *loc,
    kmp_int32                         gtid,
    kmp_int32                        *p_last,
    T                                *p_lb,
    T                                *p_ub,
    typename traits_t< T >::signed_t *p_st,
    typename traits_t< T >::signed_t  incr,
    typename traits_t< T >::signed_t  chunk
) {
    // Returns the team's first chunk and the stride to the next chunk; the
    // last-iteration flag is set for the team executing the last iteration.
    // Called for dist_schedule(static, chunk) only.
    typedef typename traits_t< T >::unsigned_t  UT;
    typedef typename traits_t< T >::signed_t    ST;
    kmp_uint32  team_id, nteams;
    UT          trip_count;
    T           lower, upper;
    ST          span;
    kmp_team_t *team;
    kmp_info_t *th;

    KMP_DEBUG_ASSERT( p_last && p_lb && p_ub && p_st );
    KE_TRACE( 10, ("__kmp_team_static_init called (%d)\n", gtid));
    #ifdef KMP_DEBUG
    {
        char *buff;
        buff = __kmp_str_format( "__kmp_team_static_init enter: T#%%d liter=%%d "\
            "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
            traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
            traits_t< ST >::spec, traits_t< T >::spec );
        KD_TRACE(100, ( buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk ) );
        __kmp_str_free( &buff );
    }
    #endif
    lower = *p_lb;
    upper = *p_ub;
    if( __kmp_env_consistency_check ) {
        if( incr == 0 )
            __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
        if( incr > 0 ? (upper < lower) : (lower < upper) )
            __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc );
    }
    th = __kmp_threads[gtid];
    KMP_DEBUG_ASSERT(th->th.th_teams_microtask);   // we are in the teams construct
    team = th->th.th_team;
    #if OMP_40_ENABLED
    nteams = th->th.th_teams_size.nteams;
    #endif
    team_id = team->t.t_master_tid;
    KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);
    // compute trip count
    if( incr == 1 ) {
        trip_count = upper - lower + 1;
    } else if(incr == -1) {
        trip_count = lower - upper + 1;
    } else {
        trip_count = (ST)(upper - lower) / incr + 1; // the cast is needed for unsigned
    }
    // compute the team's chunk and the stride between consecutive chunks
    if( chunk < 1 )
        chunk = 1;
    span = chunk * incr;
    *p_st = span * nteams;
    *p_lb = lower + (span * team_id);
    *p_ub = *p_lb + span - incr;
    if ( p_last != NULL )
        *p_last = (team_id == ((trip_count - 1)/(UT)chunk) % nteams);
    // correct the upper bound if needed
    if( incr > 0 ) {
        if( *p_ub < *p_lb )  // overflow?
            *p_ub = i_maxmin< T >::mx;
        if( *p_ub > upper )
            *p_ub = upper; // tracker C73258
    } else {
        if( *p_ub > *p_lb )  // underflow?
            *p_ub = i_maxmin< T >::mn;
        if( *p_ub < upper )
            *p_ub = upper; // tracker C73258
    }
    #ifdef KMP_DEBUG
    {
        char *buff;
        buff = __kmp_str_format( "__kmp_team_static_init exit: T#%%d team%%u liter=%%d "\
            "iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
            traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
            traits_t< ST >::spec );
        KD_TRACE(100, ( buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk ) );
        __kmp_str_free( &buff );
    }
    #endif
}
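/*
   Worked example (illustrative): dist_schedule(static, 4) with incr = 1,
   nteams = 2, lower = 0. span = 4, so *p_st = 8: team 0 starts with [0, 3]
   and would take [8, 11] next; team 1 starts with [4, 7] and would take
   [12, 15] next.
*/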
void
__kmpc_for_static_init_4( ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter,
                          kmp_int32 *plower, kmp_int32 *pupper,
                          kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
{
    __kmp_for_static_init< kmp_int32 >(
        loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
}

void
__kmpc_for_static_init_4u( ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter,
                           kmp_uint32 *plower, kmp_uint32 *pupper,
                           kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
{
    __kmp_for_static_init< kmp_uint32 >(
        loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
}

void
__kmpc_for_static_init_8( ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter,
                          kmp_int64 *plower, kmp_int64 *pupper,
                          kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
{
    __kmp_for_static_init< kmp_int64 >(
        loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
}

void
__kmpc_for_static_init_8u( ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter,
                           kmp_uint64 *plower, kmp_uint64 *pupper,
                           kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
{
    __kmp_for_static_init< kmp_uint64 >(
        loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
}
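/*
   Illustration only: roughly the call sequence a compiler emits for
   "#pragma omp for schedule(static)" over i in [0, N). kmp_sch_static is
   the schedtype constant from kmp.h; __kmpc_global_thread_num and
   __kmpc_for_static_fini are defined elsewhere in the runtime (assumed
   here, as is the hypothetical wrapper name lowered_static_loop):

       void lowered_static_loop( ident_t *loc, kmp_int32 N )
       {
           kmp_int32 gtid = __kmpc_global_thread_num( loc );
           kmp_int32 last = 0, lb = 0, ub = N - 1, st = 1;
           __kmpc_for_static_init_4( loc, gtid, kmp_sch_static, &last,
                                     &lb, &ub, &st, 1, 1 );
           for ( kmp_int32 i = lb; i <= ub; ++i ) {
               // loop body
           }
           __kmpc_for_static_fini( loc, gtid );
       }
*/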
void
__kmpc_dist_for_static_init_4(
    ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
    kmp_int32 *plower, kmp_int32 *pupper, kmp_int32 *pupperD,
    kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
{
    __kmp_dist_for_static_init< kmp_int32 >(
        loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
}

void
__kmpc_dist_for_static_init_4u(
    ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
    kmp_uint32 *plower, kmp_uint32 *pupper, kmp_uint32 *pupperD,
    kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
{
    __kmp_dist_for_static_init< kmp_uint32 >(
        loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
}

void
__kmpc_dist_for_static_init_8(
    ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
    kmp_int64 *plower, kmp_int64 *pupper, kmp_int64 *pupperD,
    kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
{
    __kmp_dist_for_static_init< kmp_int64 >(
        loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
}

void
__kmpc_dist_for_static_init_8u(
    ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
    kmp_uint64 *plower, kmp_uint64 *pupper, kmp_uint64 *pupperD,
    kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
{
    __kmp_dist_for_static_init< kmp_uint64 >(
        loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
}
void
__kmpc_team_static_init_4(
    ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
    kmp_int32 *p_lb, kmp_int32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );
    __kmp_team_static_init< kmp_int32 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
}

void
__kmpc_team_static_init_4u(
    ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
    kmp_uint32 *p_lb, kmp_uint32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );
    __kmp_team_static_init< kmp_uint32 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
}

void
__kmpc_team_static_init_8(
    ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
    kmp_int64 *p_lb, kmp_int64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );
    __kmp_team_static_init< kmp_int64 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
}

void
__kmpc_team_static_init_8u(
    ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
    kmp_uint64 *p_lb, kmp_uint64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );
    __kmp_team_static_init< kmp_uint64 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
}