// NOTE(review): this file appears to be a damaged text extraction of the LLVM
// OpenMP runtime's kmp_sched.cpp (C++, not C). Original source line numbers
// are fused into the code text and many interior lines are missing — restore
// from the upstream file before attempting to compile.
//
// printf-style format-specifier strings for each iteration type T; the trace
// helpers below (__kmp_str_format) splice these into their format strings.
23 #include "kmp_error.h" 26 #include "kmp_stats.h" 30 #include "ompt-specific.h" 36 char const *traits_t<int>::spec =
"d";
37 char const *traits_t<unsigned int>::spec =
"u";
38 char const *traits_t<long long>::spec =
"lld";
39 char const *traits_t<unsigned long long>::spec =
"llu";
// __kmp_for_static_init<T>: computes the iteration sub-range (*plower,
// *pupper, *pstride) owned by the calling thread for a statically scheduled
// worksharing loop, and sets *plastiter when this thread executes the last
// iteration. Bounds are updated in place.
// NOTE(review): the template<typename T> header and many interior lines are
// missing from this extraction; comments below describe only what is visible.
44 static void __kmp_for_static_init(
ident_t *loc, kmp_int32 global_tid,
45 kmp_int32 schedtype, kmp_int32 *plastiter,
47 typename traits_t<T>::signed_t *pstride,
48 typename traits_t<T>::signed_t incr,
49 typename traits_t<T>::signed_t chunk) {
51 KMP_TIME_PARTITIONED_BLOCK(FOR_static_scheduling);
53 typedef typename traits_t<T>::unsigned_t UT;
54 typedef typename traits_t<T>::signed_t ST;
56 kmp_int32 gtid = global_tid;
61 kmp_info_t *th = __kmp_threads[gtid];
// OMPT tracing state, fetched only when OMPT_SUPPORT && OMPT_TRACE is on.
63 #if OMPT_SUPPORT && OMPT_TRACE 64 ompt_team_info_t *team_info = NULL;
65 ompt_task_info_t *task_info = NULL;
69 team_info = __ompt_get_teaminfo(0, NULL);
70 task_info = __ompt_get_taskinfo(0);
74 KMP_DEBUG_ASSERT(plastiter && plower && pupper && pstride);
75 KE_TRACE(10, (
"__kmpc_for_static_init called (%d)\n", global_tid));
// Debug trace of the incoming arguments (format built from traits_t specs).
80 buff = __kmp_str_format(
81 "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s," 82 " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
83 traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
84 traits_t<ST>::spec, traits_t<ST>::spec, traits_t<T>::spec);
85 KD_TRACE(100, (buff, global_tid, schedtype, *plastiter, *plower, *pupper,
86 *pstride, incr, chunk));
87 __kmp_str_free(&buff);
// Consistency checks: workshare nesting and a zero-increment error.
91 if (__kmp_env_consistency_check) {
92 __kmp_push_workshare(global_tid, ct_pdo, loc);
94 __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
// Zero-trip loop: bounds already crossed for the given increment direction.
99 if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
100 if (plastiter != NULL)
112 buff = __kmp_str_format(
"__kmpc_for_static_init:(ZERO TRIP) liter=%%d " 113 "lower=%%%s upper=%%%s stride = %%%s " 114 "signed?<%s>, loc = %%s\n",
115 traits_t<T>::spec, traits_t<T>::spec,
116 traits_t<ST>::spec, traits_t<T>::spec);
118 (buff, *plastiter, *plower, *pupper, *pstride, loc->
psource));
119 __kmp_str_free(&buff);
122 KE_TRACE(10, (
"__kmpc_for_static_init: T#%d return\n", global_tid));
// OMPT loop-begin notification on the zero-trip early return path.
124 #if OMPT_SUPPORT && OMPT_TRACE 125 if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
126 ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
127 team_info->parallel_id, task_info->task_id, team_info->microtask);
// Resolve the executing thread id and team; one branch uses the parent team
// (presumably the teams-construct case — missing lines hide the condition).
143 tid = th->th.th_team->t.t_master_tid;
144 team = th->th.th_team->t.t_parent;
148 tid = __kmp_tid_from_gtid(global_tid);
149 team = th->th.th_team;
// Serialized team: the single thread gets the whole range; stride is the
// full (signed) trip count.
153 if (team->t.t_serialized) {
155 if (plastiter != NULL)
159 (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
165 buff = __kmp_str_format(
"__kmpc_for_static_init: (serial) liter=%%d " 166 "lower=%%%s upper=%%%s stride = %%%s\n",
167 traits_t<T>::spec, traits_t<T>::spec,
169 KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
170 __kmp_str_free(&buff);
173 KE_TRACE(10, (
"__kmpc_for_static_init: T#%d return\n", global_tid));
175 #if OMPT_SUPPORT && OMPT_TRACE 176 if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
177 ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
178 team_info->parallel_id, task_info->task_id, team_info->microtask);
183 nth = team->t.t_nproc;
// Single-thread team fast path (condition lines missing from extraction).
185 if (plastiter != NULL)
188 (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
193 buff = __kmp_str_format(
"__kmpc_for_static_init: (serial) liter=%%d " 194 "lower=%%%s upper=%%%s stride = %%%s\n",
195 traits_t<T>::spec, traits_t<T>::spec,
197 KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
198 __kmp_str_free(&buff);
201 KE_TRACE(10, (
"__kmpc_for_static_init: T#%d return\n", global_tid));
203 #if OMPT_SUPPORT && OMPT_TRACE 204 if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
205 ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
206 team_info->parallel_id, task_info->task_id, team_info->microtask);
// Compute the unsigned trip count; the +1/-1 fast cases avoid the division,
// and the casts to UT keep the division well-defined for large ranges.
214 trip_count = *pupper - *plower + 1;
215 }
else if (incr == -1) {
216 trip_count = *plower - *pupper + 1;
217 }
else if (incr > 0) {
219 trip_count = (UT)(*pupper - *plower) / incr + 1;
221 trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
// trip_count == 0 with distinct bounds indicates unsigned wrap, i.e. the
// iteration range overflowed the counter type.
224 if (__kmp_env_consistency_check) {
226 if (trip_count == 0 && *pupper != *plower) {
227 __kmp_error_construct(kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo,
// Fewer iterations than threads: threads tid < trip_count each take exactly
// one iteration; the rest get an empty range (*plower > *pupper).
236 if (trip_count < nth) {
238 __kmp_static == kmp_sch_static_greedy ||
240 kmp_sch_static_balanced);
241 if (tid < trip_count) {
242 *pupper = *plower = *plower + tid * incr;
244 *plower = *pupper + incr;
246 if (plastiter != NULL)
247 *plastiter = (tid == trip_count - 1);
// Balanced static: split trip_count into nth near-equal chunks, giving the
// first (trip_count % nth) threads one extra iteration each.
249 if (__kmp_static == kmp_sch_static_balanced) {
250 UT small_chunk = trip_count / nth;
251 UT extras = trip_count % nth;
252 *plower += incr * (tid * small_chunk + (tid < extras ? tid : extras));
253 *pupper = *plower + small_chunk * incr - (tid < extras ? 0 : incr);
254 if (plastiter != NULL)
255 *plastiter = (tid == nth - 1);
// Greedy static: ceil(trip_count/nth)-sized chunks; later threads may run
// past the loop end, so the upper bound is clamped against overflow.
257 T big_chunk_inc_count =
258 (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
259 T old_upper = *pupper;
261 KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
264 *plower += tid * big_chunk_inc_count;
265 *pupper = *plower + big_chunk_inc_count - incr;
267 if (*pupper < *plower)
268 *pupper = traits_t<T>::max_value;
269 if (plastiter != NULL)
270 *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
271 if (*pupper > old_upper)
// Mirror of the clamp logic for negative increments (min_value clamp).
274 if (*pupper > *plower)
275 *pupper = traits_t<T>::min_value;
276 if (plastiter != NULL)
277 *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
278 if (*pupper < old_upper)
283 *pstride = trip_count;
// Chunked static: blocks of `chunk` iterations handed out round-robin; the
// stride lets a thread step to its next chunk.
286 case kmp_sch_static_chunked: {
292 *pstride = span * nth;
293 *plower = *plower + (span * tid);
294 *pupper = *plower + span - incr;
295 if (plastiter != NULL)
296 *plastiter = (tid == ((trip_count - 1) / (UT)chunk) % nth);
// Balanced-chunked: per-thread span rounded up to a multiple of chunk
// (the & ~(chunk-1) rounding presumably assumes chunk is a power of two —
// TODO confirm against the missing context).
300 case kmp_sch_static_balanced_chunked: {
301 T old_upper = *pupper;
303 UT span = (trip_count + nth - 1) / nth;
306 chunk = (span + chunk - 1) & ~(chunk - 1);
309 *plower = *plower + (span * tid);
310 *pupper = *plower + span - incr;
312 if (*pupper > old_upper)
314 }
else if (*pupper < old_upper)
317 if (plastiter != NULL)
318 *plastiter = (tid == ((trip_count - 1) / (UT)chunk));
323 KMP_ASSERT2(0,
"__kmpc_for_static_init: unknown scheduling type");
// ITT metadata: reported once per loop by the master thread of an active
// level-1 team when fork/join frame mode 3 is enabled.
329 if (KMP_MASTER_TID(tid) && __itt_metadata_add_ptr &&
330 __kmp_forkjoin_frames_mode == 3 &&
332 th->th.th_teams_microtask == NULL &&
334 team->t.t_active_level == 1) {
335 kmp_uint64 cur_chunk = chunk;
339 cur_chunk = trip_count / nth + ((trip_count % nth) ? 1 : 0);
342 __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
// Exit trace of the computed per-thread bounds, then OMPT loop-begin.
349 buff = __kmp_str_format(
"__kmpc_for_static_init: liter=%%d lower=%%%s " 350 "upper=%%%s stride = %%%s signed?<%s>\n",
351 traits_t<T>::spec, traits_t<T>::spec,
352 traits_t<ST>::spec, traits_t<T>::spec);
353 KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
354 __kmp_str_free(&buff);
357 KE_TRACE(10, (
"__kmpc_for_static_init: T#%d return\n", global_tid));
359 #if OMPT_SUPPORT && OMPT_TRACE 360 if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
361 ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
362 team_info->parallel_id, task_info->task_id, team_info->microtask);
// __kmp_dist_for_static_init<T>: bounds calculation for `distribute parallel
// for` — first splits the iteration space across teams (nteams, writing the
// per-team upper bound to *pupperDist), then splits the team's share across
// its threads (nth, writing *plower/*pupper). *plastiter is only kept set if
// the thread owns the last iteration at both levels.
// NOTE(review): many interior lines (declarations, switch headers, closing
// braces) are missing from this extraction.
369 template <
typename T>
370 static void __kmp_dist_for_static_init(
ident_t *loc, kmp_int32 gtid,
371 kmp_int32 schedule, kmp_int32 *plastiter,
372 T *plower, T *pupper, T *pupperDist,
373 typename traits_t<T>::signed_t *pstride,
374 typename traits_t<T>::signed_t incr,
375 typename traits_t<T>::signed_t chunk) {
377 typedef typename traits_t<T>::unsigned_t UT;
378 typedef typename traits_t<T>::signed_t ST;
387 KMP_DEBUG_ASSERT(plastiter && plower && pupper && pupperDist && pstride);
388 KE_TRACE(10, (
"__kmpc_dist_for_static_init called (%d)\n", gtid));
// Entry trace of the incoming arguments.
393 buff = __kmp_str_format(
394 "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d " 395 "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
396 traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
397 traits_t<ST>::spec, traits_t<T>::spec);
399 (buff, gtid, schedule, *plastiter, *plower, *pupper, incr, chunk));
400 __kmp_str_free(&buff);
// Consistency checks: zero increment, crossed bounds, illegal increment.
404 if (__kmp_env_consistency_check) {
405 __kmp_push_workshare(gtid, ct_pdo, loc);
407 __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
410 if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
420 __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
// Thread/team topology: must be inside a teams construct (asserted below);
// team_id is the team's index within the league.
423 tid = __kmp_tid_from_gtid(gtid);
424 th = __kmp_threads[gtid];
425 nth = th->th.th_team_nproc;
426 team = th->th.th_team;
428 KMP_DEBUG_ASSERT(th->th.th_teams_microtask);
429 nteams = th->th.th_teams_size.nteams;
431 team_id = team->t.t_master_tid;
432 KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);
// Whole-loop trip count (same +1/-1 fast paths as __kmp_for_static_init).
436 trip_count = *pupper - *plower + 1;
437 }
else if (incr == -1) {
438 trip_count = *plower - *pupper + 1;
439 }
else if (incr > 0) {
441 trip_count = (UT)(*pupper - *plower) / incr + 1;
443 trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
446 *pstride = *pupper - *plower;
// Level 1 — split across teams. Fewer iterations than teams: only thread 0
// of the first trip_count teams gets one iteration each.
447 if (trip_count <= nteams) {
449 __kmp_static == kmp_sch_static_greedy ||
451 kmp_sch_static_balanced);
454 if (team_id < trip_count && tid == 0) {
455 *pupper = *pupperDist = *plower = *plower + team_id * incr;
457 *pupperDist = *pupper;
458 *plower = *pupper + incr;
460 if (plastiter != NULL)
461 *plastiter = (tid == 0 && team_id == trip_count - 1);
// Balanced split across teams: first (trip_count % nteams) teams take one
// extra iteration.
464 if (__kmp_static == kmp_sch_static_balanced) {
465 UT chunkD = trip_count / nteams;
466 UT extras = trip_count % nteams;
468 incr * (team_id * chunkD + (team_id < extras ? team_id : extras));
469 *pupperDist = *plower + chunkD * incr - (team_id < extras ? 0 : incr);
470 if (plastiter != NULL)
471 *plastiter = (team_id == nteams - 1);
// Greedy split across teams: ceil-sized chunks with overflow clamping of
// the per-team upper bound (max_value / min_value by increment sign).
474 (trip_count / nteams + ((trip_count % nteams) ? 1 : 0)) * incr;
476 KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
478 *plower += team_id * chunk_inc_count;
479 *pupperDist = *plower + chunk_inc_count - incr;
482 if (*pupperDist < *plower)
483 *pupperDist = traits_t<T>::max_value;
484 if (plastiter != NULL)
485 *plastiter = *plower <= upper && *pupperDist > upper - incr;
486 if (*pupperDist > upper)
488 if (*plower > *pupperDist) {
489 *pupper = *pupperDist;
493 if (*pupperDist > *plower)
494 *pupperDist = traits_t<T>::min_value;
495 if (plastiter != NULL)
496 *plastiter = *plower >= upper && *pupperDist < upper - incr;
497 if (*pupperDist < upper)
499 if (*plower < *pupperDist) {
500 *pupper = *pupperDist;
// Level 2 — recompute trip count for this team's sub-range [*plower,
// *pupperDist] and split it across the team's threads.
508 trip_count = *pupperDist - *plower + 1;
509 }
else if (incr == -1) {
510 trip_count = *plower - *pupperDist + 1;
511 }
else if (incr > 1) {
513 trip_count = (UT)(*pupperDist - *plower) / incr + 1;
515 trip_count = (UT)(*plower - *pupperDist) / (-incr) + 1;
517 KMP_DEBUG_ASSERT(trip_count);
// Fewer iterations than threads: one iteration per thread tid < trip_count.
// Note *plastiter is only cleared here, never set — it must survive both
// distribution levels to remain true.
520 if (trip_count <= nth) {
522 __kmp_static == kmp_sch_static_greedy ||
524 kmp_sch_static_balanced);
525 if (tid < trip_count)
526 *pupper = *plower = *plower + tid * incr;
528 *plower = *pupper + incr;
529 if (plastiter != NULL)
530 if (*plastiter != 0 && !(tid == trip_count - 1))
// Balanced split across threads within the team.
533 if (__kmp_static == kmp_sch_static_balanced) {
534 UT chunkL = trip_count / nth;
535 UT extras = trip_count % nth;
536 *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
537 *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
538 if (plastiter != NULL)
539 if (*plastiter != 0 && !(tid == nth - 1))
// Greedy split across threads, clamped against the team bound *pupperDist.
543 (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
544 T upper = *pupperDist;
545 KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
547 *plower += tid * chunk_inc_count;
548 *pupper = *plower + chunk_inc_count - incr;
550 if (*pupper < *plower)
551 *pupper = traits_t<T>::max_value;
552 if (plastiter != NULL)
553 if (*plastiter != 0 &&
554 !(*plower <= upper && *pupper > upper - incr))
559 if (*pupper > *plower)
560 *pupper = traits_t<T>::min_value;
561 if (plastiter != NULL)
562 if (*plastiter != 0 &&
563 !(*plower >= upper && *pupper < upper - incr))
// Chunked split across threads within the team.
572 case kmp_sch_static_chunked: {
577 *pstride = span * nth;
578 *plower = *plower + (span * tid);
579 *pupper = *plower + span - incr;
580 if (plastiter != NULL)
581 if (*plastiter != 0 && !(tid == ((trip_count - 1) / (UT)chunk) % nth))
587 "__kmpc_dist_for_static_init: unknown loop scheduling type");
// Exit trace of the final per-thread and per-team bounds.
596 buff = __kmp_str_format(
597 "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s " 598 "stride=%%%s signed?<%s>\n",
599 traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec,
600 traits_t<ST>::spec, traits_t<T>::spec);
601 KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pupperDist, *pstride));
602 __kmp_str_free(&buff);
605 KE_TRACE(10, (
"__kmpc_dist_for_static_init: T#%d return\n", gtid));
// __kmp_team_static_init<T>: computes the calling team's chunk of a
// distribute loop (bounds in *p_lb/*p_ub, stride in *p_st, last-chunk flag
// in *p_last), splitting iterations across nteams teams by chunk.
// NOTE(review): many interior lines are missing from this extraction.
609 template <
typename T>
610 static void __kmp_team_static_init(
ident_t *loc, kmp_int32 gtid,
611 kmp_int32 *p_last, T *p_lb, T *p_ub,
612 typename traits_t<T>::signed_t *p_st,
613 typename traits_t<T>::signed_t incr,
614 typename traits_t<T>::signed_t chunk) {
620 typedef typename traits_t<T>::unsigned_t UT;
621 typedef typename traits_t<T>::signed_t ST;
631 KMP_DEBUG_ASSERT(p_last && p_lb && p_ub && p_st);
632 KE_TRACE(10, (
"__kmp_team_static_init called (%d)\n", gtid));
// Entry trace of the incoming arguments.
637 buff = __kmp_str_format(
"__kmp_team_static_init enter: T#%%d liter=%%d " 638 "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
639 traits_t<T>::spec, traits_t<T>::spec,
640 traits_t<ST>::spec, traits_t<ST>::spec,
642 KD_TRACE(100, (buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk));
643 __kmp_str_free(&buff);
// Consistency checks: zero increment, crossed bounds, illegal increment.
649 if (__kmp_env_consistency_check) {
651 __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
654 if (incr > 0 ? (upper < lower) : (lower < upper)) {
664 __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
// Team topology: must be inside a teams construct (asserted below).
667 th = __kmp_threads[gtid];
668 team = th->th.th_team;
670 KMP_DEBUG_ASSERT(th->th.th_teams_microtask);
671 nteams = th->th.th_teams_size.nteams;
673 team_id = team->t.t_master_tid;
674 KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);
// Trip count with +1/-1 fast paths; UT casts keep the division unsigned.
678 trip_count = upper - lower + 1;
679 }
else if (incr == -1) {
680 trip_count = lower - upper + 1;
681 }
else if (incr > 0) {
683 trip_count = (UT)(upper - lower) / incr + 1;
685 trip_count = (UT)(lower - upper) / (-incr) + 1;
// Chunked round-robin across teams; *p_last marks the team holding the
// final chunk, and *p_ub is clamped on overflow (max/min by incr sign).
690 *p_st = span * nteams;
691 *p_lb = lower + (span * team_id);
692 *p_ub = *p_lb + span - incr;
694 *p_last = (team_id == ((trip_count - 1) / (UT)chunk) % nteams);
698 *p_ub = traits_t<T>::max_value;
703 *p_ub = traits_t<T>::min_value;
// Exit trace of the computed per-team bounds.
712 __kmp_str_format(
"__kmp_team_static_init exit: T#%%d team%%u liter=%%d " 713 "iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
714 traits_t<T>::spec, traits_t<T>::spec,
715 traits_t<ST>::spec, traits_t<ST>::spec);
716 KD_TRACE(100, (buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk));
717 __kmp_str_free(&buff);
// Extern "C" entry points __kmpc_for_static_init_{4,4u,8,8u}: thin wrappers
// that instantiate __kmp_for_static_init for 32/64-bit signed/unsigned
// iteration types. NOTE(review): the opening signature lines of each wrapper
// are missing from this extraction.
746 kmp_int32 *plastiter, kmp_int32 *plower,
747 kmp_int32 *pupper, kmp_int32 *pstride,
748 kmp_int32 incr, kmp_int32 chunk) {
749 __kmp_for_static_init<kmp_int32>(loc, gtid, schedtype, plastiter, plower,
750 pupper, pstride, incr, chunk);
// Unsigned 32-bit variant.
757 kmp_int32 schedtype, kmp_int32 *plastiter,
758 kmp_uint32 *plower, kmp_uint32 *pupper,
759 kmp_int32 *pstride, kmp_int32 incr,
761 __kmp_for_static_init<kmp_uint32>(loc, gtid, schedtype, plastiter, plower,
762 pupper, pstride, incr, chunk);
// Signed 64-bit variant.
769 kmp_int32 *plastiter, kmp_int64 *plower,
770 kmp_int64 *pupper, kmp_int64 *pstride,
771 kmp_int64 incr, kmp_int64 chunk) {
772 __kmp_for_static_init<kmp_int64>(loc, gtid, schedtype, plastiter, plower,
773 pupper, pstride, incr, chunk);
// Unsigned 64-bit variant.
780 kmp_int32 schedtype, kmp_int32 *plastiter,
781 kmp_uint64 *plower, kmp_uint64 *pupper,
782 kmp_int64 *pstride, kmp_int64 incr,
784 __kmp_for_static_init<kmp_uint64>(loc, gtid, schedtype, plastiter, plower,
785 pupper, pstride, incr, chunk);
// Extern "C" entry points __kmpc_dist_for_static_init_{4,4u,8,8u}: wrappers
// instantiating __kmp_dist_for_static_init for 32/64-bit signed/unsigned
// iteration types. NOTE(review): the opening signature lines of each wrapper
// are missing from this extraction.
814 kmp_int32 schedule, kmp_int32 *plastiter,
815 kmp_int32 *plower, kmp_int32 *pupper,
816 kmp_int32 *pupperD, kmp_int32 *pstride,
817 kmp_int32 incr, kmp_int32 chunk) {
818 __kmp_dist_for_static_init<kmp_int32>(loc, gtid, schedule, plastiter, plower,
819 pupper, pupperD, pstride, incr, chunk);
// Unsigned 32-bit variant.
826 kmp_int32 schedule, kmp_int32 *plastiter,
827 kmp_uint32 *plower, kmp_uint32 *pupper,
828 kmp_uint32 *pupperD, kmp_int32 *pstride,
829 kmp_int32 incr, kmp_int32 chunk) {
830 __kmp_dist_for_static_init<kmp_uint32>(loc, gtid, schedule, plastiter, plower,
831 pupper, pupperD, pstride, incr, chunk);
// Signed 64-bit variant.
838 kmp_int32 schedule, kmp_int32 *plastiter,
839 kmp_int64 *plower, kmp_int64 *pupper,
840 kmp_int64 *pupperD, kmp_int64 *pstride,
841 kmp_int64 incr, kmp_int64 chunk) {
842 __kmp_dist_for_static_init<kmp_int64>(loc, gtid, schedule, plastiter, plower,
843 pupper, pupperD, pstride, incr, chunk);
// Unsigned 64-bit variant.
850 kmp_int32 schedule, kmp_int32 *plastiter,
851 kmp_uint64 *plower, kmp_uint64 *pupper,
852 kmp_uint64 *pupperD, kmp_int64 *pstride,
853 kmp_int64 incr, kmp_int64 chunk) {
854 __kmp_dist_for_static_init<kmp_uint64>(loc, gtid, schedule, plastiter, plower,
855 pupper, pupperD, pstride, incr, chunk);
// Extern "C" entry points __kmpc_team_static_init_{4,4u,8,8u}: wrappers
// instantiating __kmp_team_static_init for 32/64-bit signed/unsigned types.
// Each asserts the serial runtime is initialized before delegating.
// NOTE(review): the opening signature lines of each wrapper are missing from
// this extraction.
888 kmp_int32 *p_lb, kmp_int32 *p_ub,
889 kmp_int32 *p_st, kmp_int32 incr,
891 KMP_DEBUG_ASSERT(__kmp_init_serial);
892 __kmp_team_static_init<kmp_int32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
// Unsigned 32-bit variant.
900 kmp_uint32 *p_lb, kmp_uint32 *p_ub,
901 kmp_int32 *p_st, kmp_int32 incr,
903 KMP_DEBUG_ASSERT(__kmp_init_serial);
904 __kmp_team_static_init<kmp_uint32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
// Signed 64-bit variant.
912 kmp_int64 *p_lb, kmp_int64 *p_ub,
913 kmp_int64 *p_st, kmp_int64 incr,
915 KMP_DEBUG_ASSERT(__kmp_init_serial);
916 __kmp_team_static_init<kmp_int64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
// Unsigned 64-bit variant.
924 kmp_uint64 *p_lb, kmp_uint64 *p_ub,
925 kmp_int64 *p_st, kmp_int64 incr,
927 KMP_DEBUG_ASSERT(__kmp_init_serial);
928 __kmp_team_static_init<kmp_uint64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
void __kmpc_team_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_int64 *p_lb, kmp_int64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_dist_for_static_init_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_uint32 *plower, kmp_uint32 *pupper, kmp_uint32 *pupperD, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
#define KMP_COUNT_VALUE(name, value)
Adds the given value to the specified timer (name).
void __kmpc_team_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_int32 *p_lb, kmp_int32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_for_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_int64 *plower, kmp_int64 *pupper, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)
#define KMP_COUNT_BLOCK(name)
Increments the specified counter (name).
void __kmpc_team_static_init_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_uint32 *p_lb, kmp_uint32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_for_static_init_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_uint32 *plower, kmp_uint32 *pupper, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_dist_for_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_int32 *plower, kmp_int32 *pupper, kmp_int32 *pupperD, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_team_static_init_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_uint64 *p_lb, kmp_uint64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_dist_for_static_init_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_uint64 *plower, kmp_uint64 *pupper, kmp_uint64 *pupperD, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_int32 *plower, kmp_int32 *pupper, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_dist_for_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_int64 *plower, kmp_int64 *pupper, kmp_int64 *pupperD, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_for_static_init_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_uint64 *plower, kmp_uint64 *pupper, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)