21 #include "kmp_error.h" 24 #include "kmp_stats.h" 28 #include "ompt-specific.h" 34 char const *traits_t<int>::spec =
"d";
35 char const *traits_t<unsigned int>::spec =
"u";
36 char const *traits_t<long long>::spec =
"lld";
37 char const *traits_t<unsigned long long>::spec =
"llu";
38 char const *traits_t<long>::spec =
"ld";
template <typename T>
static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
                                  kmp_int32 schedtype, kmp_int32 *plastiter,
                                  T *plower, T *pupper,
                                  typename traits_t<T>::signed_t *pstride,
                                  typename traits_t<T>::signed_t incr,
                                  typename traits_t<T>::signed_t chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                  ,
                                  void *codeptr
#endif
                                  ) {
  KMP_COUNT_BLOCK(OMP_FOR_static);
  KMP_TIME_PARTITIONED_BLOCK(FOR_static_scheduling);
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  /* this all has to be changed back to TID and such.. */
  kmp_int32 gtid = global_tid;
  kmp_uint32 tid;
  kmp_uint32 nth;
  UT trip_count;
  kmp_team_t *team;
  kmp_info_t *th = __kmp_threads[gtid];
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_team_info_t *team_info = NULL;
  ompt_task_info_t *task_info = NULL;
  ompt_work_type_t ompt_work_type = ompt_work_loop;

  static kmp_int8 warn = 0;

  if (ompt_enabled.ompt_callback_work) {
    // Only fully initialize variables needed by OMPT if OMPT is enabled.
    team_info = __ompt_get_teaminfo(0, NULL);
    task_info = __ompt_get_task_info_object(0);
    // Determine workshare type from the ident flags.
    if ((loc->flags & KMP_IDENT_WORK_LOOP) != 0) {
      ompt_work_type = ompt_work_loop;
    } else if ((loc->flags & KMP_IDENT_WORK_SECTIONS) != 0) {
      ompt_work_type = ompt_work_sections;
    } else if ((loc->flags & KMP_IDENT_WORK_DISTRIBUTE) != 0) {
      ompt_work_type = ompt_work_distribute;
    } else {
      // The workshare type could not be determined from an outdated ident_t;
      // use compare-and-store so the warning is issued at most once.
      kmp_int8 bool_res =
          KMP_COMPARE_AND_STORE_ACQ8(&warn, (kmp_int8)0, (kmp_int8)1);
      if (bool_res)
        KMP_WARNING(OmptOutdatedWorkshare);
    }
    KMP_DEBUG_ASSERT(ompt_work_type);
  }
#endif
  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pstride);
  KE_TRACE(10, ("__kmpc_for_static_init called (%d)\n", global_tid));
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s,"
        " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, global_tid, schedtype, *plastiter, *plower, *pupper,
                   *pstride, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(global_tid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
  }
  /* special handling for zero-trip loops */
  if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
    if (plastiter != NULL)
      *plastiter = FALSE;
    /* leave pupper and plower set to entire iteration space */
    *pstride = incr; /* value should never be used */
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init:(ZERO TRIP) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s "
                              "signed?<%s>, loc = %%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec, traits_t<T>::spec);
      KD_TRACE(100,
               (buff, *plastiter, *plower, *pupper, *pstride, loc->psource));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), 0, codeptr);
    }
#endif
    KMP_COUNT_VALUE(FOR_static_iterations, 0);
    return;
  }
#if OMP_40_ENABLED
  // Schedule enumerations above kmp_ord_upper indicate we are in a
  // DISTRIBUTE construct: remap the schedule and work on the parent team.
  if (schedtype > kmp_ord_upper) {
    schedtype += kmp_sch_static - kmp_distribute_static;
    tid = th->th.th_team->t.t_master_tid;
    team = th->th.th_team->t.t_parent;
  } else
#endif
  {
    tid = __kmp_tid_from_gtid(global_tid);
    team = th->th.th_team;
  }
  /* determine if "for" loop is an active worksharing construct */
  if (team->t.t_serialized) {
    /* serialized parallel, each thread executes whole iteration space */
    if (plastiter != NULL)
      *plastiter = TRUE;
    /* leave pupper and plower set to entire iteration space */
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    return;
  }
  nth = team->t.t_nproc;
  if (nth == 1) {
    /* single-thread team: it owns the entire iteration space */
    if (plastiter != NULL)
      *plastiter = TRUE;
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    return;
  }
  /* compute trip count */
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
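  // Example: lower=0, upper=9, incr=3 covers {0, 3, 6, 9}, so
  // trip_count = (9 - 0) / 3 + 1 = 4. The division is performed in the
  // unsigned type UT because upper - lower may overflow the signed type.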
  if (__kmp_env_consistency_check) {
    /* tripcount overflow? */
    if (trip_count == 0 && *pupper != *plower) {
      __kmp_error_construct(kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo,
                            loc);
    }
  }
  /* compute remaining parameters */
  switch (schedtype) {
  case kmp_sch_static: {
    if (trip_count < nth) {
      KMP_DEBUG_ASSERT(
          __kmp_static == kmp_sch_static_greedy ||
          __kmp_static ==
              kmp_sch_static_balanced); // Unknown static scheduling type.
      if (tid < trip_count) {
        *pupper = *plower = *plower + tid * incr;
      } else {
        *plower = *pupper + incr; // no iterations for this thread
      }
      if (plastiter != NULL)
        *plastiter = (tid == trip_count - 1);
    } else {
      if (__kmp_static == kmp_sch_static_balanced) {
        UT small_chunk = trip_count / nth;
        UT extras = trip_count % nth;
        *plower += incr * (tid * small_chunk + (tid < extras ? tid : extras));
        *pupper = *plower + small_chunk * incr - (tid < extras ? 0 : incr);
        if (plastiter != NULL)
          *plastiter = (tid == nth - 1);
      } else {
        T big_chunk_inc_count =
            (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
        T old_upper = *pupper;

        KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
        // Unknown static scheduling type.

        *plower += tid * big_chunk_inc_count;
        *pupper = *plower + big_chunk_inc_count - incr;
        if (incr > 0) {
          if (*pupper < *plower) // overflow?
            *pupper = traits_t<T>::max_value;
          if (plastiter != NULL)
            *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
          if (*pupper > old_upper)
            *pupper = old_upper; // tracker C73258
        } else {
          if (*pupper > *plower) // underflow?
            *pupper = traits_t<T>::min_value;
          if (plastiter != NULL)
            *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
          if (*pupper < old_upper)
            *pupper = old_upper; // tracker C73258
        }
      }
    }
    *pstride = trip_count;
    break;
  }
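  // Example for the case above: trip_count=10, nth=4 under the balanced
  // variant gives small_chunk=2, extras=2, so threads 0..3 receive 3, 3, 2
  // and 2 iterations. Under the greedy variant every thread gets
  // ceil(10/4)=3 iterations and the last thread's range is clipped back to
  // the original upper bound.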
  case kmp_sch_static_chunked: {
    ST span;
    if (chunk < 1)
      chunk = 1;
    span = chunk * incr;
    *pstride = span * nth;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (plastiter != NULL)
      *plastiter = (tid == ((trip_count - 1) / (UT)chunk) % nth);
    break;
  }
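  // Example for the chunked case above: chunk=2, nth=3, incr=1: thread 0's
  // first chunk starts at the original lower bound, thread 1's at lower+2,
  // thread 2's at lower+4; each thread then advances by
  // pstride = span * nth = 6 to reach its next chunk.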
#if OMP_45_ENABLED
  case kmp_sch_static_balanced_chunked: {
    T old_upper = *pupper;
    // round up to make sure the chunk is enough to cover all iterations
    UT span = (trip_count + nth - 1) / nth;

    // perform chunk adjustment
    chunk = (span + chunk - 1) & ~(chunk - 1);

    span = chunk * incr;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (incr > 0) {
      if (*pupper > old_upper)
        *pupper = old_upper;
    } else if (*pupper < old_upper)
      *pupper = old_upper;

    if (plastiter != NULL)
      *plastiter = (tid == ((trip_count - 1) / (UT)chunk));
    break;
  }
#endif
  default:
    KMP_ASSERT2(0, "__kmpc_for_static_init: unknown scheduling type");
    break;
  }
#if USE_ITT_BUILD
  // Report loop metadata
  if (KMP_MASTER_TID(tid) && __itt_metadata_add_ptr &&
      __kmp_forkjoin_frames_mode == 3 &&
#if OMP_40_ENABLED
      th->th.th_teams_microtask == NULL &&
#endif
      team->t.t_active_level == 1) {
    kmp_uint64 cur_chunk = chunk;
    // Calculate chunk in case it was not specified; it is specified for
    // kmp_sch_static_chunked
    if (schedtype == kmp_sch_static) {
      cur_chunk = trip_count / nth + ((trip_count % nth) ? 1 : 0);
    }
    // 0 - "static" schedule
    __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
  }
#endif
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmpc_for_static_init: liter=%%d lower=%%%s "
                            "upper=%%%s stride = %%%s signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work) {
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
        &(task_info->task_data), trip_count, codeptr);
  }
#endif

  return;
}
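// Illustration (excluded from the build): a rough sketch of the code a
// compiler emits for "#pragma omp for schedule(static)" against the _4
// entry point defined below. The lowering details (chunk argument, gtid
// lookup, the do_work body) are assumptions for illustration, not the
// exact output of any particular compiler.
#if 0
extern void do_work(kmp_int32 i); // hypothetical loop body

void lowered_static_loop(ident_t *loc) {
  kmp_int32 gtid = __kmpc_global_thread_num(loc);
  kmp_int32 lower = 0, upper = 99, stride = 1, last = 0;
  // The runtime rewrites lower/upper/stride to this thread's subrange.
  __kmpc_for_static_init_4(loc, gtid, kmp_sch_static, &last, &lower, &upper,
                           &stride, /*incr=*/1, /*chunk=*/1);
  for (kmp_int32 i = lower; i <= upper; ++i)
    do_work(i);
  __kmpc_for_static_fini(loc, gtid);
}
#endif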
template <typename T>
static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid,
                                       kmp_int32 schedule, kmp_int32 *plastiter,
                                       T *plower, T *pupper, T *pupperDist,
                                       typename traits_t<T>::signed_t *pstride,
                                       typename traits_t<T>::signed_t incr,
                                       typename traits_t<T>::signed_t chunk) {
  KMP_COUNT_BLOCK(OMP_DISTRIBUTE);
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 tid;
  kmp_uint32 nth;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  kmp_team_t *team;
  kmp_info_t *th;
  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pupperDist && pstride);
  KE_TRACE(10, ("__kmpc_dist_for_static_init called (%d)\n", gtid));
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d "
        "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100,
             (buff, gtid, schedule, *plastiter, *plower, *pupper, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(gtid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
    if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
      // The loop is illegal: a run-time check caught a zero-trip or
      // wrong-direction loop that the compiler could not reject statically,
      // e.g. for(i=0;i<10;i+=incr) where incr<0.
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  tid = __kmp_tid_from_gtid(gtid);
  th = __kmp_threads[gtid];
  nth = th->th.th_team_nproc;
  team = th->th.th_team;
#if OMP_40_ENABLED
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
  nteams = th->th.th_teams_size.nteams;
#endif
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);
  // compute global trip count
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
  *pstride = *pupper - *plower; // just in case (can be unused)
  if (trip_count <= nteams) {
    KMP_DEBUG_ASSERT(
        __kmp_static == kmp_sch_static_greedy ||
        __kmp_static ==
            kmp_sch_static_balanced); // Unknown static scheduling type.
    // only primary threads of some teams get single iteration,
    // other threads get nothing
    if (team_id < trip_count && tid == 0) {
      *pupper = *pupperDist = *plower = *plower + team_id * incr;
    } else {
      *pupperDist = *pupper;
      *plower = *pupper + incr; // compiler should skip loop body
    }
    if (plastiter != NULL)
      *plastiter = (tid == 0 && team_id == trip_count - 1);
  } else {
    // Get the team's chunk first (each team gets at most one chunk)
    if (__kmp_static == kmp_sch_static_balanced) {
      UT chunkD = trip_count / nteams;
      UT extras = trip_count % nteams;
      *plower +=
          incr * (team_id * chunkD + (team_id < extras ? team_id : extras));
      *pupperDist = *plower + chunkD * incr - (team_id < extras ? 0 : incr);
      if (plastiter != NULL)
        *plastiter = (team_id == nteams - 1);
    } else {
      T chunk_inc_count =
          (trip_count / nteams + ((trip_count % nteams) ? 1 : 0)) * incr;
      T upper = *pupper;
      KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
      // Unknown static scheduling type.
      *plower += team_id * chunk_inc_count;
      *pupperDist = *plower + chunk_inc_count - incr;
      // Check/correct bounds if needed
      if (incr > 0) {
        if (*pupperDist < *plower) // overflow?
          *pupperDist = traits_t<T>::max_value;
        if (plastiter != NULL)
          *plastiter = *plower <= upper && *pupperDist > upper - incr;
        if (*pupperDist > upper)
          *pupperDist = upper; // tracker C73258
        if (*plower > *pupperDist) {
          *pupper = *pupperDist; // no iterations available for the team
          goto end;
        }
      } else {
        if (*pupperDist > *plower) // underflow?
          *pupperDist = traits_t<T>::min_value;
        if (plastiter != NULL)
          *plastiter = *plower >= upper && *pupperDist < upper - incr;
        if (*pupperDist < upper)
          *pupperDist = upper; // tracker C73258
        if (*plower < *pupperDist) {
          *pupper = *pupperDist; // no iterations available for the team
          goto end;
        }
      }
    }
    // Get the parallel loop chunk now (for thread)
    // compute trip count for the team's chunk
    if (incr == 1) {
      trip_count = *pupperDist - *plower + 1;
    } else if (incr == -1) {
      trip_count = *plower - *pupperDist + 1;
    } else if (incr > 1) {
      // upper-lower can exceed the limit of signed type
      trip_count = (UT)(*pupperDist - *plower) / incr + 1;
    } else {
      trip_count = (UT)(*plower - *pupperDist) / (-incr) + 1;
    }
    KMP_DEBUG_ASSERT(trip_count);
    switch (schedule) {
    case kmp_sch_static: {
      if (trip_count <= nth) {
        KMP_DEBUG_ASSERT(
            __kmp_static == kmp_sch_static_greedy ||
            __kmp_static ==
                kmp_sch_static_balanced); // Unknown static scheduling type.
        if (tid < trip_count)
          *pupper = *plower = *plower + tid * incr;
        else
          *plower = *pupper + incr; // no iterations for this thread
        if (plastiter != NULL)
          if (*plastiter != 0 && !(tid == trip_count - 1))
            *plastiter = 0;
      } else {
        if (__kmp_static == kmp_sch_static_balanced) {
          UT chunkL = trip_count / nth;
          UT extras = trip_count % nth;
          *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
          *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
          if (plastiter != NULL)
            if (*plastiter != 0 && !(tid == nth - 1))
              *plastiter = 0;
        } else {
          T chunk_inc_count =
              (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
          T upper = *pupperDist;
          KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
          // Unknown static scheduling type.
          *plower += tid * chunk_inc_count;
          *pupper = *plower + chunk_inc_count - incr;
          if (incr > 0) {
            if (*pupper < *plower) // overflow?
              *pupper = traits_t<T>::max_value;
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower <= upper && *pupper > upper - incr))
                *plastiter = 0;
            if (*pupper > upper)
              *pupper = upper; // tracker C73258
          } else {
            if (*pupper > *plower) // underflow?
              *pupper = traits_t<T>::min_value;
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower >= upper && *pupper < upper - incr))
                *plastiter = 0;
            if (*pupper < upper)
              *pupper = upper; // tracker C73258
          }
        }
      }
      break;
    }
    case kmp_sch_static_chunked: {
      ST span;
      if (chunk < 1)
        chunk = 1;
      span = chunk * incr;
      *pstride = span * nth;
      *plower = *plower + (span * tid);
      *pupper = *plower + span - incr;
      if (plastiter != NULL)
        if (*plastiter != 0 && !(tid == ((trip_count - 1) / (UT)chunk) % nth))
          *plastiter = 0;
      break;
    }
    default:
      KMP_ASSERT2(
          0, "__kmpc_dist_for_static_init: unknown loop scheduling type");
      break;
    }
  }
end:;
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s "
        "stride=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pupperDist, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_dist_for_static_init: T#%d return\n", gtid));
  return;
}
template <typename T>
static void __kmp_team_static_init(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, T *p_lb, T *p_ub,
                                   typename traits_t<T>::signed_t *p_st,
                                   typename traits_t<T>::signed_t incr,
                                   typename traits_t<T>::signed_t chunk) {
  // The routine returns the first chunk distributed to the team and the
  // stride for computing subsequent chunks. The last-iteration flag is set
  // for the team that will execute the last iteration of the loop.
  // The routine is called for dist_schedule(static, chunk) only.
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  T lower;
  T upper;
  ST span;
  kmp_team_t *team;
  kmp_info_t *th;
  KMP_DEBUG_ASSERT(p_last && p_lb && p_ub && p_st);
  KE_TRACE(10, ("__kmp_team_static_init called (%d)\n", gtid));
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmp_team_static_init enter: T#%%d liter=%%d "
                            "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<ST>::spec,
                            traits_t<T>::spec);
    KD_TRACE(100, (buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
  lower = *p_lb;
  upper = *p_ub;
  if (__kmp_env_consistency_check) {
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
    if (incr > 0 ? (upper < lower) : (lower < upper)) {
      // The loop is illegal; same run-time check as in
      // __kmp_dist_for_static_init above.
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  th = __kmp_threads[gtid];
  team = th->th.th_team;
#if OMP_40_ENABLED
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
  nteams = th->th.th_teams_size.nteams;
#endif
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);
  // compute trip count
  if (incr == 1) {
    trip_count = upper - lower + 1;
  } else if (incr == -1) {
    trip_count = lower - upper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(upper - lower) / incr + 1;
  } else {
    trip_count = (UT)(lower - upper) / (-incr) + 1;
  }
  if (chunk < 1)
    chunk = 1;
  span = chunk * incr;
  *p_st = span * nteams;
  *p_lb = lower + (span * team_id);
  *p_ub = *p_lb + span - incr;
  if (p_last != NULL)
    *p_last = (team_id == ((trip_count - 1) / (UT)chunk) % nteams);
  // Correct upper bound if needed
  if (incr > 0) {
    if (*p_ub < *p_lb) // overflow?
      *p_ub = traits_t<T>::max_value;
    if (*p_ub > upper)
      *p_ub = upper; // tracker C73258
  } else {
    if (*p_ub > *p_lb) // underflow?
      *p_ub = traits_t<T>::min_value;
    if (*p_ub < upper)
      *p_ub = upper; // tracker C73258
  }
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff =
        __kmp_str_format("__kmp_team_static_init exit: T#%%d team%%u liter=%%d "
                         "iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
                         traits_t<T>::spec, traits_t<T>::spec,
                         traits_t<ST>::spec, traits_t<ST>::spec);
    KD_TRACE(100, (buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
}
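// Example: dist_schedule(static, 100) with nteams=3 over 1000 iterations:
// team 0's first chunk is [0,99], team 1's [100,199], team 2's [200,299],
// and each team advances by *p_st = 300 to its next chunk. Team 0 executes
// the final chunk [900,999] since ((1000-1)/100) % 3 == 0.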
// Compute the bounds and stride of the current thread's portion of a
// statically scheduled loop. The _4/_4u/_8/_8u variants differ only in the
// loop index type.
void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                              kmp_int32 schedtype, kmp_int32 *plastiter,
                              kmp_int32 *plower, kmp_int32 *pupper,
                              kmp_int32 *pstride, kmp_int32 incr,
                              kmp_int32 chunk) {
  __kmp_for_static_init<kmp_int32>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint32 *plower, kmp_uint32 *pupper,
                               kmp_int32 *pstride, kmp_int32 incr,
                               kmp_int32 chunk) {
  __kmp_for_static_init<kmp_uint32>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                              kmp_int32 schedtype, kmp_int32 *plastiter,
                              kmp_int64 *plower, kmp_int64 *pupper,
                              kmp_int64 *pstride, kmp_int64 incr,
                              kmp_int64 chunk) {
  __kmp_for_static_init<kmp_int64>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint64 *plower, kmp_uint64 *pupper,
                               kmp_int64 *pstride, kmp_int64 incr,
                               kmp_int64 chunk) {
  __kmp_for_static_init<kmp_uint64>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
// Compute the bounds of the current thread's portion of a combined
// distribute/parallel-for region; pupperD receives the upper bound of the
// team's block. The four variants differ only in the loop index type.
void __kmpc_dist_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int32 *plower, kmp_int32 *pupper,
                                   kmp_int32 *pupperD, kmp_int32 *pstride,
                                   kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_int32>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr, chunk);
}
void __kmpc_dist_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint32 *plower, kmp_uint32 *pupper,
                                    kmp_uint32 *pupperD, kmp_int32 *pstride,
                                    kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_uint32>(loc, gtid, schedule, plastiter, plower,
                                         pupper, pupperD, pstride, incr, chunk);
}
void __kmpc_dist_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int64 *plower, kmp_int64 *pupper,
                                   kmp_int64 *pupperD, kmp_int64 *pstride,
                                   kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_int64>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr, chunk);
}
void __kmpc_dist_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint64 *plower, kmp_uint64 *pupper,
                                    kmp_uint64 *pupperD, kmp_int64 *pstride,
                                    kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_uint64>(loc, gtid, schedule, plastiter, plower,
                                         pupper, pupperD, pstride, incr, chunk);
}
// Return the first dist_schedule(static, chunk) chunk for the current team
// and the stride to the team's next chunk. The four variants differ only
// in the loop index type.
void __kmpc_team_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int32 *p_lb, kmp_int32 *p_ub,
                               kmp_int32 *p_st, kmp_int32 incr,
                               kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}
void __kmpc_team_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint32 *p_lb,
                                kmp_uint32 *p_ub, kmp_int32 *p_st,
                                kmp_int32 incr, kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}
void __kmpc_team_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int64 *p_lb, kmp_int64 *p_ub,
                               kmp_int64 *p_st, kmp_int64 incr,
                               kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}
void __kmpc_team_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint64 *p_lb,
                                kmp_uint64 *p_ub, kmp_int64 *p_st,
                                kmp_int64 incr, kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}
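// Illustration (excluded from the build): a rough sketch of how the chunks
// produced by __kmpc_team_static_init_4 above might be walked for
// "#pragma omp distribute dist_schedule(static, 100)". The surrounding
// teams lowering and run_team_chunk are assumptions for illustration only.
#if 0
extern void run_team_chunk(kmp_int32 lo, kmp_int32 hi); // hypothetical

void lowered_distribute(ident_t *loc, kmp_int32 gtid) {
  const kmp_int32 loop_ub = 999;
  kmp_int32 last = 0, lb = 0, ub = loop_ub, st = 1;
  // Returns this team's first chunk in [lb, ub] and the stride st
  // (chunk * nteams) between consecutive chunks of the same team.
  __kmpc_team_static_init_4(loc, gtid, &last, &lb, &ub, &st, /*incr=*/1,
                            /*chunk=*/100);
  for (kmp_int32 lo = lb; lo <= loop_ub; lo += st) {
    kmp_int32 hi = lo + (ub - lb); // same width as the first chunk
    if (hi > loop_ub)
      hi = loop_ub;
    run_team_chunk(lo, hi);
  }
}
#endif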