Intel® OpenMP* Runtime Library
 All Classes Functions Variables Typedefs Enumerations Enumerator Groups Pages
kmp_gsupport.c
1 /*
2  * kmp_gsupport.c
3  * $Revision: 42810 $
4  * $Date: 2013-11-07 12:06:33 -0600 (Thu, 07 Nov 2013) $
5  */
6 
7 /* <copyright>
8  Copyright (c) 1997-2013 Intel Corporation. All Rights Reserved.
9 
10  Redistribution and use in source and binary forms, with or without
11  modification, are permitted provided that the following conditions
12  are met:
13 
14  * Redistributions of source code must retain the above copyright
15  notice, this list of conditions and the following disclaimer.
16  * Redistributions in binary form must reproduce the above copyright
17  notice, this list of conditions and the following disclaimer in the
18  documentation and/or other materials provided with the distribution.
19  * Neither the name of Intel Corporation nor the names of its
20  contributors may be used to endorse or promote products derived
21  from this software without specific prior written permission.
22 
23  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27  HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
29  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
30  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
31  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
33  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 
35 </copyright> */
36 
37 #if defined(__x86_64)
38 # define KMP_I8
39 #endif
40 #include "kmp.h"
41 #include "kmp_atomic.h"
42 
43 #ifdef __cplusplus
44  extern "C" {
45 #endif // __cplusplus
46 
// MKLOC: declare a function-local, statically-initialized ident_t named
// (loc), tagged KMP_IDENT_KMPC with an "unknown" psource string.  The GOMP
// entry points receive no source-location argument, so this synthesizes the
// ident_t that the __kmpc_* entry points expect.
#define MKLOC(loc,routine) \
    static ident_t (loc) = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;" };
49 
50 #include "kmp_ftn_os.h"
51 
52 void
53 xexpand(KMP_API_NAME_GOMP_BARRIER)(void)
54 {
55  int gtid = __kmp_entry_gtid();
56  MKLOC(loc, "GOMP_barrier");
57  KA_TRACE(20, ("GOMP_barrier: T#%d\n", gtid));
58  __kmpc_barrier(&loc, gtid);
59 }
60 
61 
62 /* */
63 //
64 // Mutual exclusion
65 //
66 
//
// The symbol that icc/ifort generates for unnamed critical sections -
// .gomp_critical_user_ - is defined using .comm in any objects that
// reference it. We can't reference it directly here in C code, as the
// symbol contains a ".".
//
// The RTL contains an assembly language definition of .gomp_critical_user_
// with another symbol, __kmp_unnamed_critical_addr, initialized with its
// address.
//
77 extern kmp_critical_name *__kmp_unnamed_critical_addr;
78 
79 
80 void
81 xexpand(KMP_API_NAME_GOMP_CRITICAL_START)(void)
82 {
83  int gtid = __kmp_entry_gtid();
84  MKLOC(loc, "GOMP_critical_start");
85  KA_TRACE(20, ("GOMP_critical_start: T#%d\n", gtid));
86  __kmpc_critical(&loc, gtid, __kmp_unnamed_critical_addr);
87 }
88 
89 
90 void
91 xexpand(KMP_API_NAME_GOMP_CRITICAL_END)(void)
92 {
93  int gtid = __kmp_get_gtid();
94  MKLOC(loc, "GOMP_critical_end");
95  KA_TRACE(20, ("GOMP_critical_end: T#%d\n", gtid));
96  __kmpc_end_critical(&loc, gtid, __kmp_unnamed_critical_addr);
97 }
98 
99 
100 void
101 xexpand(KMP_API_NAME_GOMP_CRITICAL_NAME_START)(void **pptr)
102 {
103  int gtid = __kmp_entry_gtid();
104  MKLOC(loc, "GOMP_critical_name_start");
105  KA_TRACE(20, ("GOMP_critical_name_start: T#%d\n", gtid));
106  __kmpc_critical(&loc, gtid, (kmp_critical_name *)pptr);
107 }
108 
109 
110 void
111 xexpand(KMP_API_NAME_GOMP_CRITICAL_NAME_END)(void **pptr)
112 {
113  int gtid = __kmp_get_gtid();
114  MKLOC(loc, "GOMP_critical_name_end");
115  KA_TRACE(20, ("GOMP_critical_name_end: T#%d\n", gtid));
116  __kmpc_end_critical(&loc, gtid, (kmp_critical_name *)pptr);
117 }
118 
119 
120 //
121 // The Gnu codegen tries to use locked operations to perform atomic updates
122 // inline. If it can't, then it calls GOMP_atomic_start() before performing
123 // the update and GOMP_atomic_end() afterward, regardless of the data type.
124 //
125 
126 void
127 xexpand(KMP_API_NAME_GOMP_ATOMIC_START)(void)
128 {
129  int gtid = __kmp_entry_gtid();
130  KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));
131  __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
132 }
133 
134 
135 void
136 xexpand(KMP_API_NAME_GOMP_ATOMIC_END)(void)
137 {
138  int gtid = __kmp_get_gtid();
139  KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));
140  __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
141 }
142 
143 
// GOMP single construct: returns nonzero on the one thread chosen to
// execute the single region and zero on all others.
int
xexpand(KMP_API_NAME_GOMP_SINGLE_START)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_single_start");
    KA_TRACE(20, ("GOMP_single_start: T#%d\n", gtid));

    // Lazily bring up the parallel runtime on first use.
    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    //
    // 3rd parameter == FALSE prevents kmp_enter_single from pushing a
    // workshare when USE_CHECKS is defined. We need to avoid the push,
    // as there is no corresponding GOMP_single_end() call.
    //
    return __kmp_enter_single(gtid, &loc, FALSE);
}
161 
162 
// GOMP single construct with copyprivate: returns NULL on the thread
// chosen to execute the single block; every other thread waits, then
// returns the copyprivate data pointer published by the chosen thread
// via GOMP_single_copy_end().
void *
xexpand(KMP_API_NAME_GOMP_SINGLE_COPY_START)(void)
{
    void *retval;
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_single_copy_start");
    KA_TRACE(20, ("GOMP_single_copy_start: T#%d\n", gtid));

    // Lazily bring up the parallel runtime on first use.
    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    //
    // If this is the first thread to enter, return NULL. The generated
    // code will then call GOMP_single_copy_end() for this thread only,
    // with the copyprivate data pointer as an argument.
    //
    if (__kmp_enter_single(gtid, &loc, FALSE))
        return NULL;

    //
    // Wait for the first thread to set the copyprivate data pointer,
    // and for all other threads to reach this point.
    //
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

    //
    // Retrieve the value of the copyprivate data point, and wait for all
    // threads to do likewise, then return.
    //
    retval = __kmp_team_from_gtid(gtid)->t.t_copypriv_data;
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
    return retval;
}
196 
197 
// Called only by the thread that executed the single block; publishes the
// copyprivate data pointer for the waiters in GOMP_single_copy_start().
void
xexpand(KMP_API_NAME_GOMP_SINGLE_COPY_END)(void *data)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_single_copy_end");
    KA_TRACE(20, ("GOMP_single_copy_end: T#%d\n", gtid));

    //
    // Set the copyprivate data pointer of the team, then hit the barrier
    // so that the other threads will continue on and read it. Hit another
    // barrier before continuing, so that they know that the copyprivate
    // data pointer has been propagated to all threads before trying to
    // reuse the t_copypriv_data field.
    //
    __kmp_team_from_gtid(gtid)->t.t_copypriv_data = data;
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
}
216 
217 
218 void
219 xexpand(KMP_API_NAME_GOMP_ORDERED_START)(void)
220 {
221  int gtid = __kmp_entry_gtid();
222  MKLOC(loc, "GOMP_ordered_start");
223  KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
224  __kmpc_ordered(&loc, gtid);
225 }
226 
227 
228 void
229 xexpand(KMP_API_NAME_GOMP_ORDERED_END)(void)
230 {
231  int gtid = __kmp_get_gtid();
232  MKLOC(loc, "GOMP_ordered_end");
233  KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
234  __kmpc_end_ordered(&loc, gtid);
235 }
236 
237 
238 /* */
239 //
240 // Dispatch macro defs
241 //
242 // They come in two flavors: 64-bit unsigned, and either 32-bit signed
243 // (IA-32 architecture) or 64-bit signed (Intel(R) 64).
244 //
245 
//
// Map the loop-dispatch helpers onto the 4-byte (kmp_int32) variants on
// 32-bit architectures, where the signed loop-bound type "long" is 32 bits,
// and onto the 8-byte (kmp_int64) variants elsewhere.  The _ULL helpers
// are unconditionally the 64-bit unsigned variants.
//
#if KMP_ARCH_X86 || KMP_ARCH_ARM
# define KMP_DISPATCH_INIT              __kmp_aux_dispatch_init_4
# define KMP_DISPATCH_FINI_CHUNK        __kmp_aux_dispatch_fini_chunk_4
# define KMP_DISPATCH_NEXT              __kmpc_dispatch_next_4
#else
# define KMP_DISPATCH_INIT              __kmp_aux_dispatch_init_8
# define KMP_DISPATCH_FINI_CHUNK        __kmp_aux_dispatch_fini_chunk_8
# define KMP_DISPATCH_NEXT              __kmpc_dispatch_next_8
#endif /* KMP_ARCH_X86 */

# define KMP_DISPATCH_INIT_ULL          __kmp_aux_dispatch_init_8u
# define KMP_DISPATCH_FINI_CHUNK_ULL    __kmp_aux_dispatch_fini_chunk_8u
# define KMP_DISPATCH_NEXT_ULL          __kmpc_dispatch_next_8u
259 
260 
/* */
//
// The parallel construct
//
265 
//
// Adapter from the runtime's microtask calling convention (gtid, npr,
// then the captured arguments) to the GOMP outlined-function convention,
// which takes only the data pointer.  gtid/npr are intentionally unused.
//
// NOTE(review): the wrapper has internal linkage only in debug builds;
// the reason is not evident from this file -- confirm before changing.
//
#ifdef KMP_DEBUG
static
#endif /* KMP_DEBUG */
void
__kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *),
    void *data)
{
    task(data);
}
275 
276 
//
// Microtask wrapper for the combined parallel+loop entry points: each team
// member first initializes its own view of the loop worksharing construct,
// then runs the GOMP outlined loop body.
//
#ifdef KMP_DEBUG
static
#endif /* KMP_DEBUG */
void
__kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr,
    void (*task)(void *), void *data, unsigned num_threads, ident_t *loc,
    enum sched_type schedule, long start, long end, long incr, long chunk_size)
{
    //
    // Initialize the loop worksharing construct.
    //
    KMP_DISPATCH_INIT(loc, *gtid, schedule, start, end, incr, chunk_size,
      schedule != kmp_sch_static);

    //
    // Now invoke the microtask.
    //
    task(data);
}
296 
297 
//
// Fork a team that executes "wrapper" with the "argc" trailing varargs.
// If __kmp_fork_call() reports that the caller itself must run the task
// (rc != 0), perform the pre-invocation bookkeeping here; the matching
// __kmp_run_after_invoked_task()/__kmp_join_call() pair is issued later
// from GOMP_parallel_end().
//
#ifdef KMP_DEBUG
static
#endif /* KMP_DEBUG */
void
__kmp_GOMP_fork_call(ident_t *loc, int gtid, microtask_t wrapper, int argc,...)
{
    int rc;

    va_list ap;
    va_start(ap, argc);

    // The varargs are handed to __kmp_fork_call either by address or by
    // value, depending on what the target ABI's va_list representation
    // requires (see the corresponding #if in __kmp_fork_call's callee).
    rc = __kmp_fork_call(loc, gtid, FALSE, argc, wrapper, __kmp_invoke_task_func,
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM) && KMP_OS_LINUX
      &ap
#else
      ap
#endif
      );

    va_end(ap);

    if (rc) {
        kmp_info_t *thr = __kmp_threads[gtid];
        __kmp_run_before_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr,
          thr->th.th_team);
    }
}
325 
326 
327 void
328 xexpand(KMP_API_NAME_GOMP_PARALLEL_START)(void (*task)(void *), void *data, unsigned num_threads)
329 {
330  int gtid = __kmp_entry_gtid();
331  MKLOC(loc, "GOMP_parallel_start");
332  KA_TRACE(20, ("GOMP_parallel_start: T#%d\n", gtid));
333 
334  if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
335  if (num_threads != 0) {
336  __kmp_push_num_threads(&loc, gtid, num_threads);
337  }
338  __kmp_GOMP_fork_call(&loc, gtid,
339  (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, data);
340  }
341  else {
342  __kmpc_serialized_parallel(&loc, gtid);
343  }
344 }
345 
346 
// Close a parallel region opened by GOMP_parallel_start(): run the
// post-invocation bookkeeping and join the team, or unwind the serialized
// region if no team was actually forked.
void
xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_parallel_end");
    KA_TRACE(20, ("GOMP_parallel_end: T#%d\n", gtid));

    if (! __kmp_threads[gtid]->th.th_team->t.t_serialized) {
        kmp_info_t *thr = __kmp_threads[gtid];
        // Mirrors the __kmp_run_before_invoked_task() call made from
        // __kmp_GOMP_fork_call(); must precede the join.
        __kmp_run_after_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr,
          thr->th.th_team);
        __kmp_join_call(&loc, gtid);
    }
    else {
        __kmpc_end_serialized_parallel(&loc, gtid);
    }
}
364 
365 
366 /* */
367 //
368 // Loop worksharing constructs
369 //
370 
371 //
372 // The Gnu codegen passes in an exclusive upper bound for the overall range,
373 // but the libguide dispatch code expects an inclusive upper bound, hence the
374 // "end - incr" 5th argument to KMP_DISPATCH_INIT (and the " ub - str" 11th
375 // argument to __kmp_GOMP_fork_call).
376 //
// Conversely, KMP_DISPATCH_NEXT returns an inclusive upper bound in *p_ub,
// but the Gnu codegen expects an exclusive upper bound, so the adjustment
// "*p_ub += stride" compensates for the discrepancy.
//
381 // Correction: the gnu codegen always adjusts the upper bound by +-1, not the
382 // stride value. We adjust the dispatch parameters accordingly (by +-1), but
383 // we still adjust p_ub by the actual stride value.
384 //
385 // The "runtime" versions do not take a chunk_sz parameter.
386 //
// The profile lib cannot support construct checking of unordered loops that
// are predetermined by the compiler to be statically scheduled, as the gcc
// codegen will not always emit calls to GOMP_loop_static_next() to get the
// next iteration. Instead, it emits inline code to call omp_get_thread_num()
// and calculate the iteration space using the result. It doesn't do this
// with ordered static loops, so they can be checked.
393 //
394 
//
// Generator for the GOMP_loop_*_start() entry points over a signed "long"
// iteration space.  Initializes the dispatcher for [lb, ub) with stride
// str, then fetches the first chunk into *p_lb/*p_ub.  Returns nonzero iff
// a chunk was obtained.  The exclusive->inclusive upper-bound conversion
// is applied on init (ub -/+ 1), and the inverse adjustment (*p_ub +/- 1)
// on the value handed back to the gcc-generated caller.
//
#define LOOP_START(func,schedule) \
    int func (long lb, long ub, long str, long chunk_sz, long *p_lb, \
      long *p_ub) \
    { \
        int status; \
        long stride; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
              (schedule) != kmp_sch_static); \
            status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
              (kmp_int *)p_ub, (kmp_int *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }
425 
426 
//
// Generator for the runtime-schedule GOMP_loop_*_start() entry points.
// Identical to LOOP_START except that no chunk_sz parameter is received
// (the schedule, including the chunk, comes from the runtime), so a local
// chunk_sz of 0 is traced and passed through to the dispatcher.
//
// Fix: the trace format used "%d" for chunk_sz, which is a long; that is
// a printf format/argument type mismatch on LP64 targets ("%ld" now).
//
#define LOOP_RUNTIME_START(func,schedule) \
    int func (long lb, long ub, long str, long *p_lb, long *p_ub) \
    { \
        int status; \
        long stride; \
        long chunk_sz = 0; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %ld\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \
            status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
              (kmp_int *)p_ub, (kmp_int *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }
456 
457 
//
// Generator for the GOMP_loop_*_next() entry points: fetch the next chunk
// of the current loop workshare into *p_lb/*p_ub.  "fini_code" runs first;
// the ordered variants use it to finish the previous chunk.  The returned
// inclusive upper bound is widened by the stride to the exclusive bound
// the gcc-generated caller expects.
//
#define LOOP_NEXT(func,fini_code) \
    int func(long *p_lb, long *p_ub) \
    { \
        int status; \
        long stride; \
        int gtid = __kmp_get_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d\n", gtid)); \
        \
        fini_code \
        status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
          (kmp_int *)p_ub, (kmp_int *)&stride); \
        if (status) { \
            *p_ub += (stride > 0) ? 1 : -1; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, stride 0x%lx, " \
          "returning %d\n", gtid, *p_lb, *p_ub, stride, status)); \
        return status; \
    }
478 
479 
//
// Instantiate the start/next entry-point pairs for each schedule kind.
// The plain variants pass an empty fini_code; the ordered variants finish
// the previous chunk via KMP_DISPATCH_FINI_CHUNK before fetching the next.
//
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_STATIC_START), kmp_sch_static)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT), {})
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START), kmp_sch_dynamic_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT), {})
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_GUIDED_START), kmp_sch_guided_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT), {})
LOOP_RUNTIME_START(xexpand(KMP_API_NAME_GOMP_LOOP_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT), {})

LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START), kmp_ord_static)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START), kmp_ord_dynamic_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START), kmp_ord_guided_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_RUNTIME_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START), kmp_ord_runtime)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
501 
502 
503 void
504 xexpand(KMP_API_NAME_GOMP_LOOP_END)(void)
505 {
506  int gtid = __kmp_get_gtid();
507  KA_TRACE(20, ("GOMP_loop_end: T#%d\n", gtid))
508 
509  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
510 
511  KA_TRACE(20, ("GOMP_loop_end exit: T#%d\n", gtid))
512 }
513 
514 
// Nowait form of GOMP_loop_end(): intentionally performs no barrier;
// only the trace call remains.
void
xexpand(KMP_API_NAME_GOMP_LOOP_END_NOWAIT)(void)
{
    KA_TRACE(20, ("GOMP_loop_end_nowait: T#%d\n", __kmp_get_gtid()))
}
520 
521 
522 /* */
523 //
524 // Unsigned long long loop worksharing constructs
525 //
526 // These are new with gcc 4.4
527 //
528 
//
// Generator for the GOMP_loop_ull_*_start() entry points (unsigned long
// long iteration space, gcc 4.4+).  "up" gives the loop direction; str2 is
// the signed stride derived from up/str and is what the dispatcher is
// initialized with.
//
// NOTE(review): "str" is unsigned, so "(str > 0)" is true for any nonzero
// stride -- including down-counting loops (up == 0), where the signed
// "str2" is negative.  If a caller ever passes up == 0 with lb > ub, the
// (lb < ub) arm is selected and the loop is skipped, and the final
// "*p_ub += (str > 0) ? 1 : -1" always adds 1.  Verify whether these
// comparisons should use str2 instead of str.
//
#define LOOP_START_ULL(func,schedule) \
    int func (int up, unsigned long long lb, unsigned long long ub, \
      unsigned long long str, unsigned long long chunk_sz, \
      unsigned long long *p_lb, unsigned long long *p_ub) \
    { \
        int status; \
        long long str2 = up ? ((long long)str) : -((long long)str); \
        long long stride; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        \
        KA_TRACE(20, ( #func ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
          gtid, up, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
              (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \
              (schedule) != kmp_sch_static); \
            status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, \
              (kmp_uint64 *)p_lb, (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str2); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }
562 
563 
//
// Runtime-schedule variant of LOOP_START_ULL: no chunk_sz parameter; a
// local chunk_sz of 0 is passed through and the dispatcher is told the
// schedule is non-static (TRUE).
//
// NOTE(review): as in LOOP_START_ULL, "(str > 0)" on the unsigned str is
// true for any nonzero stride, so the down-counting (up == 0) direction
// never selects the (lb > ub) arm -- verify whether str2 was intended.
// Also note "stride" is declared unsigned here but its address is cast to
// kmp_int64 * and it is compared against the signed str2.
//
#define LOOP_RUNTIME_START_ULL(func,schedule) \
    int func (int up, unsigned long long lb, unsigned long long ub, \
      unsigned long long str, unsigned long long *p_lb, \
      unsigned long long *p_ub) \
    { \
        int status; \
        long long str2 = up ? ((long long)str) : -((long long)str); \
        unsigned long long stride; \
        unsigned long long chunk_sz = 0; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        \
        KA_TRACE(20, ( #func ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
          gtid, up, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
              (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, TRUE); \
            status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, \
              (kmp_uint64 *)p_lb, (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str2); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }
597 
598 
//
// Generator for the GOMP_loop_ull_*_next() entry points: run "fini_code"
// (ordered variants finish the previous chunk), fetch the next chunk, and
// widen the inclusive upper bound back to the exclusive form by +/-1
// according to the sign of the returned stride.
//
#define LOOP_NEXT_ULL(func,fini_code) \
    int func(unsigned long long *p_lb, unsigned long long *p_ub) \
    { \
        int status; \
        long long stride; \
        int gtid = __kmp_get_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d\n", gtid)); \
        \
        fini_code \
        status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
          (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
        if (status) { \
            *p_ub += (stride > 0) ? 1 : -1; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, stride 0x%llx, " \
          "returning %d\n", gtid, *p_lb, *p_ub, stride, status)); \
        return status; \
    }
619 
620 
//
// Instantiate the unsigned-long-long start/next entry-point pairs for each
// schedule kind; ordered variants finish the previous chunk first via
// KMP_DISPATCH_FINI_CHUNK_ULL.
//
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START), kmp_sch_static)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT), {})
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START), kmp_sch_dynamic_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT), {})
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START), kmp_sch_guided_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT), {})
LOOP_RUNTIME_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT), {})

LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START), kmp_ord_static)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START), kmp_ord_dynamic_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START), kmp_ord_guided_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_RUNTIME_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START), kmp_ord_runtime)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
642 
643 
644 /* */
645 //
646 // Combined parallel / loop worksharing constructs
647 //
648 // There are no ull versions (yet).
649 //
650 
//
// Generator for the combined GOMP_parallel_loop_*_start() entry points:
// fork a team running the parallel-loop microtask wrapper (which performs
// dispatch-init for the workers), then perform the master thread's own
// dispatch-init before returning.  The exclusive upper bound "ub" is
// converted to inclusive form (ub -/+ 1) for the dispatcher.
//
// Fix: removed the unused local "int last = FALSE;".
//
#define PARALLEL_LOOP_START(func, schedule) \
    void func (void (*task) (void *), void *data, unsigned num_threads, \
      long lb, long ub, long str, long chunk_sz) \
    { \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \
            if (num_threads != 0) { \
                __kmp_push_num_threads(&loc, gtid, num_threads); \
            } \
            __kmp_GOMP_fork_call(&loc, gtid, \
              (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, \
              task, data, num_threads, &loc, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
        } \
        else { \
            __kmpc_serialized_parallel(&loc, gtid); \
        } \
        \
        KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
          (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
          (schedule) != kmp_sch_static); \
        \
        KA_TRACE(20, ( #func " exit: T#%d\n", gtid)); \
    }
680 
681 
// Instantiate the combined parallel+loop "start" entry points, one per
// schedule kind (there are no ull versions of these).
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START), kmp_sch_static)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START), kmp_sch_dynamic_chunked)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START), kmp_sch_guided_chunked)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START), kmp_sch_runtime)
686 
687 
688 #if OMP_30_ENABLED
689 
690 
691 /* */
692 //
693 // Tasking constructs
694 //
695 
// GOMP_task: create (or, for if(0) tasks, immediately execute) an explicit
// task.  "func" is the task entry, "data" its argument block of arg_size
// bytes with arg_align alignment; "copy_func", when non-NULL, copies the
// firstprivate data into the task's shareds area.
void
xexpand(KMP_API_NAME_GOMP_TASK)(void (*func)(void *), void *data, void (*copy_func)(void *, void *),
    long arg_size, long arg_align, int if_cond, unsigned gomp_flags)
{
    MKLOC(loc, "GOMP_task");
    int gtid = __kmp_entry_gtid();
    kmp_int32 flags = 0;
    // Reinterpret the flags word as the runtime's tasking-flags bitfield.
    kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *) & flags;

    KA_TRACE(20, ("GOMP_task: T#%d\n", gtid));

    // The low-order bit is the "tied" flag
    if (gomp_flags & 1) {
        input_flags->tiedness = 1;
    }
    input_flags->native = 1;
    // __kmp_task_alloc() sets up all other flags

    // An if(0) task runs immediately below, so no argument copy is needed.
    if (! if_cond) {
        arg_size = 0;
    }

    // Over-allocate by arg_align - 1 bytes so the shareds pointer can be
    // rounded up to the requested alignment afterward.
    kmp_task_t *task = __kmp_task_alloc(&loc, gtid, input_flags,
      sizeof(kmp_task_t), arg_size ? arg_size + arg_align - 1 : 0,
      (kmp_routine_entry_t)func);

    if (arg_size > 0) {
        if (arg_align > 0) {
            // Round shareds up to the next multiple of arg_align.
            task->shareds = (void *)((((size_t)task->shareds)
              + arg_align - 1) / arg_align * arg_align);
        }
        //else error??

        if (copy_func) {
            (*copy_func)(task->shareds, data);
        }
        else {
            memcpy(task->shareds, data, arg_size);
        }
    }

    if (if_cond) {
        // Deferred task: hand it to the runtime's task queue.
        __kmpc_omp_task(&loc, gtid, task);
    }
    else {
        // Undeferred (if(0)) task: execute func(data) inline, bracketed by
        // the begin/complete calls so the runtime's bookkeeping stays correct.
        __kmpc_omp_task_begin_if0(&loc, gtid, task);
        func(data);
        __kmpc_omp_task_complete_if0(&loc, gtid, task);
    }

    KA_TRACE(20, ("GOMP_task exit: T#%d\n", gtid));
}
748 
749 
750 void
751 xexpand(KMP_API_NAME_GOMP_TASKWAIT)(void)
752 {
753  MKLOC(loc, "GOMP_taskwait");
754  int gtid = __kmp_entry_gtid();
755 
756  KA_TRACE(20, ("GOMP_taskwait: T#%d\n", gtid));
757 
758  __kmpc_omp_taskwait(&loc, gtid);
759 
760  KA_TRACE(20, ("GOMP_taskwait exit: T#%d\n", gtid));
761 }
762 
763 
764 #endif /* OMP_30_ENABLED */
765 
766 
767 /* */
768 //
769 // Sections worksharing constructs
770 //
771 
//
// For the sections construct, we initialize a dynamically scheduled loop
// worksharing construct with lb 1 and stride 1, and use the iteration #'s
// that it returns as section ids.
//
// There are no special entry points for ordered sections, so we always use
// the dynamically scheduled workshare, even if the sections aren't ordered.
//
780 
// Begin a sections construct with "count" sections: initialize a dynamic
// workshare over [1, count] with stride/chunk 1 and return the first
// section id claimed by this thread (0 means no section available).
unsigned
xexpand(KMP_API_NAME_GOMP_SECTIONS_START)(unsigned count)
{
    int status;
    kmp_int lb, ub, stride;
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_sections_start");
    KA_TRACE(20, ("GOMP_sections_start: T#%d\n", gtid));

    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
    if (status) {
        // With chunk 1, each dispatch hands back exactly one iteration
        // (lb == ub), which is the section id.
        KMP_DEBUG_ASSERT(stride == 1);
        KMP_DEBUG_ASSERT(lb > 0);
        KMP_ASSERT(lb == ub);
    }
    else {
        // No section left: 0 is the "done" sentinel for the GOMP caller.
        lb = 0;
    }

    KA_TRACE(20, ("GOMP_sections_start exit: T#%d returning %u\n", gtid,
      (unsigned)lb));
    return (unsigned)lb;
}
806 
807 
808 unsigned
809 xexpand(KMP_API_NAME_GOMP_SECTIONS_NEXT)(void)
810 {
811  int status;
812  kmp_int lb, ub, stride;
813  int gtid = __kmp_get_gtid();
814  MKLOC(loc, "GOMP_sections_next");
815  KA_TRACE(20, ("GOMP_sections_next: T#%d\n", gtid));
816 
817  status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
818  if (status) {
819  KMP_DEBUG_ASSERT(stride == 1);
820  KMP_DEBUG_ASSERT(lb > 0);
821  KMP_ASSERT(lb == ub);
822  }
823  else {
824  lb = 0;
825  }
826 
827  KA_TRACE(20, ("GOMP_sections_next exit: T#%d returning %u\n", gtid,
828  (unsigned)lb));
829  return (unsigned)lb;
830 }
831 
832 
833 void
834 xexpand(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START)(void (*task) (void *), void *data,
835  unsigned num_threads, unsigned count)
836 {
837  int gtid = __kmp_entry_gtid();
838  int last = FALSE;
839  MKLOC(loc, "GOMP_parallel_sections_start");
840  KA_TRACE(20, ("GOMP_parallel_sections_start: T#%d\n", gtid));
841 
842  if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
843  if (num_threads != 0) {
844  __kmp_push_num_threads(&loc, gtid, num_threads);
845  }
846  __kmp_GOMP_fork_call(&loc, gtid,
847  (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, task, data,
848  num_threads, &loc, kmp_nm_dynamic_chunked, (kmp_int)1,
849  (kmp_int)count, (kmp_int)1, (kmp_int)1);
850  }
851  else {
852  __kmpc_serialized_parallel(&loc, gtid);
853  }
854 
855  KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);
856 
857  KA_TRACE(20, ("GOMP_parallel_sections_start exit: T#%d\n", gtid));
858 }
859 
860 
861 void
862 xexpand(KMP_API_NAME_GOMP_SECTIONS_END)(void)
863 {
864  int gtid = __kmp_get_gtid();
865  KA_TRACE(20, ("GOMP_sections_end: T#%d\n", gtid))
866 
867  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
868 
869  KA_TRACE(20, ("GOMP_sections_end exit: T#%d\n", gtid))
870 }
871 
872 
// Nowait form of GOMP_sections_end(): intentionally performs no barrier;
// only the trace call remains.
void
xexpand(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT)(void)
{
    KA_TRACE(20, ("GOMP_sections_end_nowait: T#%d\n", __kmp_get_gtid()))
}
878 
879 // libgomp has an empty function for GOMP_taskyield as of 2013-10-10
void
xexpand(KMP_API_NAME_GOMP_TASKYIELD)(void)
{
    // Intentionally empty: matches libgomp, whose GOMP_taskyield is also a
    // no-op; task switching at a taskyield point is optional.
}
885 
/*
    The following sections of code create aliases for the GOMP_* functions,
    then create versioned symbols using the assembler directive .symver.
    This is only pertinent for ELF .so libraries.
    xaliasify and xversionify are defined in kmp_ftn_os.h
*/
892 
893 #if KMP_OS_LINUX
894 
// Each xaliasify(name, ver) invocation (defined in kmp_ftn_os.h) emits an
// alias symbol for the corresponding GOMP_* entry point; the xversionify
// invocations below bind those aliases to the GOMP_x.y version nodes via
// .symver.  Grouped by the libgomp symbol-version that introduced them.

// GOMP_1.0 aliases
xaliasify(KMP_API_NAME_GOMP_ATOMIC_END, 10);
xaliasify(KMP_API_NAME_GOMP_ATOMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_BARRIER, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_END, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_END, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_ORDERED_END, 10);
xaliasify(KMP_API_NAME_GOMP_ORDERED_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_END, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_START, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_END, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_START, 10);
xaliasify(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10);
xaliasify(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10);
xaliasify(KMP_API_NAME_GOMP_SINGLE_START, 10);

// GOMP_2.0 aliases
#if OMP_30_ENABLED
xaliasify(KMP_API_NAME_GOMP_TASK, 20);
xaliasify(KMP_API_NAME_GOMP_TASKWAIT, 20);
#endif
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20);

// GOMP_3.0 aliases
xaliasify(KMP_API_NAME_GOMP_TASKYIELD, 30);

// GOMP_4.0 aliases
/* TODO: add GOMP_4.0 aliases when corresponding
   GOMP_* functions are implemented
*/
967 
968 // GOMP_1.0 versioned symbols
// NOTE(review): xversionify is defined earlier in this file (outside this
// excerpt); it presumably attaches an ELF symbol-version node (e.g. via a
// .symver directive) binding each alias produced above to the named GOMP_x.y
// version, so binaries linked against libgomp resolve against this library --
// confirm against the macro definition. This whole section is Linux-only
// (see the KMP_OS_LINUX #endif at the bottom).
// The entries below must stay in one-to-one correspondence with the
// xaliasify tables above: same symbol, same version number.
969 xversionify(KMP_API_NAME_GOMP_ATOMIC_END, 10, "GOMP_1.0");
970 xversionify(KMP_API_NAME_GOMP_ATOMIC_START, 10, "GOMP_1.0");
971 xversionify(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0");
972 xversionify(KMP_API_NAME_GOMP_CRITICAL_END, 10, "GOMP_1.0");
973 xversionify(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10, "GOMP_1.0");
974 xversionify(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10, "GOMP_1.0");
975 xversionify(KMP_API_NAME_GOMP_CRITICAL_START, 10, "GOMP_1.0");
976 xversionify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10, "GOMP_1.0");
977 xversionify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
978 xversionify(KMP_API_NAME_GOMP_LOOP_END, 10, "GOMP_1.0");
979 xversionify(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10, "GOMP_1.0");
980 xversionify(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10, "GOMP_1.0");
981 xversionify(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10, "GOMP_1.0");
982 xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10, "GOMP_1.0");
983 xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10, "GOMP_1.0");
984 xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10, "GOMP_1.0");
985 xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10, "GOMP_1.0");
986 xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10, "GOMP_1.0");
987 xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10, "GOMP_1.0");
988 xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10, "GOMP_1.0");
989 xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10, "GOMP_1.0");
990 xversionify(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10, "GOMP_1.0");
991 xversionify(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10, "GOMP_1.0");
992 xversionify(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10, "GOMP_1.0");
993 xversionify(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10, "GOMP_1.0");
994 xversionify(KMP_API_NAME_GOMP_ORDERED_END, 10, "GOMP_1.0");
995 xversionify(KMP_API_NAME_GOMP_ORDERED_START, 10, "GOMP_1.0");
996 xversionify(KMP_API_NAME_GOMP_PARALLEL_END, 10, "GOMP_1.0");
997 xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
998 xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10, "GOMP_1.0");
999 xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10, "GOMP_1.0");
1000 xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10, "GOMP_1.0");
1001 xversionify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10, "GOMP_1.0");
1002 xversionify(KMP_API_NAME_GOMP_PARALLEL_START, 10, "GOMP_1.0");
1003 xversionify(KMP_API_NAME_GOMP_SECTIONS_END, 10, "GOMP_1.0");
1004 xversionify(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10, "GOMP_1.0");
1005 xversionify(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10, "GOMP_1.0");
1006 xversionify(KMP_API_NAME_GOMP_SECTIONS_START, 10, "GOMP_1.0");
1007 xversionify(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10, "GOMP_1.0");
1008 xversionify(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10, "GOMP_1.0");
1009 xversionify(KMP_API_NAME_GOMP_SINGLE_START, 10, "GOMP_1.0");
1010 
1011 // GOMP_2.0 versioned symbols
1012 #if OMP_30_ENABLED
// Tasking symbols are versioned only when OpenMP 3.0 support is compiled in,
// matching the guard in the alias section above.
1013 xversionify(KMP_API_NAME_GOMP_TASK, 20, "GOMP_2.0");
1014 xversionify(KMP_API_NAME_GOMP_TASKWAIT, 20, "GOMP_2.0");
1015 #endif
1016 xversionify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20, "GOMP_2.0");
1017 xversionify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20, "GOMP_2.0");
1018 xversionify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20, "GOMP_2.0");
1019 xversionify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20, "GOMP_2.0");
1020 xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20, "GOMP_2.0");
1021 xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20, "GOMP_2.0");
1022 xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20, "GOMP_2.0");
1023 xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20, "GOMP_2.0");
1024 xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20, "GOMP_2.0");
1025 xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20, "GOMP_2.0");
1026 xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20, "GOMP_2.0");
1027 xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20, "GOMP_2.0");
1028 xversionify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20, "GOMP_2.0");
1029 xversionify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20, "GOMP_2.0");
1030 xversionify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20, "GOMP_2.0");
1031 xversionify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20, "GOMP_2.0");
1032 
1033 // GOMP_3.0 versioned symbols
1034 xversionify(KMP_API_NAME_GOMP_TASKYIELD, 30, "GOMP_3.0");
1035 
1036 // GOMP_4.0 versioned symbols
1037 /* TODO: add GOMP_4.0 versioned symbols when corresponding
1038  GOMP_* functions are implemented
1039 */
1040 
1041 #endif /* KMP_OS_LINUX */
1042 
1043 #ifdef __cplusplus
1044  } //extern "C"
1045 #endif // __cplusplus
1046 
1047 
KMP_EXPORT void __kmpc_end_ordered(ident_t *, kmp_int32 global_tid)
KMP_EXPORT void __kmpc_end_serialized_parallel(ident_t *, kmp_int32 global_tid)
Definition: kmp_csupport.c:671
KMP_EXPORT void __kmpc_ordered(ident_t *, kmp_int32 global_tid)
Definition: kmp_csupport.c:967
KMP_EXPORT void __kmpc_critical(ident_t *, kmp_int32 global_tid, kmp_critical_name *)
Definition: kmp.h:200
KMP_EXPORT kmp_int32 __kmpc_ok_to_fork(ident_t *)
Definition: kmp_csupport.c:176
KMP_EXPORT void __kmpc_barrier(ident_t *, kmp_int32 global_tid)
Definition: kmp_csupport.c:878
KMP_EXPORT void __kmpc_end_critical(ident_t *, kmp_int32 global_tid, kmp_critical_name *)
KMP_EXPORT void __kmpc_serialized_parallel(ident_t *, kmp_int32 global_tid)
Definition: kmp_csupport.c:413
sched_type
Definition: kmp.h:302