#include "kmp_error.h"

#define MIN_STACK       100
static char const * cons_text_fort[] = {
    "ORDERED work-sharing",

static char const * cons_text_c[] = {
    "\"ordered\" work-sharing",
#define get_src( ident ) ( (ident) == NULL ? NULL : (ident)->psource )

#define PUSH_MSG( ct, ident ) \
    "\tpushing on stack: %s (%s)\n", cons_text_c[ (ct) ], get_src( (ident) )
#define POP_MSG( p ) \
    "\tpopping off stack: %s (%s)\n", \
    cons_text_c[ (p)->stack_data[ tos ].type ], \
    get_src( (p)->stack_data[ tos ].ident )
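/*
 * PUSH_MSG and POP_MSG expand to a printf-style format string plus its
 * arguments, intended to be dropped into a KE_TRACE( 100, ( ... ) ) call as
 * the push/pop routines below do.  Note that POP_MSG reads a local variable
 * named "tos" at the expansion site, so it is only usable where such a
 * variable is in scope.
 */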
static int const cons_text_fort_num = sizeof( cons_text_fort ) / sizeof( char const * );
static int const cons_text_c_num    = sizeof( cons_text_c )    / sizeof( char const * );
static void
__kmp_check_null_func(
    void
) {
    /* nothing to do */
}
static void
__kmp_expand_cons_stack(
    int gtid,
    struct cons_header *p
) {
    int    i;
    struct cons_data *d;

    if ( gtid < 0 )
        __kmp_check_null_func();

    KE_TRACE( 10, ( "expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid() ) );

    d = p->stack_data;   /* keep the old stack while copying */

    p->stack_size = (p->stack_size * 2) + 100;

    p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (p->stack_size+1) );

    for (i = p->stack_top; i >= 0; --i)
        p->stack_data[i] = d[i];

    /* NOTE: the old stack_data is not freed here */
}
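/*
 * __kmp_pragma() builds a human-readable description of a construct from the
 * construct-name table above and the ';'-separated location record stored in
 * ident->psource, formatted via the kmp_i18n_fmt_Pragma message.  The caller
 * owns the returned string and releases it with KMP_INTERNAL_FREE.
 */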
static char const *
__kmp_pragma(
    enum cons_type   ct,
    ident_t const *  ident
) {
    char const * cons = NULL;   /* construct name */
    char * file = NULL;         /* file name      */
    char * func = NULL;         /* function name  */
    char * line = NULL;         /* line number    */
    kmp_str_buf_t buffer;
    kmp_msg_t     prgm;
    __kmp_str_buf_init( & buffer );
    if ( 0 < ct && ct <= cons_text_c_num ) {
        cons = cons_text_c[ ct ];
    } else {
        KMP_DEBUG_ASSERT( 0 );
    }
    if ( ident != NULL && ident->psource != NULL ) {
        char * tail = NULL;
        __kmp_str_buf_print( & buffer, "%s", ident->psource ); /* copy source location to buffer */
        /* split the ';'-separated record into file, function, and line fields */
        tail = buffer.str;
        __kmp_str_split( tail, ';', NULL,   & tail );
        __kmp_str_split( tail, ';', & file, & tail );
        __kmp_str_split( tail, ';', & func, & tail );
        __kmp_str_split( tail, ';', & line, & tail );
    }
    prgm = __kmp_msg_format( kmp_i18n_fmt_Pragma, cons, file, func, line );
    __kmp_str_buf_free( & buffer );
    return prgm.str;
}
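/*
 * The two helpers below report consistency-check failures as fatal messages.
 * __kmp_error_construct() names a single construct; __kmp_error_construct2()
 * additionally names the enclosing construct (a cons_data entry taken from
 * the stack) that makes the nesting invalid.
 */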
void
__kmp_error_construct(
    kmp_i18n_id_t   id,     // Message identifier.
    enum cons_type  ct,     // Construct type.
    ident_t const * ident   // Construct ident.
) {
    char const * construct = __kmp_pragma( ct, ident );
    __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct ), __kmp_msg_null );
    KMP_INTERNAL_FREE( (void *) construct );
}
void
__kmp_error_construct2(
    kmp_i18n_id_t   id,             // Message identifier.
    enum cons_type  ct,             // First construct type.
    ident_t const * ident,          // First construct ident.
    struct cons_data const * cons   // Second construct.
) {
    char const * construct1 = __kmp_pragma( ct, ident );
    char const * construct2 = __kmp_pragma( cons->type, cons->ident );
    __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct1, construct2 ), __kmp_msg_null );
    KMP_INTERNAL_FREE( (void *) construct1 );
    KMP_INTERNAL_FREE( (void *) construct2 );
}
struct cons_header *
__kmp_allocate_cons_stack( int gtid )
{
    struct cons_header *p;

    if ( gtid < 0 ) {
        __kmp_check_null_func();
    }
    KE_TRACE( 10, ( "allocate cons_stack (%d)\n", gtid ) );
    p = (struct cons_header *) __kmp_allocate( sizeof( struct cons_header ) );
    p->p_top = p->w_top = p->s_top = 0;
    p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (MIN_STACK+1) );
    p->stack_size = MIN_STACK;
    p->stack_top  = 0;
    /* entry 0 is a permanent ct_none sentinel; prev == 0 terminates each chain */
    p->stack_data[ 0 ].type = ct_none;
    p->stack_data[ 0 ].prev = 0;
    p->stack_data[ 0 ].ident = NULL;
    return p;
}
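/*
 * A single stack_data[] array holds three interleaved chains linked through
 * the "prev" field: p_top points at the innermost PARALLEL entry, w_top at
 * the innermost worksharing entry, and s_top at the innermost synchronizing
 * entry (e.g. critical or ordered).  Entry 0 is the ct_none sentinel
 * installed above, so an index of 0 terminates every chain.
 */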
void
__kmp_free_cons_stack( void * ptr ) {
    struct cons_header * p = (struct cons_header *) ptr;
    if ( p != NULL ) {
        if ( p->stack_data != NULL ) {
            __kmp_free( p->stack_data );
            p->stack_data = NULL;
        }
        __kmp_free( p );
    }
}
static void
dump_cons_stack( int gtid, struct cons_header * p ) {
    int i;
    int tos = p->stack_top;
    kmp_str_buf_t buffer;
    __kmp_str_buf_init( & buffer );
    __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" );
    __kmp_str_buf_print( & buffer, "Begin construct stack with %d items for thread %d\n", tos, gtid );
    __kmp_str_buf_print( & buffer, "     stack_top=%d { P=%d, W=%d, S=%d }\n", tos, p->p_top, p->w_top, p->s_top );
    for ( i = tos; i > 0; i-- ) {
        struct cons_data * c = & ( p->stack_data[ i ] );
        __kmp_str_buf_print( & buffer, "        stack_data[%2d] = { %s (%s) %d %p }\n", i, cons_text_c[ c->type ], get_src( c->ident ), c->prev, c->name );
    }
    __kmp_str_buf_print( & buffer, "End construct stack for thread %d\n", gtid );
    __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" );
    __kmp_debug_printf( "%s", buffer.str );
    __kmp_str_buf_free( & buffer );
}
void
__kmp_push_parallel( int gtid, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]->th.th_cons );
    KE_TRACE( 10, ( "__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
    KE_TRACE( 100, ( PUSH_MSG( ct_parallel, ident ) ) );
    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    }
    tos = ++p->stack_top;
    p->stack_data[ tos ].type = ct_parallel;
    p->stack_data[ tos ].prev = p->p_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name = NULL;
    p->p_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
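/*
 * __kmp_check_workshare() rejects a new worksharing construct when the thread
 * is already inside a worksharing or synchronizing construct of the current
 * PARALLEL region (w_top/s_top newer than p_top); the only exception is a
 * taskq construct nested inside another taskq construct.
 */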
void
__kmp_check_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]->th.th_cons );
    KE_TRACE( 10, ( "__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );

    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    }
    if ( p->w_top > p->p_top &&
         !(IS_CONS_TYPE_TASKQ(p->stack_data[ p->w_top ].type) && IS_CONS_TYPE_TASKQ(ct))) {
        /* already in a WORKSHARE construct for this PARALLEL region */
        __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->w_top ] );
    }
    if ( p->s_top > p->p_top ) {
        /* already in a SYNC construct for this PARALLEL region */
        __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->s_top ] );
    }
}
void
__kmp_push_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ( "__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    __kmp_check_workshare( gtid, ct, ident );
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++p->stack_top;
    p->stack_data[ tos ].type = ct;
    p->stack_data[ tos ].prev = p->w_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name = NULL;
    p->w_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
void
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ( "__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid() ) );

    if (p->stack_top >= p->stack_size)
        __kmp_expand_cons_stack( gtid, p );

    if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo || ct == ct_ordered_in_taskq ) {
        if (p->w_top <= p->p_top) {
            /* not inside a worksharing construct for this PARALLEL region */
#ifdef BUILD_PARALLEL_ORDERED
            /* do not report an error for PARALLEL ORDERED */
            KMP_ASSERT( ct == ct_ordered_in_parallel );
#else
            __kmp_error_construct( kmp_i18n_msg_CnsBoundToWorksharing, ct, ident );
#endif /* BUILD_PARALLEL_ORDERED */
        } else {
            /* inside a worksharing construct: it must carry an ordered clause */
            if (!IS_CONS_TYPE_ORDERED(p->stack_data[ p->w_top ].type)) {
                if (p->stack_data[ p->w_top ].type == ct_taskq) {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNotInTaskConstruct,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                } else {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNoOrderedClause,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                }
            }
        }
        if (p->s_top > p->p_top && p->s_top > p->w_top) {
            /* inside a SYNC construct which is inside a worksharing construct */
            int index = p->s_top;
            enum cons_type stack_type;

            stack_type = p->stack_data[ index ].type;

            if (stack_type == ct_critical ||
                ( ( stack_type == ct_ordered_in_parallel ||
                    stack_type == ct_ordered_in_pdo ||
                    stack_type == ct_ordered_in_taskq ) &&
                  p->stack_data[ index ].ident != NULL &&
                  ( p->stack_data[ index ].ident->flags & KMP_IDENT_KMPC ) ) ) {
                /* ORDERED nested inside an ORDERED or CRITICAL construct */
                __kmp_error_construct2(
                    kmp_i18n_msg_CnsInvalidNesting,
                    ct, ident,
                    & p->stack_data[ index ]
                );
            }
        }
    } else if ( ct == ct_critical ) {
        if ( lck != NULL && __kmp_get_user_lock_owner( lck ) == gtid ) {
            /* this thread already owns the lock for this critical section */
            int index = p->s_top;
            struct cons_data cons = { NULL, ct_critical, 0, NULL };
            /* walk up the sync chain looking for a critical with this lock */
            while ( index != 0 && p->stack_data[ index ].name != lck ) {
                index = p->stack_data[ index ].prev;
            }
            if ( index != 0 ) {
                cons = p->stack_data[ index ];
            }
            /* CRITICAL nested inside a CRITICAL construct with the same name */
            __kmp_error_construct2( kmp_i18n_msg_CnsNestingSameName, ct, ident, & cons );
        }
    } else if ( ct == ct_master || ct == ct_reduce ) {
        if (p->w_top > p->p_top) {
            /* inside a WORKSHARE construct for this PARALLEL region */
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->w_top ]
            );
        }
        if (ct == ct_reduce && p->s_top > p->p_top) {
            /* inside another SYNC construct for this PARALLEL region */
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->s_top ]
            );
        }
    }
}
void
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_ASSERT( gtid == __kmp_get_gtid() );
    KE_TRACE( 10, ( "__kmp_push_sync (gtid=%d)\n", gtid ) );
    __kmp_check_sync( gtid, ct, ident, lck );
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++ p->stack_top;
    p->stack_data[ tos ].type = ct;
    p->stack_data[ tos ].prev = p->s_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name = lck;
    p->s_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
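/*
 * Illustrative only (the actual call sites live outside this file): the push
 * and pop routines are intended to bracket each construct, so a critical
 * section inside a parallel region would be checked roughly as follows,
 * assuming the construct type and lock are passed through from the caller:
 *
 *     __kmp_push_parallel( gtid, loc );                 // enter parallel region
 *     __kmp_push_sync( gtid, ct_critical, loc, lck );   // enter critical
 *     ...                                               // user code
 *     __kmp_pop_sync( gtid, ct_critical, loc );         // exit critical
 *     __kmp_pop_parallel( gtid, loc );                  // exit parallel region
 *
 * A missing or mismatched pop is reported by the pop routines below via
 * kmp_i18n_msg_CnsDetectedEnd / kmp_i18n_msg_CnsExpectedEnd.
 */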
void
__kmp_pop_parallel( int gtid, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    tos = p->stack_top;
    KE_TRACE( 10, ( "__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->p_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident );
    }
    if ( tos != p->p_top || p->stack_data[ tos ].type != ct_parallel ) {
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct_parallel, ident,
            & p->stack_data[ tos ]
        );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->p_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
enum cons_type
__kmp_pop_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    tos = p->stack_top;
    KE_TRACE( 10, ( "__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->w_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    }

    if ( tos != p->w_top ||
         ( p->stack_data[ tos ].type != ct &&
           /* below are two exceptions to the rule that construct types must match */
           ! ( p->stack_data[ tos ].type == ct_pdo_ordered && ct == ct_pdo ) &&
           ! ( p->stack_data[ tos ].type == ct_task_ordered && ct == ct_task )
         )
       ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->w_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
    return p->stack_data[ p->w_top ].type;
}
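/*
 * Note that __kmp_pop_workshare() returns the type of the entry that w_top
 * points at after the pop, i.e. the enclosing worksharing construct, or the
 * ct_none sentinel once the chain is empty.
 */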
void
__kmp_pop_sync( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    tos = p->stack_top;
    KE_TRACE( 10, ( "__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->s_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    }
    if ( tos != p->s_top || p->stack_data[ tos ].type != ct ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    }
    if ( gtid < 0 ) {
        __kmp_check_null_func();
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->s_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
void
__kmp_check_barrier( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ( "__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid, __kmp_get_gtid() ) );
    if ( ident != NULL ) {
        __kmp_check_null_func();
    }
    if ( p->w_top > p->p_top ) {
        /* already in a WORKSHARE construct for this PARALLEL region */
        __kmp_error_construct2(
            kmp_i18n_msg_CnsInvalidNesting,
            ct, ident,
            & p->stack_data[ p->w_top ]
        );
    }
    if (p->s_top > p->p_top) {
        /* already in a SYNC construct for this PARALLEL region */
        __kmp_error_construct2(
            kmp_i18n_msg_CnsInvalidNesting,
            ct, ident,
            & p->stack_data[ p->s_top ]
        );
    }
}
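/*
 * Overall lifecycle sketch (an assumption based on how the routines in this
 * file fit together, not on code shown here): each thread's stack is created
 * with __kmp_allocate_cons_stack() and stored in th.th_cons, constructs are
 * bracketed with the push/check/pop routines above as they are entered and
 * exited, and the stack is released with __kmp_free_cons_stack().
 */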