Intel® OpenMP* Runtime Library
kmp_error.c
/*
 * kmp_error.c -- KPTS functions for error checking at runtime
 * $Revision: 42951 $
 * $Date: 2014-01-21 14:41:41 -0600 (Tue, 21 Jan 2014) $
 */

/* <copyright>
    Copyright (c) 1997-2014 Intel Corporation. All Rights Reserved.

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions
    are met:

      * Redistributions of source code must retain the above copyright
        notice, this list of conditions and the following disclaimer.
      * Redistributions in binary form must reproduce the above copyright
        notice, this list of conditions and the following disclaimer in the
        documentation and/or other materials provided with the distribution.
      * Neither the name of Intel Corporation nor the names of its
        contributors may be used to endorse or promote products derived
        from this software without specific prior written permission.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

</copyright> */

#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_str.h"
#include "kmp_error.h"

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#define MIN_STACK 100

static char const * cons_text_fort[] = {
    "(none)",
    "PARALLEL",
    "work-sharing",             /* this is not called DO because of lowering of SECTIONS and WORKSHARE directives */
    "ORDERED work-sharing",     /* this is not called DO ORDERED because of lowering of SECTIONS directives */
    "SECTIONS",
    "work-sharing",             /* this is not called SINGLE because of lowering of SECTIONS and WORKSHARE directives */
    "TASKQ",
    "TASKQ",
    "TASKQ ORDERED",
    "CRITICAL",
    "ORDERED",                  /* in PARALLEL */
    "ORDERED",                  /* in PDO */
    "ORDERED",                  /* in TASKQ */
    "MASTER",
    "REDUCE",
    "BARRIER"
};

static char const * cons_text_c[] = {
    "(none)",
    "\"parallel\"",
    "work-sharing",             /* this is not called "for" because of lowering of "sections" pragmas */
    "\"ordered\" work-sharing", /* this is not called "for ordered" because of lowering of "sections" pragmas */
    "\"sections\"",
    "work-sharing",             /* this is not called "single" because of lowering of "sections" pragmas */
    "\"taskq\"",
    "\"taskq\"",
    "\"taskq ordered\"",
    "\"critical\"",
    "\"ordered\"",              /* in PARALLEL */
    "\"ordered\"",              /* in PDO */
    "\"ordered\"",              /* in TASKQ */
    "\"master\"",
    "\"reduce\"",
    "\"barrier\""
};

#define get_src( ident ) ( (ident) == NULL ? NULL : (ident)->psource )

#define PUSH_MSG( ct, ident ) \
    "\tpushing on stack: %s (%s)\n", cons_text_c[ (ct) ], get_src( (ident) )
#define POP_MSG( p ) \
    "\tpopping off stack: %s (%s)\n", \
    cons_text_c[ (p)->stack_data[ tos ].type ], \
    get_src( (p)->stack_data[ tos ].ident )

static int const cons_text_fort_num = sizeof( cons_text_fort ) / sizeof( char const * );
static int const cons_text_c_num    = sizeof( cons_text_c )    / sizeof( char const * );
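
/*
   Both tables above are indexed by enum cons_type, so their entries must
   stay in one-to-one correspondence with the enumerators (ct_none,
   ct_parallel, ct_pdo, ..., ct_barrier, as used throughout this file).
   A minimal sketch of the lookup that PUSH_MSG performs, assuming a
   hypothetical ident_t pointer named loc:

       // expands to the printf-style argument list:
       //   "\tpushing on stack: %s (%s)\n", cons_text_c[ ct_critical ],
       //   followed by loc's psource string (NULL-checked by get_src)
       KE_TRACE( 100, ( PUSH_MSG( ct_critical, loc ) ) );
*/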

/* ------------------------------------------------------------------------ */
/* --------------- START OF STATIC LOCAL ROUTINES ------------------------- */
/* ------------------------------------------------------------------------ */

static void
__kmp_check_null_func( void )
{
    /* nothing to do */
}

static void
__kmp_expand_cons_stack( int gtid, struct cons_header *p )
{
    int    i;
    struct cons_data *d;

    /* TODO for monitor perhaps? */
    if ( gtid < 0 )
        __kmp_check_null_func();

    KE_TRACE( 10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid() ) );

    d = p->stack_data;

    p->stack_size = (p->stack_size * 2) + 100;

    /* TODO free the old data */
    p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (p->stack_size+1) );

    for ( i = p->stack_top; i >= 0; --i )
        p->stack_data[i] = d[i];

    /* NOTE: we do not free the old stack_data */
}

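/*
   Growth arithmetic (informative): capacity follows
   new_size = 2 * old_size + 100, so starting from MIN_STACK = 100 the
   stack grows 100 -> 300 -> 700 -> 1500 -> ... entries. As the NOTE
   above says, the superseded stack_data block is not freed (a known
   leak; see the TODO above).
*/
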
// NOTE: Function returns allocated memory, caller must free it!
static char const *
__kmp_pragma(
    enum cons_type  ct,
    ident_t const * ident
) {
    char const * cons = NULL;  // Construct name.
    char * file = NULL;        // File name.
    char * func = NULL;        // Function (routine) name.
    char * line = NULL;        // Line number.
    kmp_str_buf_t buffer;
    kmp_msg_t prgm;
    __kmp_str_buf_init( & buffer );
    if ( 0 < ct && ct < cons_text_c_num ) {  // Valid indexes are 0 .. cons_text_c_num - 1.
        cons = cons_text_c[ ct ];
    } else {
        KMP_DEBUG_ASSERT( 0 );
    };
    if ( ident != NULL && ident->psource != NULL ) {
        char * tail = NULL;
        __kmp_str_buf_print( & buffer, "%s", ident->psource );  // Copy source to buffer.
        // Split string in buffer to file, func, and line.
        tail = buffer.str;
        __kmp_str_split( tail, ';', NULL,   & tail );
        __kmp_str_split( tail, ';', & file, & tail );
        __kmp_str_split( tail, ';', & func, & tail );
        __kmp_str_split( tail, ';', & line, & tail );
    }; // if
    prgm = __kmp_msg_format( kmp_i18n_fmt_Pragma, cons, file, func, line );
    __kmp_str_buf_free( & buffer );
    return prgm.str;
} // __kmp_pragma

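/*
   Ownership sketch: __kmp_pragma() hands back heap-allocated storage
   (the str field of the kmp_msg_t built by __kmp_msg_format), so every
   caller must release it, as __kmp_error_construct() below does:

       char const * construct = __kmp_pragma( ct, ident );
       // ... format and emit the diagnostic ...
       KMP_INTERNAL_FREE( (void *) construct );
*/
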
/* ------------------------------------------------------------------------ */
/* ----------------- END OF STATIC LOCAL ROUTINES ------------------------- */
/* ------------------------------------------------------------------------ */


void
__kmp_error_construct(
    kmp_i18n_id_t   id,     // Message identifier.
    enum cons_type  ct,     // Construct type.
    ident_t const * ident   // Construct ident.
) {
    char const * construct = __kmp_pragma( ct, ident );
    __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct ), __kmp_msg_null );
    KMP_INTERNAL_FREE( (void *) construct );
}

void
__kmp_error_construct2(
    kmp_i18n_id_t   id,     // Message identifier.
    enum cons_type  ct,     // First construct type.
    ident_t const * ident,  // First construct ident.
    struct cons_data const * cons  // Second construct.
) {
    char const * construct1 = __kmp_pragma( ct, ident );
    char const * construct2 = __kmp_pragma( cons->type, cons->ident );
    __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct1, construct2 ), __kmp_msg_null );
    KMP_INTERNAL_FREE( (void *) construct1 );
    KMP_INTERNAL_FREE( (void *) construct2 );
}

struct cons_header *
__kmp_allocate_cons_stack( int gtid )
{
    struct cons_header *p;

    /* TODO for monitor perhaps? */
    if ( gtid < 0 ) {
        __kmp_check_null_func();
    }; // if
    KE_TRACE( 10, ("allocate cons_stack (%d)\n", gtid ) );
    p = (struct cons_header *) __kmp_allocate( sizeof( struct cons_header ) );
    p->p_top = p->w_top = p->s_top = 0;
    p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (MIN_STACK+1) );
    p->stack_size = MIN_STACK;
    p->stack_top  = 0;
    p->stack_data[ 0 ].type  = ct_none;
    p->stack_data[ 0 ].prev  = 0;
    p->stack_data[ 0 ].ident = NULL;
    return p;
}

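/*
   Layout note: stack_data[ 0 ] is a permanent ct_none sentinel, so a
   p_top / w_top / s_top value of 0 means "no construct of that kind is
   currently open". All three tops are indices into the one stack_data[]
   array; each pushed entry stores the previous top of its kind in its
   prev field, so the array carries three singly linked sub-stacks.
   A hedged usage sketch (gtid is a hypothetical global thread id):

       struct cons_header * p = __kmp_allocate_cons_stack( gtid );
       // now: p->stack_top == 0 and p->p_top == p->w_top == p->s_top == 0,
       // with p->stack_data[ 0 ].type == ct_none as the common sentinel
       __kmp_free_cons_stack( p );
*/
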
void
__kmp_free_cons_stack( void * ptr ) {
    struct cons_header * p = (struct cons_header *) ptr;
    if ( p != NULL ) {
        if ( p->stack_data != NULL ) {
            __kmp_free( p->stack_data );
            p->stack_data = NULL;
        }; // if
        __kmp_free( p );
    }; // if
}


static void
dump_cons_stack( int gtid, struct cons_header * p ) {
    int i;
    int tos = p->stack_top;
    kmp_str_buf_t buffer;
    __kmp_str_buf_init( & buffer );
    __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" );
    __kmp_str_buf_print( & buffer, "Begin construct stack with %d items for thread %d\n", tos, gtid );
    __kmp_str_buf_print( & buffer, "     stack_top=%d { P=%d, W=%d, S=%d }\n", tos, p->p_top, p->w_top, p->s_top );
    for ( i = tos; i > 0; i-- ) {
        struct cons_data * c = & ( p->stack_data[ i ] );
        __kmp_str_buf_print( & buffer, "        stack_data[%2d] = { %s (%s) %d %p }\n", i, cons_text_c[ c->type ], get_src( c->ident ), c->prev, c->name );
    }; // for i
    __kmp_str_buf_print( & buffer, "End construct stack for thread %d\n", gtid );
    __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" );
    __kmp_debug_printf( "%s", buffer.str );
    __kmp_str_buf_free( & buffer );
}

void
__kmp_push_parallel( int gtid, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]->th.th_cons );
    KE_TRACE( 10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
    KE_TRACE( 100, ( PUSH_MSG( ct_parallel, ident ) ) );
    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    }; // if
    tos = ++p->stack_top;
    p->stack_data[ tos ].type  = ct_parallel;
    p->stack_data[ tos ].prev  = p->p_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = NULL;
    p->p_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

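/*
   Push discipline (shared by __kmp_push_workshare and __kmp_push_sync
   below): each push claims the next stack_data slot, links the new entry
   to the old top of its kind through prev, and advances that kind's top.
   Illustrative nest (indices assume an otherwise empty stack):

       #pragma omp parallel        // push ct_parallel: stack_top = p_top = 1
       #pragma omp critical        // push ct_critical: stack_top = s_top = 2

   The nesting checks then reduce to index comparisons: w_top > p_top or
   s_top > p_top means a worksharing or sync construct is open inside the
   innermost parallel region.
*/
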
void
__kmp_check_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]->th.th_cons );
    KE_TRACE( 10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );

    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    }; // if
    if ( p->w_top > p->p_top &&
         !(IS_CONS_TYPE_TASKQ(p->stack_data[ p->w_top ].type) && IS_CONS_TYPE_TASKQ(ct))) {
        // We are already in a WORKSHARE construct for this PARALLEL region.
        __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->w_top ] );
    }; // if
    if ( p->s_top > p->p_top ) {
        // We are already in a SYNC construct for this PARALLEL region.
        __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->s_top ] );
    }; // if
}

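/*
   Illustrative violation (hypothetical user code): a worksharing
   construct opened inside another worksharing construct of the same
   parallel region, e.g.

       #pragma omp for
       for ( i = 0; i < n; ++i ) {
           #pragma omp sections    // rejected here: w_top already > p_top
           { ... }
       }

   The IS_CONS_TYPE_TASKQ test above carves out the one allowed case:
   taskq constructs may nest inside other taskq constructs even though
   both live on the worksharing sub-stack.
*/
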
void
__kmp_push_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    KE_TRACE( 10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    __kmp_check_workshare( gtid, ct, ident );
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++p->stack_top;
    p->stack_data[ tos ].type  = ct;
    p->stack_data[ tos ].prev  = p->w_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = NULL;
    p->w_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

void
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid() ) );

    if ( p->stack_top >= p->stack_size )
        __kmp_expand_cons_stack( gtid, p );

    if ( ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo || ct == ct_ordered_in_taskq ) {
        if ( p->w_top <= p->p_top ) {
            /* we are not in a worksharing construct */
            #ifdef BUILD_PARALLEL_ORDERED
                /* do not report error messages for PARALLEL ORDERED */
                KMP_ASSERT( ct == ct_ordered_in_parallel );
            #else
                __kmp_error_construct( kmp_i18n_msg_CnsBoundToWorksharing, ct, ident );
            #endif /* BUILD_PARALLEL_ORDERED */
        } else {
            /* inside a WORKSHARING construct for this PARALLEL region */
            if ( !IS_CONS_TYPE_ORDERED( p->stack_data[ p->w_top ].type ) ) {
                if ( p->stack_data[ p->w_top ].type == ct_taskq ) {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNotInTaskConstruct,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                } else {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNoOrderedClause,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                }
            }
        }
        if ( p->s_top > p->p_top && p->s_top > p->w_top ) {
            /* inside a sync construct which is inside a worksharing construct */
            int index = p->s_top;
            enum cons_type stack_type;

            stack_type = p->stack_data[ index ].type;

            if ( stack_type == ct_critical ||
                 ( ( stack_type == ct_ordered_in_parallel ||
                     stack_type == ct_ordered_in_pdo      ||
                     stack_type == ct_ordered_in_taskq ) &&  /* C doesn't allow named ordered; ordered in ordered gets error */
                   p->stack_data[ index ].ident != NULL &&
                   ( p->stack_data[ index ].ident->flags & KMP_IDENT_KMPC ) ) ) {
                /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
                __kmp_error_construct2(
                    kmp_i18n_msg_CnsInvalidNesting,
                    ct, ident,
                    & p->stack_data[ index ]
                );
            }
        }
    } else if ( ct == ct_critical ) {
        if ( lck != NULL && __kmp_get_user_lock_owner( lck ) == gtid ) {  /* this thread already owns the lock for this critical section */
            int index = p->s_top;
            struct cons_data cons = { NULL, ct_critical, 0, NULL };
            /* walk up construct stack and try to find critical with matching name */
            while ( index != 0 && p->stack_data[ index ].name != lck ) {
                index = p->stack_data[ index ].prev;
            }
            if ( index != 0 ) {
                /* found match on the stack (may not always find it because of interleaved criticals in Fortran) */
                cons = p->stack_data[ index ];
            }
            /* we are in CRITICAL which is inside a CRITICAL construct of the same name */
            __kmp_error_construct2( kmp_i18n_msg_CnsNestingSameName, ct, ident, & cons );
        }
    } else if ( ct == ct_master || ct == ct_reduce ) {
        if ( p->w_top > p->p_top ) {
            /* inside a WORKSHARING construct for this PARALLEL region */
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->w_top ]
            );
        }
        if ( ct == ct_reduce && p->s_top > p->p_top ) {
            /* inside another SYNC construct for this PARALLEL region */
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->s_top ]
            );
        }; // if
    }; // if
}

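/*
   The ct_critical branch above is a deadlock detector: if this thread
   already owns lck, it is about to re-enter a critical section it is
   currently inside, which can never complete. Illustrative trigger
   (hypothetical user code):

       #pragma omp critical (lock_a)
       {
           #pragma omp critical (lock_a)   // same thread, same name: reported
           { ... }
       }

   The walk along the prev links tries to recover the outer critical's
   source location for the CnsNestingSameName message; when no match is
   found (possible with interleaved criticals in Fortran), a blank
   ct_critical record is reported instead.
*/
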
void
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_ASSERT( gtid == __kmp_get_gtid() );
    KE_TRACE( 10, ("__kmp_push_sync (gtid=%d)\n", gtid ) );
    __kmp_check_sync( gtid, ct, ident, lck );
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++ p->stack_top;
    p->stack_data[ tos ].type  = ct;
    p->stack_data[ tos ].prev  = p->s_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = lck;
    p->s_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

/* ------------------------------------------------------------------------ */

void
__kmp_pop_parallel( int gtid, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->p_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident );
    }
    if ( tos != p->p_top || p->stack_data[ tos ].type != ct_parallel ) {
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct_parallel, ident,
            & p->stack_data[ tos ]
        );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->p_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

enum cons_type
__kmp_pop_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->w_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    }

    if ( tos != p->w_top ||
         ( p->stack_data[ tos ].type != ct &&
           /* below are two exceptions to the rule that construct types must match */
           ! ( p->stack_data[ tos ].type == ct_pdo_ordered  && ct == ct_pdo  ) &&
           ! ( p->stack_data[ tos ].type == ct_task_ordered && ct == ct_task )
         )
       ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->w_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
    return p->stack_data[ p->w_top ].type;
}

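/*
   Return-value note: after the pop, the type of the new w_top entry is
   returned, telling the caller which worksharing construct (if any) it
   is still nested in. The two exceptions above let an "ordered" variant
   be closed with its plain counterpart (a construct pushed as
   ct_pdo_ordered may be popped with ct == ct_pdo), presumably because
   the end-of-construct call does not always carry the ordered flag.
*/
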
void
__kmp_pop_sync( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->s_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    };
    if ( tos != p->s_top || p->stack_data[ tos ].type != ct ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    };
    if ( gtid < 0 ) {
        __kmp_check_null_func();
    };
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->s_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

/* ------------------------------------------------------------------------ */

void
__kmp_check_barrier( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    KE_TRACE( 10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid, __kmp_get_gtid() ) );
    if ( ident != 0 ) {
        __kmp_check_null_func();
    }
    if ( p->w_top > p->p_top ) {
        /* we are already in a WORKSHARING construct for this PARALLEL region */
        __kmp_error_construct2(
            kmp_i18n_msg_CnsInvalidNesting,
            ct, ident,
            & p->stack_data[ p->w_top ]
        );
    }
    if ( p->s_top > p->p_top ) {
        /* we are already in a SYNC construct for this PARALLEL region */
        __kmp_error_construct2(
            kmp_i18n_msg_CnsInvalidNesting,
            ct, ident,
            & p->stack_data[ p->s_top ]
        );
    }
}

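/*
   Illustrative misuse this check reports (hypothetical user code): a
   barrier may not appear inside a worksharing or synchronization
   construct of the enclosing parallel region, e.g.

       #pragma omp parallel
       {
           #pragma omp single
           {
               #pragma omp barrier   // rejected here: w_top > p_top
           }
       }
*/
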
/* ------------------------------------------------------------------------ */


/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */