00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044
00045
00046
00047 #include "kmp.h"
00048 #include "kmp_i18n.h"
00049 #include "kmp_str.h"
00050 #include "kmp_error.h"
00051
00052
00053
00054
/* Minimum number of entries in a construct-check stack; also used as the
   additive term when the stack is grown (see __kmp_expand_cons_stack). */
#define MIN_STACK 100

/* Construct names for diagnostics addressed to Fortran code.
   Indexed by enum cons_type (declared elsewhere) -- NOTE(review): ordering
   must match that enum; confirm against its declaration.  Several distinct
   construct types deliberately share the same display text.  Keep in sync
   with cons_text_c below. */
static char const * cons_text_fort[] = {
"(none)",
"PARALLEL",
"work-sharing",
"ORDERED work-sharing",
"SECTIONS",
"work-sharing",
"TASKQ",
"TASKQ",
"TASKQ ORDERED",
"CRITICAL",
"ORDERED",
"ORDERED",
"ORDERED",
"MASTER",
"REDUCE",
"BARRIER"
};

/* Construct names for diagnostics addressed to C/C++ code; same indexing and
   ordering requirements as cons_text_fort above. */
static char const * cons_text_c[] = {
"(none)",
"\"parallel\"",
"work-sharing",
"\"ordered\" work-sharing",
"\"sections\"",
"work-sharing",
"\"taskq\"",
"\"taskq\"",
"\"taskq ordered\"",
"\"critical\"",
"\"ordered\"",
"\"ordered\"",
"\"ordered\"",
"\"master\"",
"\"reduce\"",
"\"barrier\""
};

/* Extract the source-location string from an ident, tolerating NULL. */
#define get_src( ident ) ( (ident) == NULL ? NULL : (ident)->psource )

/* Printf-style argument lists for KE_TRACE messages when pushing/popping
   construct-stack entries.  NOTE: POP_MSG expands references to locals `p`
   and `tos`, which must exist at the expansion site. */
#define PUSH_MSG( ct, ident ) \
"\tpushing on stack: %s (%s)\n", cons_text_c[ (ct) ], get_src( (ident) )
#define POP_MSG( p ) \
"\tpopping off stack: %s (%s)\n", \
cons_text_c[ (p)->stack_data[ tos ].type ], \
get_src( (p)->stack_data[ tos ].ident )

/* Element counts of the two tables above, used for bounds checking. */
static int const cons_text_fort_num = sizeof( cons_text_fort ) / sizeof( char const * );
static int const cons_text_c_num = sizeof( cons_text_c ) / sizeof( char const * );
00106
00107
00108
00109
00110
/* Deliberately empty function.  Called on the gtid < 0 paths throughout this
   file -- presumably a hook / breakpoint target for calls arriving from
   serial (un-registered) threads; TODO confirm intent. */
static void
__kmp_check_null_func( void )
{
    /* nothing */
}
00116
00117 static void
00118 __kmp_expand_cons_stack( int gtid, struct cons_header *p )
00119 {
00120 int i;
00121 struct cons_data *d;
00122
00123
00124 if (gtid < 0)
00125 __kmp_check_null_func();
00126
00127 KE_TRACE( 10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid() ) );
00128
00129 d = p->stack_data;
00130
00131 p->stack_size = (p->stack_size * 2) + 100;
00132
00133
00134 p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (p->stack_size+1) );
00135
00136 for (i = p->stack_top; i >= 0; --i)
00137 p->stack_data[i] = d[i];
00138
00139
00140 }
00141
00142
00143 static char const *
00144 __kmp_pragma(
00145 enum cons_type ct,
00146 ident_t const * ident
00147 ) {
00148 char const * cons = NULL;
00149 char * file = NULL;
00150 char * func = NULL;
00151 char * line = NULL;
00152 kmp_str_buf_t buffer;
00153 kmp_msg_t prgm;
00154 __kmp_str_buf_init( & buffer );
00155 if ( 0 < ct && ct <= cons_text_c_num ) {;
00156 cons = cons_text_c[ ct ];
00157 } else {
00158 KMP_DEBUG_ASSERT( 0 );
00159 };
00160 if ( ident != NULL && ident->psource != NULL ) {
00161 char * tail = NULL;
00162 __kmp_str_buf_print( & buffer, "%s", ident->psource );
00163
00164 tail = buffer.str;
00165 __kmp_str_split( tail, ';', NULL, & tail );
00166 __kmp_str_split( tail, ';', & file, & tail );
00167 __kmp_str_split( tail, ';', & func, & tail );
00168 __kmp_str_split( tail, ';', & line, & tail );
00169 };
00170 prgm = __kmp_msg_format( kmp_i18n_fmt_Pragma, cons, file, func, line );
00171 __kmp_str_buf_free( & buffer );
00172 return prgm.str;
00173 }
00174
00175
00176
00177
00178
00179
00180 void
00181 __kmp_error_construct(
00182 kmp_i18n_id_t id,
00183 enum cons_type ct,
00184 ident_t const * ident
00185 ) {
00186 char const * construct = __kmp_pragma( ct, ident );
00187 __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct ), __kmp_msg_null );
00188 KMP_INTERNAL_FREE( (void *) construct );
00189 }
00190
00191 void
00192 __kmp_error_construct2(
00193 kmp_i18n_id_t id,
00194 enum cons_type ct,
00195 ident_t const * ident,
00196 struct cons_data const * cons
00197 ) {
00198 char const * construct1 = __kmp_pragma( ct, ident );
00199 char const * construct2 = __kmp_pragma( cons->type, cons->ident );
00200 __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct1, construct2 ), __kmp_msg_null );
00201 KMP_INTERNAL_FREE( (void *) construct1 );
00202 KMP_INTERNAL_FREE( (void *) construct2 );
00203 }
00204
00205
00206 struct cons_header *
00207 __kmp_allocate_cons_stack( int gtid )
00208 {
00209 struct cons_header *p;
00210
00211
00212 if ( gtid < 0 ) {
00213 __kmp_check_null_func();
00214 };
00215 KE_TRACE( 10, ("allocate cons_stack (%d)\n", gtid ) );
00216 p = (struct cons_header *) __kmp_allocate( sizeof( struct cons_header ) );
00217 p->p_top = p->w_top = p->s_top = 0;
00218 p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (MIN_STACK+1) );
00219 p->stack_size = MIN_STACK;
00220 p->stack_top = 0;
00221 p->stack_data[ 0 ].type = ct_none;
00222 p->stack_data[ 0 ].prev = 0;
00223 p->stack_data[ 0 ].ident = NULL;
00224 return p;
00225 }
00226
00227 void
00228 __kmp_free_cons_stack( void * ptr ) {
00229 struct cons_header * p = (struct cons_header *) ptr;
00230 if ( p != NULL ) {
00231 if ( p->stack_data != NULL ) {
00232 __kmp_free( p->stack_data );
00233 p->stack_data = NULL;
00234 };
00235 __kmp_free( p );
00236 };
00237 }
00238
00239
00240 static void
00241 dump_cons_stack( int gtid, struct cons_header * p ) {
00242 int i;
00243 int tos = p->stack_top;
00244 kmp_str_buf_t buffer;
00245 __kmp_str_buf_init( & buffer );
00246 __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" );
00247 __kmp_str_buf_print( & buffer, "Begin construct stack with %d items for thread %d\n", tos, gtid );
00248 __kmp_str_buf_print( & buffer, " stack_top=%d { P=%d, W=%d, S=%d }\n", tos, p->p_top, p->w_top, p->s_top );
00249 for ( i = tos; i > 0; i-- ) {
00250 struct cons_data * c = & ( p->stack_data[ i ] );
00251 __kmp_str_buf_print( & buffer, " stack_data[%2d] = { %s (%s) %d %p }\n", i, cons_text_c[ c->type ], get_src( c->ident ), c->prev, c->name );
00252 };
00253 __kmp_str_buf_print( & buffer, "End construct stack for thread %d\n", gtid );
00254 __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" );
00255 __kmp_debug_printf( "%s", buffer.str );
00256 __kmp_str_buf_free( & buffer );
00257 }
00258
00259 void
00260 __kmp_push_parallel( int gtid, ident_t const * ident )
00261 {
00262 int tos;
00263 struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
00264
00265 KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]-> th.th_cons );
00266 KE_TRACE( 10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
00267 KE_TRACE( 100, ( PUSH_MSG( ct_parallel, ident ) ) );
00268 if ( p->stack_top >= p->stack_size ) {
00269 __kmp_expand_cons_stack( gtid, p );
00270 };
00271 tos = ++p->stack_top;
00272 p->stack_data[ tos ].type = ct_parallel;
00273 p->stack_data[ tos ].prev = p->p_top;
00274 p->stack_data[ tos ].ident = ident;
00275 p->stack_data[ tos ].name = NULL;
00276 p->p_top = tos;
00277 KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
00278 }
00279
/*
 * Verify that entering work-sharing construct `ct` at `ident` is legal given
 * thread `gtid`'s current construct stack; issues a fatal error otherwise.
 * Also grows the stack pre-emptively so the subsequent push cannot overflow.
 */
void
__kmp_check_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]-> th.th_cons );
    KE_TRACE( 10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );

    /* Expand here so __kmp_push_workshare need not re-check the size. */
    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    };
    /* w_top > p_top: the most recent work-sharing entry is inside the current
       parallel region, i.e. this would nest two work-sharing constructs.
       Exception: TASKQ may nest inside TASKQ. */
    if ( p->w_top > p->p_top &&
    !(IS_CONS_TYPE_TASKQ(p->stack_data[ p->w_top ].type) && IS_CONS_TYPE_TASKQ(ct))) {

    __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->w_top ] );
    };
    /* s_top > p_top: inside a sync construct (critical/ordered/...) belonging
       to the current parallel region -- also invalid nesting. */
    if ( p->s_top > p->p_top ) {

    __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->s_top ] );
    };
}
00302
00303 void
00304 __kmp_push_workshare( int gtid, enum cons_type ct, ident_t const * ident )
00305 {
00306 int tos;
00307 struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
00308 KE_TRACE( 10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
00309 __kmp_check_workshare( gtid, ct, ident );
00310 KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
00311 tos = ++p->stack_top;
00312 p->stack_data[ tos ].type = ct;
00313 p->stack_data[ tos ].prev = p->w_top;
00314 p->stack_data[ tos ].ident = ident;
00315 p->stack_data[ tos ].name = NULL;
00316 p->w_top = tos;
00317 KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
00318 }
00319
/*
 * Verify that entering synchronization construct `ct` (ORDERED, CRITICAL,
 * MASTER, REDUCE) at `ident` is legal for thread `gtid`; issues a fatal
 * error on invalid nesting.  `lck` is the named-critical lock, used only for
 * the CRITICAL self-deadlock check.  Grows the stack pre-emptively for the
 * subsequent __kmp_push_sync.
 */
void
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid() ) );

    if (p->stack_top >= p->stack_size)
        __kmp_expand_cons_stack( gtid, p );

    if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo || ct == ct_ordered_in_taskq ) {
        /* ORDERED: must be bound to an enclosing work-sharing construct that
           carries an ordered clause. */
        if (p->w_top <= p->p_top) {
            /* Not inside a work-sharing construct of the current parallel region. */
#ifdef BUILD_PARALLEL_ORDERED
            /* Build variant that permits ORDERED directly in PARALLEL. */
            KMP_ASSERT( ct == ct_ordered_in_parallel );
#else
            __kmp_error_construct( kmp_i18n_msg_CnsBoundToWorksharing, ct, ident );
#endif
        } else {
            /* Inside a work-sharing construct: it must be one of the
               *_ordered types; a plain taskq gets a more specific message. */
            if (!IS_CONS_TYPE_ORDERED(p->stack_data[ p->w_top ].type)) {
                if (p->stack_data[ p->w_top ].type == ct_taskq) {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNotInTaskConstruct,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                } else {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNoOrderedClause,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                }
            }
        }
        /* ORDERED nested inside CRITICAL, or inside certain compiler-emitted
           (KMP_IDENT_KMPC) ordered regions, is invalid.  Only the top sync
           entry of the current parallel/work-sharing scope is examined. */
        if (p->s_top > p->p_top && p->s_top > p->w_top) {
            /* Inside a sync construct which is inside a work-sharing construct. */
            int index = p->s_top;
            enum cons_type stack_type;

            stack_type = p->stack_data[ index ].type;

            if (stack_type == ct_critical ||
                ( ( stack_type == ct_ordered_in_parallel ||
                    stack_type == ct_ordered_in_pdo ||
                    stack_type == ct_ordered_in_taskq ) &&
                  p->stack_data[ index ].ident != NULL &&
                  (p->stack_data[ index ].ident->flags & KMP_IDENT_KMPC ))) {
                /* Inside a CRITICAL or an (implicit) ORDERED construct. */
                __kmp_error_construct2(
                    kmp_i18n_msg_CnsInvalidNesting,
                    ct, ident,
                    & p->stack_data[ index ]
                );
            }
        }
    } else if ( ct == ct_critical ) {
        /* CRITICAL: detect self-deadlock -- this thread already owns `lck`,
           so search the sync chain for an entry with the same lock name. */
        if ( lck != NULL && __kmp_get_user_lock_owner( lck ) == gtid ) {
            int index = p->s_top;
            struct cons_data cons = { NULL, ct_critical, 0, NULL };
            /* Walk the sync chain (via prev links) looking for this lock. */
            while ( index != 0 && p->stack_data[ index ].name != lck ) {
                index = p->stack_data[ index ].prev;
            }
            if ( index != 0 ) {
                /* Found the enclosing CRITICAL with the same name. */
                cons = p->stack_data[ index ];
            }
            /* Reported even if not found on this stack -- the owner check
               already proved the nesting; `cons` then carries placeholders. */
            __kmp_error_construct2( kmp_i18n_msg_CnsNestingSameName, ct, ident, & cons );
        }
    } else if ( ct == ct_master || ct == ct_reduce ) {
        /* MASTER / REDUCE may not appear inside a work-sharing construct ... */
        if (p->w_top > p->p_top) {
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->w_top ]
            );
        }
        /* ... and REDUCE additionally may not appear inside a sync construct
           of the current parallel region. */
        if (ct == ct_reduce && p->s_top > p->p_top) {
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->s_top ]
            );
        };
    };
}
00412
00413 void
00414 __kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
00415 {
00416 int tos;
00417 struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
00418
00419 KMP_ASSERT( gtid == __kmp_get_gtid() );
00420 KE_TRACE( 10, ("__kmp_push_sync (gtid=%d)\n", gtid ) );
00421 __kmp_check_sync( gtid, ct, ident, lck );
00422 KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
00423 tos = ++ p->stack_top;
00424 p->stack_data[ tos ].type = ct;
00425 p->stack_data[ tos ].prev = p->s_top;
00426 p->stack_data[ tos ].ident = ident;
00427 p->stack_data[ tos ].name = lck;
00428 p->s_top = tos;
00429 KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
00430 }
00431
00432
00433
00434 void
00435 __kmp_pop_parallel( int gtid, ident_t const * ident )
00436 {
00437 int tos;
00438 struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
00439 tos = p->stack_top;
00440 KE_TRACE( 10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
00441 if ( tos == 0 || p->p_top == 0 ) {
00442 __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident );
00443 }
00444 if ( tos != p->p_top || p->stack_data[ tos ].type != ct_parallel ) {
00445 __kmp_error_construct2(
00446 kmp_i18n_msg_CnsExpectedEnd,
00447 ct_parallel, ident,
00448 & p->stack_data[ tos ]
00449 );
00450 }
00451 KE_TRACE( 100, ( POP_MSG( p ) ) );
00452 p->p_top = p->stack_data[ tos ].prev;
00453 p->stack_data[ tos ].type = ct_none;
00454 p->stack_data[ tos ].ident = NULL;
00455 p->stack_top = tos - 1;
00456 KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
00457 }
00458
/*
 * Pop a work-sharing construct of type `ct` from thread `gtid`'s stack.
 * Issues a fatal error if the stack is empty, no work-sharing entry exists,
 * or the top entry does not match `ct` (with the two *_ordered equivalences
 * noted below).  Returns the type of the now-topmost work-sharing entry
 * (ct_none sentinel when the chain is empty).
 */
enum cons_type
__kmp_pop_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->w_top == 0 ) {
        /* Nothing to pop: no open work-sharing construct. */
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    }

    /* The top entry must be the expected work-sharing construct.  An entry
       pushed with an ordered clause (ct_pdo_ordered / ct_task_ordered) may be
       popped with the plain type (ct_pdo / ct_task). */
    if ( tos != p->w_top ||
    ( p->stack_data[ tos ].type != ct &&

    ! ( p->stack_data[ tos ].type == ct_pdo_ordered && ct == ct_pdo ) &&
    ! ( p->stack_data[ tos ].type == ct_task_ordered && ct == ct_task )
    )
    ) {
    __kmp_check_null_func();
    __kmp_error_construct2(
    kmp_i18n_msg_CnsExpectedEnd,
    ct, ident,
    & p->stack_data[ tos ]
    );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->w_top = p->stack_data[ tos ].prev;   /* unlink from the work-sharing chain */
    p->stack_data[ tos ].type = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
    return p->stack_data[ p->w_top ].type;
}
00493
00494 void
00495 __kmp_pop_sync( int gtid, enum cons_type ct, ident_t const * ident )
00496 {
00497 int tos;
00498 struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
00499 tos = p->stack_top;
00500 KE_TRACE( 10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid() ) );
00501 if ( tos == 0 || p->s_top == 0 ) {
00502 __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
00503 };
00504 if ( tos != p->s_top || p->stack_data[ tos ].type != ct ) {
00505 __kmp_check_null_func();
00506 __kmp_error_construct2(
00507 kmp_i18n_msg_CnsExpectedEnd,
00508 ct, ident,
00509 & p->stack_data[ tos ]
00510 );
00511 };
00512 if ( gtid < 0 ) {
00513 __kmp_check_null_func();
00514 };
00515 KE_TRACE( 100, ( POP_MSG( p ) ) );
00516 p->s_top = p->stack_data[ tos ].prev;
00517 p->stack_data[ tos ].type = ct_none;
00518 p->stack_data[ tos ].ident = NULL;
00519 p->stack_top = tos - 1;
00520 KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
00521 }
00522
00523
00524
00525 void
00526 __kmp_check_barrier( int gtid, enum cons_type ct, ident_t const * ident )
00527 {
00528 struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
00529 KE_TRACE( 10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid, __kmp_get_gtid() ) );
00530 if ( ident != 0 ) {
00531 __kmp_check_null_func();
00532 }
00533 if ( p->w_top > p->p_top ) {
00534
00535 __kmp_error_construct2(
00536 kmp_i18n_msg_CnsInvalidNesting,
00537 ct, ident,
00538 & p->stack_data[ p->w_top ]
00539 );
00540 }
00541 if (p->s_top > p->p_top) {
00542
00543 __kmp_error_construct2(
00544 kmp_i18n_msg_CnsInvalidNesting,
00545 ct, ident,
00546 & p->stack_data[ p->s_top ]
00547 );
00548 }
00549 }
00550
00551
00552
00553
00554
00555