00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044
00045
00046
#include "kmp.h"
#include "kmp_wrapper_getpid.h"
#include "kmp_str.h"
#include "kmp_i18n.h"
#include "kmp_io.h"

#include <alloca.h>
#include <math.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/times.h>
00060
00061 #if KMP_OS_LINUX
00062 # include <sys/sysinfo.h>
00063 # if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
00064
00065
00066
00067
00068
00069 # ifndef FUTEX_WAIT
00070 # define FUTEX_WAIT 0
00071 # endif
00072 # ifndef FUTEX_WAKE
00073 # define FUTEX_WAKE 1
00074 # endif
00075 # endif
00076 #elif KMP_OS_DARWIN
00077 # include <sys/sysctl.h>
00078 # include <mach/mach.h>
00079 #endif
00080
00081
00082 #include <dirent.h>
00083 #include <ctype.h>
00084 #include <fcntl.h>
00085
00086
00087
00088
// Per-process system timer state: holds the time origin captured at runtime
// initialization; elapsed times are measured relative to this.
struct kmp_sys_timer {
    struct timespec     start;
};

// Convert a struct timespec to nanoseconds.
// NOTE(review): tv_sec * 1e9 promotes the result to double, so values lose
// sub-nanosecond precision for very large times — presumably acceptable for
// elapsed-time measurement.
#define TS2NS(timespec) (((timespec).tv_sec * 1e9) + (timespec).tv_nsec)

static struct kmp_sys_timer __kmp_sys_timer_data;

#if KMP_HANDLE_SIGNALS
    typedef void (* sig_func_t )( int );
    // Previously-installed signal handlers, saved so they can be restored.
    STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[ NSIG ];
    // Signal set manipulated by the library's signal-handling code.
    static sigset_t __kmp_sigset;
#endif

// TRUE once the OS-level runtime state in this file has been initialized.
static int __kmp_init_runtime   = FALSE;

// Counts fork() events — presumably used to re-initialize after fork; confirm
// against the fork handlers elsewhere in this file.
static int __kmp_fork_count = 0;

// Attributes used when creating per-thread suspend condition vars / mutexes.
static pthread_condattr_t  __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

// Condition variable + mutex the monitor thread sleeps on between wakeups
// (see __kmp_launch_monitor).
static kmp_cond_align_t    __kmp_wait_cv;
static kmp_mutex_align_t   __kmp_wait_mx;
00113
00114
00115
00116
#ifdef DEBUG_SUSPEND
/*
 * Format the internal state of a condition variable into `buffer` for debug
 * tracing.  Reads glibc/LinuxThreads-internal fields (__c_lock, __c_waiting),
 * so this is compiled only for DEBUG_SUSPEND builds.
 * NOTE(review): sprintf is unbounded — callers must pass a buffer large
 * enough for the formatted text.
 */
static void
__kmp_print_cond( char *buffer, kmp_cond_align_t *cond )
{
    sprintf( buffer, "(cond (lock (%ld, %d)), (descr (%p)))",
        cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
        cond->c_cond.__c_waiting );
}
#endif
00126
00127
00128
00129
00130 #if KMP_OS_LINUX
00131
00132
00133
00134
00135
00136
00137
00138
00139
00140
00141
00142
00143 # if KMP_ARCH_X86
00144 # ifndef __NR_sched_setaffinity
00145 # define __NR_sched_setaffinity 241
00146 # elif __NR_sched_setaffinity != 241
00147 # error Wrong code for setaffinity system call.
00148 # endif
00149 # ifndef __NR_sched_getaffinity
00150 # define __NR_sched_getaffinity 242
00151 # elif __NR_sched_getaffinity != 242
00152 # error Wrong code for getaffinity system call.
00153 # endif
00154
00155 # elif KMP_ARCH_X86_64
00156 # ifndef __NR_sched_setaffinity
00157 # define __NR_sched_setaffinity 203
00158 # elif __NR_sched_setaffinity != 203
00159 # error Wrong code for setaffinity system call.
00160 # endif
00161 # ifndef __NR_sched_getaffinity
00162 # define __NR_sched_getaffinity 204
00163 # elif __NR_sched_getaffinity != 204
00164 # error Wrong code for getaffinity system call.
00165 # endif
00166
00167 # else
00168 # error Unknown or unsupported architecture
00169
00170 # endif
00171
00172 int
00173 __kmp_set_system_affinity( kmp_affin_mask_t const *mask, int abort_on_error )
00174 {
00175 KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
00176 "Illegal set affinity operation when not capable");
00177
00178 int retval = syscall( __NR_sched_setaffinity, 0, __kmp_affin_mask_size, mask );
00179 if (retval >= 0) {
00180 return 0;
00181 }
00182 int error = errno;
00183 if (abort_on_error) {
00184 __kmp_msg(
00185 kmp_ms_fatal,
00186 KMP_MSG( FatalSysError ),
00187 KMP_ERR( error ),
00188 __kmp_msg_null
00189 );
00190 }
00191 return error;
00192 }
00193
00194 int
00195 __kmp_get_system_affinity( kmp_affin_mask_t *mask, int abort_on_error )
00196 {
00197 KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
00198 "Illegal get affinity operation when not capable");
00199
00200 int retval = syscall( __NR_sched_getaffinity, 0, __kmp_affin_mask_size, mask );
00201 if (retval >= 0) {
00202 return 0;
00203 }
00204 int error = errno;
00205 if (abort_on_error) {
00206 __kmp_msg(
00207 kmp_ms_fatal,
00208 KMP_MSG( FatalSysError ),
00209 KMP_ERR( error ),
00210 __kmp_msg_null
00211 );
00212 }
00213 return error;
00214 }
00215
00216 void
00217 __kmp_affinity_bind_thread( int which )
00218 {
00219 KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
00220 "Illegal set affinity operation when not capable");
00221
00222 kmp_affin_mask_t *mask = (kmp_affin_mask_t *)alloca(__kmp_affin_mask_size);
00223 KMP_CPU_ZERO(mask);
00224 KMP_CPU_SET(which, mask);
00225 __kmp_set_system_affinity(mask, TRUE);
00226 }
00227
00228
00229
00230
00231
00232
00233 void
00234 __kmp_affinity_determine_capable(const char *env_var)
00235 {
00236
00237
00238
00239
00240 # define KMP_CPU_SET_SIZE_LIMIT (1024*1024)
00241
00242 int gCode;
00243 int sCode;
00244 kmp_affin_mask_t *buf;
00245 buf = ( kmp_affin_mask_t * ) KMP_INTERNAL_MALLOC( KMP_CPU_SET_SIZE_LIMIT );
00246
00247
00248
00249
00250 gCode = syscall( __NR_sched_getaffinity, 0, KMP_CPU_SET_SIZE_LIMIT, buf );
00251 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
00252 "intial getaffinity call returned %d errno = %d\n",
00253 gCode, errno));
00254
00255
00256 if (gCode < 0) {
00257
00258
00259
00260 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
00261 && (__kmp_affinity_type != affinity_none)
00262 && (__kmp_affinity_type != affinity_default)
00263 && (__kmp_affinity_type != affinity_disabled))) {
00264 int error = errno;
00265 __kmp_msg(
00266 kmp_ms_warning,
00267 KMP_MSG( GetAffSysCallNotSupported, env_var ),
00268 KMP_ERR( error ),
00269 __kmp_msg_null
00270 );
00271 }
00272 __kmp_affin_mask_size = 0;
00273 KMP_INTERNAL_FREE(buf);
00274 return;
00275 }
00276 if (gCode > 0) {
00277
00278
00279
00280
00281
00282 sCode = syscall( __NR_sched_setaffinity, 0, gCode, NULL );
00283 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
00284 "setaffinity for mask size %d returned %d errno = %d\n",
00285 gCode, sCode, errno));
00286 if (sCode < 0) {
00287 if (errno == ENOSYS) {
00288 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
00289 && (__kmp_affinity_type != affinity_none)
00290 && (__kmp_affinity_type != affinity_default)
00291 && (__kmp_affinity_type != affinity_disabled))) {
00292 int error = errno;
00293 __kmp_msg(
00294 kmp_ms_warning,
00295 KMP_MSG( SetAffSysCallNotSupported, env_var ),
00296 KMP_ERR( error ),
00297 __kmp_msg_null
00298 );
00299 }
00300 __kmp_affin_mask_size = 0;
00301 KMP_INTERNAL_FREE(buf);
00302 }
00303 if (errno == EFAULT) {
00304 __kmp_affin_mask_size = gCode;
00305 KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
00306 "affinity supported (mask size %d)\n",
00307 (int)__kmp_affin_mask_size));
00308 KMP_INTERNAL_FREE(buf);
00309 return;
00310 }
00311 }
00312 }
00313
00314
00315
00316
00317
00318 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
00319 "searching for proper set size\n"));
00320 int size;
00321 for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
00322 gCode = syscall( __NR_sched_getaffinity, 0, size, buf );
00323 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
00324 "getaffinity for mask size %d returned %d errno = %d\n", size,
00325 gCode, errno));
00326
00327 if (gCode < 0) {
00328 if ( errno == ENOSYS )
00329 {
00330
00331
00332
00333 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
00334 "inconsistent OS call behavior: errno == ENOSYS for mask size %d\n",
00335 size));
00336 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
00337 && (__kmp_affinity_type != affinity_none)
00338 && (__kmp_affinity_type != affinity_default)
00339 && (__kmp_affinity_type != affinity_disabled))) {
00340 int error = errno;
00341 __kmp_msg(
00342 kmp_ms_warning,
00343 KMP_MSG( GetAffSysCallNotSupported, env_var ),
00344 KMP_ERR( error ),
00345 __kmp_msg_null
00346 );
00347 }
00348 __kmp_affin_mask_size = 0;
00349 KMP_INTERNAL_FREE(buf);
00350 return;
00351 }
00352 continue;
00353 }
00354
00355 sCode = syscall( __NR_sched_setaffinity, 0, gCode, NULL );
00356 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
00357 "setaffinity for mask size %d returned %d errno = %d\n",
00358 gCode, sCode, errno));
00359 if (sCode < 0) {
00360 if (errno == ENOSYS) {
00361
00362
00363
00364 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
00365 "inconsistent OS call behavior: errno == ENOSYS for mask size %d\n",
00366 size));
00367 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
00368 && (__kmp_affinity_type != affinity_none)
00369 && (__kmp_affinity_type != affinity_default)
00370 && (__kmp_affinity_type != affinity_disabled))) {
00371 int error = errno;
00372 __kmp_msg(
00373 kmp_ms_warning,
00374 KMP_MSG( SetAffSysCallNotSupported, env_var ),
00375 KMP_ERR( error ),
00376 __kmp_msg_null
00377 );
00378 }
00379 __kmp_affin_mask_size = 0;
00380 KMP_INTERNAL_FREE(buf);
00381 return;
00382 }
00383 if (errno == EFAULT) {
00384 __kmp_affin_mask_size = gCode;
00385 KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
00386 "affinity supported (mask size %d)\n",
00387 (int)__kmp_affin_mask_size));
00388 KMP_INTERNAL_FREE(buf);
00389 return;
00390 }
00391 }
00392 }
00393
00394 KMP_INTERNAL_FREE(buf);
00395
00396
00397
00398
00399
00400 __kmp_affin_mask_size = 0;
00401 KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
00402 "cannot determine mask size - affinity not supported\n"));
00403 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
00404 && (__kmp_affinity_type != affinity_none)
00405 && (__kmp_affinity_type != affinity_default)
00406 && (__kmp_affinity_type != affinity_disabled))) {
00407 KMP_WARNING( AffCantGetMaskSize, env_var );
00408 }
00409 }
00410
00411
00412
00413
00414
00415
00416
00417
00418 void
00419 __kmp_change_thread_affinity_mask( int gtid, kmp_affin_mask_t *new_mask,
00420 kmp_affin_mask_t *old_mask )
00421 {
00422 KMP_DEBUG_ASSERT( gtid == __kmp_get_gtid() );
00423 if ( KMP_AFFINITY_CAPABLE() ) {
00424 int status;
00425 kmp_info_t *th = __kmp_threads[ gtid ];
00426
00427 KMP_DEBUG_ASSERT( new_mask != NULL );
00428
00429 if ( old_mask != NULL ) {
00430 status = __kmp_get_system_affinity( old_mask, TRUE );
00431 int error = errno;
00432 if ( status != 0 ) {
00433 __kmp_msg(
00434 kmp_ms_fatal,
00435 KMP_MSG( ChangeThreadAffMaskError ),
00436 KMP_ERR( error ),
00437 __kmp_msg_null
00438 );
00439 }
00440 }
00441
00442 __kmp_set_system_affinity( new_mask, TRUE );
00443
00444 if (__kmp_affinity_verbose) {
00445 char old_buf[KMP_AFFIN_MASK_PRINT_LEN];
00446 char new_buf[KMP_AFFIN_MASK_PRINT_LEN];
00447 __kmp_affinity_print_mask(old_buf, KMP_AFFIN_MASK_PRINT_LEN, old_mask);
00448 __kmp_affinity_print_mask(new_buf, KMP_AFFIN_MASK_PRINT_LEN, new_mask);
00449 KMP_INFORM( ChangeAffMask, "KMP_AFFINITY (Bind)", gtid, old_buf, new_buf );
00450
00451 }
00452
00453
00454 KMP_DEBUG_ASSERT( old_mask != NULL && (memcmp(old_mask,
00455 th->th.th_affin_mask, __kmp_affin_mask_size) == 0) );
00456 KMP_CPU_COPY( th->th.th_affin_mask, new_mask );
00457 }
00458 }
00459
00460 #endif // KMP_OS_LINUX
00461
00462
00463
00464
00465 #if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
00466
00467 int
00468 __kmp_futex_determine_capable()
00469 {
00470 int loc = 0;
00471 int rc = syscall( __NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0 );
00472 int retval = ( rc == 0 ) || ( errno != ENOSYS );
00473
00474 KA_TRACE(10, ( "__kmp_futex_determine_capable: rc = %d errno = %d\n", rc,
00475 errno ) );
00476 KA_TRACE(10, ( "__kmp_futex_determine_capable: futex syscall%s supported\n",
00477 retval ? "" : " not" ) );
00478
00479 return retval;
00480 }
00481
00482 #endif // KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
00483
00484
00485
00486
00487 #if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
00488
00489
00490
00491
00492
00493 kmp_int32
00494 __kmp_test_then_or32( volatile kmp_int32 *p, kmp_int32 d )
00495 {
00496 kmp_int32 old_value, new_value;
00497
00498 old_value = TCR_4( *p );
00499 new_value = old_value | d;
00500
00501 while ( ! __kmp_compare_and_store32 ( p, old_value, new_value ) )
00502 {
00503 KMP_CPU_PAUSE();
00504 old_value = TCR_4( *p );
00505 new_value = old_value | d;
00506 }
00507 return old_value;
00508 }
00509
00510 kmp_int32
00511 __kmp_test_then_and32( volatile kmp_int32 *p, kmp_int32 d )
00512 {
00513 kmp_int32 old_value, new_value;
00514
00515 old_value = TCR_4( *p );
00516 new_value = old_value & d;
00517
00518 while ( ! __kmp_compare_and_store32 ( p, old_value, new_value ) )
00519 {
00520 KMP_CPU_PAUSE();
00521 old_value = TCR_4( *p );
00522 new_value = old_value & d;
00523 }
00524 return old_value;
00525 }
00526
00527 # if KMP_ARCH_X86
00528 kmp_int64
00529 __kmp_test_then_add64( volatile kmp_int64 *p, kmp_int64 d )
00530 {
00531 kmp_int64 old_value, new_value;
00532
00533 old_value = TCR_8( *p );
00534 new_value = old_value + d;
00535
00536 while ( ! __kmp_compare_and_store64 ( p, old_value, new_value ) )
00537 {
00538 KMP_CPU_PAUSE();
00539 old_value = TCR_8( *p );
00540 new_value = old_value + d;
00541 }
00542 return old_value;
00543 }
00544 # endif
00545
00546 kmp_int64
00547 __kmp_test_then_or64( volatile kmp_int64 *p, kmp_int64 d )
00548 {
00549 kmp_int64 old_value, new_value;
00550
00551 old_value = TCR_8( *p );
00552 new_value = old_value | d;
00553 while ( ! __kmp_compare_and_store64 ( p, old_value, new_value ) )
00554 {
00555 KMP_CPU_PAUSE();
00556 old_value = TCR_8( *p );
00557 new_value = old_value | d;
00558 }
00559 return old_value;
00560 }
00561
00562 kmp_int64
00563 __kmp_test_then_and64( volatile kmp_int64 *p, kmp_int64 d )
00564 {
00565 kmp_int64 old_value, new_value;
00566
00567 old_value = TCR_8( *p );
00568 new_value = old_value & d;
00569 while ( ! __kmp_compare_and_store64 ( p, old_value, new_value ) )
00570 {
00571 KMP_CPU_PAUSE();
00572 old_value = TCR_8( *p );
00573 new_value = old_value & d;
00574 }
00575 return old_value;
00576 }
00577
00578 #endif
00579
00580 void
00581 __kmp_terminate_thread( int gtid )
00582 {
00583 int status;
00584 kmp_info_t *th = __kmp_threads[ gtid ];
00585
00586 if ( !th ) return;
00587
00588 #ifdef KMP_CANCEL_THREADS
00589 KA_TRACE( 10, ("__kmp_terminate_thread: kill (%d)\n", gtid ) );
00590 status = pthread_cancel( th->th.th_info.ds.ds_thread );
00591 if ( status != 0 && status != ESRCH ) {
00592 __kmp_msg(
00593 kmp_ms_fatal,
00594 KMP_MSG( CantTerminateWorkerThread ),
00595 KMP_ERR( status ),
00596 __kmp_msg_null
00597 );
00598 };
00599 #endif
00600 __kmp_yield( TRUE );
00601 }
00602
00603
00604
00605
00606
00607
00608
00609
00610
00611
00612
00613
00614
00615
00616
/*
 * Record the stack base, size, and grow-direction flag for thread `gtid` in
 * th->th.th_info.ds.  Returns TRUE when exact stack bounds were obtained
 * (Linux, non-uber threads, via pthread_getattr_np); otherwise FALSE, in
 * which case only the address of a local is stored as an approximate base
 * and the stack is marked as growable (refined incrementally later).
 */
static kmp_int32
__kmp_set_stack_info( int gtid, kmp_info_t *th )
{
    int            stack_data;   // its address approximates the current stack top
#if KMP_OS_LINUX
    /* Linux-only path: query the real thread attributes. */
    pthread_attr_t attr;
    int            status;
    size_t         size = 0;
    void *         addr = 0;

    // Uber (master) threads skip the exact query and fall through to the
    // incremental-refinement path below.
    if ( ! KMP_UBER_GTID(gtid) ) {

        /* Fetch the real thread attributes */
        status = pthread_attr_init( &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_init", status );
        status = pthread_getattr_np( pthread_self(), &attr );
        KMP_CHECK_SYSFAIL( "pthread_getattr_np", status );
        status = pthread_attr_getstack( &attr, &addr, &size );
        KMP_CHECK_SYSFAIL( "pthread_attr_getstack", status );
        KA_TRACE( 60, ( "__kmp_set_stack_info: T#%d pthread_attr_getstack returned size: %lu, "
                        "low addr: %p\n",
                        gtid, size, addr ));

        status = pthread_attr_destroy( &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_destroy", status );
    }

    if ( size != 0 && addr != 0 ) {   /* exact bounds determined? */
        /* Store the base (high end: addr + size) and exact size. */
        TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
        TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
        TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
        return TRUE;
    } else {
#endif /* KMP_OS_LINUX */
        /* Approximate: use the address of a local variable as the current
           base, size unknown (0), and allow the recorded extent to grow. */
        TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
        TCW_PTR(th -> th.th_info.ds.ds_stackbase, &stack_data);
        TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
        return FALSE;
#if KMP_OS_LINUX
    }
#endif /* KMP_OS_LINUX */
}
00665
/*
 * Thread start routine for worker threads created by __kmp_create_worker().
 * Establishes per-thread state (gtid TLS, affinity, cancellation mode,
 * x87/SSE FP control state, signal mask, stack info), then runs
 * __kmp_launch_thread() for the worker's lifetime and returns its exit value.
 */
static void*
__kmp_launch_worker( void *thr )
{
    int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
    sigset_t    new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
    void *exit_val;
    void *padding = 0;   // optional stack-offset allocation, see below
    int gtid;
    int error;

    // Make this thread's gtid discoverable via thread-specific data (and,
    // when available, a thread-local variable).
    gtid = ((kmp_info_t*)thr) -> th.th_info.ds.ds_gtid;
    __kmp_gtid_set_specific( gtid );
#ifdef KMP_TDATA_GTID
    __kmp_gtid = gtid;
#endif

    // Apply the worker's initial affinity mask before doing real work.
#if KMP_OS_LINUX
    __kmp_affinity_set_init_mask( gtid, FALSE );
#elif KMP_OS_DARWIN
    // Affinity binding not performed on Darwin.
#else
#error "Unknown or unsupported OS"
#endif

#ifdef KMP_CANCEL_THREADS
    // Enable asynchronous cancellation so __kmp_terminate_thread can stop
    // this worker during abnormal shutdown.
    status = pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, & old_type );
    KMP_CHECK_SYSFAIL( "pthread_setcanceltype", status );

    status = pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, & old_state );
    KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    // Reset the x87/SSE floating-point control state to the values captured
    // at library initialization so all workers start from the same FP state.
    __kmp_clear_x87_fpu_status_word();
    __kmp_load_x87_fpu_control_word( &__kmp_init_x87_fpu_control_word );
    __kmp_load_mxcsr( &__kmp_init_mxcsr );
#endif

#ifdef KMP_BLOCK_SIGNALS
    // Block all signals in this worker; the previous mask is restored below.
    status = sigfillset( & new_set );
    KMP_CHECK_SYSFAIL_ERRNO( "sigfillset", status );
    status = pthread_sigmask( SIG_BLOCK, & new_set, & old_set );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif

#if KMP_OS_LINUX
    // Offset each worker's stack usage by gtid * __kmp_stkoffset bytes,
    // presumably to reduce cache/TLB conflicts between identical stack
    // layouts — confirm against the KMP_STACKOFFSET documentation.
    if ( __kmp_stkoffset > 0 && gtid > 0 ) {
        padding = alloca( gtid * __kmp_stkoffset );
    }
#endif

    KMP_MB();
    __kmp_set_stack_info( gtid, (kmp_info_t*)thr );

    __kmp_check_stack_overlap( (kmp_info_t*)thr );

    // Run the worker until it is done; this is the thread's main loop.
    exit_val = __kmp_launch_thread( (kmp_info_t *) thr );

#ifdef KMP_BLOCK_SIGNALS
    // Restore the signal mask saved before the blocking above.
    status = pthread_sigmask( SIG_SETMASK, & old_set, NULL );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif

    return exit_val;
}
00738
00739
00740
00741
/*
 * Thread start routine for the monitor thread.  The monitor wakes up
 * __kmp_monitor_wakeups times per second, advances the global coarse clock
 * (__kmp_global.g.g_time.dt.t_value), and manages the yield duty cycle.
 * On abnormal termination (g_abort set) it cancels all worker threads,
 * cleans up, and re-raises the aborting signal.  Returns its argument.
 */
static void*
__kmp_launch_monitor( void *thr )
{
    int         status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
    sigset_t    new_set;
#endif /* KMP_BLOCK_SIGNALS */
    struct timespec interval;   // sleep interval between monitor wakeups
    int yield_count;
    int yield_cycles = 0;
    int error;

    KMP_MB();       /* Flush all pending memory write invalidates.  */

    KA_TRACE( 10, ("__kmp_launch_monitor: #1 launched\n" ) );

    /* Register this thread as the monitor in thread-specific data. */
    __kmp_gtid_set_specific( KMP_GTID_MONITOR );
#ifdef KMP_TDATA_GTID
    __kmp_gtid = KMP_GTID_MONITOR;
#endif

    KMP_MB();

    __kmp_set_stack_info( ((kmp_info_t*)thr)->th.th_info.ds.ds_gtid, (kmp_info_t*)thr );

    __kmp_check_stack_overlap( (kmp_info_t*)thr );

#ifdef KMP_CANCEL_THREADS
    // Allow asynchronous cancellation so shutdown can stop the monitor.
    status = pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, & old_type );
    KMP_CHECK_SYSFAIL( "pthread_setcanceltype", status );

    status = pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, & old_state );
    KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
#endif

#if KMP_REAL_TIME_FIX
    // Under a real-time policy (SCHED_FIFO/SCHED_RR) the monitor can starve;
    // warn and try to raise its priority by one step.
    {
        int sched = sched_getscheduler( 0 );
        if ( sched == SCHED_FIFO || sched == SCHED_RR ) {
            struct sched_param param;
            int    max_priority = sched_get_priority_max( sched );
            int    rc;
            KMP_WARNING( RealTimeSchedNotSupported );
            sched_getparam( 0, & param );
            if ( param.sched_priority < max_priority ) {
                param.sched_priority += 1;
                rc = sched_setscheduler( 0, sched, & param );
                if ( rc != 0 ) {
                    int error = errno;
                    __kmp_msg(
                        kmp_ms_warning,
                        KMP_MSG( CantChangeMonitorPriority ),
                        KMP_ERR( error ),
                        KMP_MSG( MonitorWillStarve ),
                        __kmp_msg_null
                    );
                };
            } else {
                // Already at maximum priority; nothing to do but warn.
                __kmp_msg(
                    kmp_ms_warning,
                    KMP_MSG( RunningAtMaxPriority ),
                    KMP_MSG( MonitorWillStarve ),
                    KMP_HNT( RunningAtMaxPriority ),
                    __kmp_msg_null
                );
            };
        };
    }
#endif // KMP_REAL_TIME_FIX

    KMP_MB();

    // Compute the sleep interval from the requested wakeup frequency.
    if ( __kmp_monitor_wakeups == 1 ) {
        interval.tv_sec  = 1;
        interval.tv_nsec = 0;
    } else {
        interval.tv_sec  = 0;
        interval.tv_nsec = (NSEC_PER_SEC / __kmp_monitor_wakeups);
    }

    KA_TRACE( 10, ("__kmp_launch_monitor: #2 monitor\n" ) );

    if (__kmp_yield_cycle) {
        __kmp_yielding_on = 0;   /* start the duty cycle in the "off" phase */
        yield_count = __kmp_yield_off_count;
    } else {
        __kmp_yielding_on = 1;   /* yielding permanently enabled */
    }

    // Main monitor loop: runs until global shutdown is signaled.
    while( ! TCR_4( __kmp_global.g.g_done ) ) {
        struct timespec  now;
        struct timeval   tval;

        KA_TRACE( 15, ( "__kmp_launch_monitor: update\n" ) );

        // Build the absolute deadline (current time + interval) required by
        // pthread_cond_timedwait.
        status = gettimeofday( &tval, NULL );
        KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
        TIMEVAL_TO_TIMESPEC( &tval, &now );

        now.tv_sec  += interval.tv_sec;
        now.tv_nsec += interval.tv_nsec;

        if (now.tv_nsec >= NSEC_PER_SEC) {   // normalize nsec carry into sec
            now.tv_sec  += 1;
            now.tv_nsec -= NSEC_PER_SEC;
        }

        status = pthread_mutex_lock( & __kmp_wait_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
        status = pthread_cond_timedwait( & __kmp_wait_cv.c_cond, & __kmp_wait_mx.m_mutex,
                                         & now );
        if ( status != 0 ) {
            // Timeout is the normal case; EINTR is harmless.
            if ( status != ETIMEDOUT && status != EINTR ) {
                KMP_SYSFAIL( "pthread_cond_timedwait", status );
            };
        };

        status = pthread_mutex_unlock( & __kmp_wait_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );

        // Alternate the global yielding flag: "off" for __kmp_yield_off_count
        // cycles, then "on" for __kmp_yield_on_count cycles, repeating.
        if (__kmp_yield_cycle) {
            yield_cycles++;
            if ( (yield_cycles % yield_count) == 0 ) {
                if (__kmp_yielding_on) {
                    __kmp_yielding_on = 0;
                    yield_count = __kmp_yield_off_count;
                } else {
                    __kmp_yielding_on = 1;
                    yield_count = __kmp_yield_on_count;
                }
                yield_cycles = 0;
            }
        } else {
            __kmp_yielding_on = 1;
        }

        // Advance the coarse global clock consumed by blocktime expiration
        // checks elsewhere in the runtime.
        TCW_4( __kmp_global.g.g_time.dt.t_value,
          TCR_4( __kmp_global.g.g_time.dt.t_value ) + 1 );

        KMP_MB();
    }

    KA_TRACE( 10, ("__kmp_launch_monitor: #3 cleanup\n" ) );

#ifdef KMP_BLOCK_SIGNALS
    // Unblock all signals so a re-raised abort signal (below) is deliverable.
    status = sigfillset( & new_set );
    KMP_CHECK_SYSFAIL_ERRNO( "sigfillset", status );
    status = pthread_sigmask( SIG_UNBLOCK, & new_set, NULL );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif

    KA_TRACE( 10, ("__kmp_launch_monitor: #4 finished\n" ) );

    if( __kmp_global.g.g_abort != 0 ) {
        /* g_abort holds the signal number that caused the abort; terminate
           the worker threads, clean up, and re-raise the signal. */
        int gtid;

        KA_TRACE( 10, ("__kmp_launch_monitor: #5 terminate sig=%d\n", __kmp_global.g.g_abort ) );

        // Cancel every worker; gtid 0 (the initial thread) is skipped.
        for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
            __kmp_terminate_thread( gtid );

        __kmp_cleanup();

        KA_TRACE( 10, ("__kmp_launch_monitor: #6 raise sig=%d\n", __kmp_global.g.g_abort ) );

        if (__kmp_global.g.g_abort > 0)
            raise( __kmp_global.g.g_abort );   // re-raise the original signal

    }

    KA_TRACE( 10, ("__kmp_launch_monitor: #7 exit\n" ) );

    return thr;
}
00932
/*
 * Create the OS thread for worker `th` with the requested stack size and
 * store the handle in th->th.th_info.ds.ds_thread.  For uber (master)
 * threads no thread is created: the current thread is recorded instead.
 * Stack-size failures fall back to KMP_BACKUP_STKSIZE when the user did not
 * set a size explicitly; unrecoverable pthread_create errors are fatal.
 */
void
__kmp_create_worker( int gtid, kmp_info_t *th, size_t stack_size )
{
    pthread_t      handle;
    // NOTE(review): thread_attr is initialized only under KMP_THREAD_ATTR but
    // is passed to pthread_create unconditionally below — confirm that
    // KMP_THREAD_ATTR is always defined for the supported platforms.
    pthread_attr_t thread_attr;
    int status;

    th->th.th_info.ds.ds_gtid = gtid;

    if ( KMP_UBER_GTID(gtid) ) {
        // Uber thread: already running; just record identity and stack info.
        KA_TRACE( 10, ("__kmp_create_worker: uber thread (%d)\n", gtid ) );
        th -> th.th_info.ds.ds_thread = pthread_self();
        __kmp_set_stack_info( gtid, th );
        __kmp_check_stack_overlap( th );
        return;
    };

    KA_TRACE( 10, ("__kmp_create_worker: try to create thread (%d)\n", gtid ) );

    KMP_MB();       /* Flush all pending memory write invalidates.  */

#ifdef KMP_THREAD_ATTR
    {
        status = pthread_attr_init( &thread_attr );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantInitThreadAttrs ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        };
        // Workers must be joinable so __kmp_reap_worker can pthread_join them.
        status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetWorkerState ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        };

        // Enlarge the stack by the per-gtid offset consumed via alloca in
        // __kmp_launch_worker (stack staggering).
        stack_size += gtid * __kmp_stkoffset;

        KA_TRACE( 10, ( "__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                        "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                        gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size ) );

# ifdef _POSIX_THREAD_ATTR_STACKSIZE
        status = pthread_attr_setstacksize( & thread_attr, stack_size );
# ifdef KMP_BACKUP_STKSIZE
        // If the requested size was rejected and the user did not set one
        // explicitly, retry once with the backup default.
        if ( status != 0 ) {
            if ( ! __kmp_env_stksize ) {
                stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
                __kmp_stksize = KMP_BACKUP_STKSIZE;
                KA_TRACE( 10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                               "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                               "bytes\n",
                               gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size )
                          );
                status = pthread_attr_setstacksize( &thread_attr, stack_size );
            };
        };
# endif /* KMP_BACKUP_STKSIZE */
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetWorkerStackSize, stack_size ),
                KMP_ERR( status ),
                KMP_HNT( ChangeWorkerStackSize ),
                __kmp_msg_null
            );
        };
# endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    }
#endif /* KMP_THREAD_ATTR */

    {
        status = pthread_create( & handle, & thread_attr, __kmp_launch_worker, (void *) th );
        if ( status != 0 || ! handle ) {
            // Map the common pthread_create failures to specific fatal
            // messages with actionable hints; anything else is KMP_SYSFAIL.
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
            if ( status == EINVAL ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( CantSetWorkerStackSize, stack_size ),
                    KMP_ERR( status ),
                    KMP_HNT( IncreaseWorkerStackSize ),
                    __kmp_msg_null
                );
            };
            if ( status == ENOMEM ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( CantSetWorkerStackSize, stack_size ),
                    KMP_ERR( status ),
                    KMP_HNT( DecreaseWorkerStackSize ),
                    __kmp_msg_null
                );
            };
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
            if ( status == EAGAIN ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( NoResourcesForWorkerThread ),
                    KMP_ERR( status ),
                    KMP_HNT( Decrease_NUM_THREADS ),
                    __kmp_msg_null
                );
            };
            KMP_SYSFAIL( "pthread_create", status );
        };

        th->th.th_info.ds.ds_thread = handle;
    }

#ifdef KMP_THREAD_ATTR
    {
        status = pthread_attr_destroy( & thread_attr );
        if ( status ) {
            // Non-fatal: the worker was created; only the attr leaks.
            __kmp_msg(
                kmp_ms_warning,
                KMP_MSG( CantDestroyThreadAttrs ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        };
    }
#endif /* KMP_THREAD_ATTR */

    KMP_MB();       /* Flush all pending memory write invalidates.  */

    KA_TRACE( 10, ("__kmp_create_worker: done creating thread (%d)\n", gtid ) );

}
01069
01070
/*
 * Create the monitor thread (runs __kmp_launch_monitor).  Chooses and
 * validates the monitor stack size — doubling it and retrying on EINVAL when
 * auto-adjusting — and stores the handle in th->th.th_info.ds.ds_thread.
 * Under KMP_REAL_TIME_FIX, waits until the monitor has observably started
 * before returning.
 */
void
__kmp_create_monitor( kmp_info_t *th )
{
    pthread_t           handle;
    pthread_attr_t      thread_attr;
    size_t              size;
    int                 status;
    int                 caller_gtid = __kmp_get_gtid();
    int                 auto_adj_size = FALSE;   // TRUE => grow stack size and retry on failure

    KA_TRACE( 10, ("__kmp_create_monitor: try to create monitor\n" ) );

    KMP_MB();       /* Flush all pending memory write invalidates.  */

    th->th.th_info.ds.ds_tid  = KMP_GTID_MONITOR;
    th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
#if KMP_REAL_TIME_FIX
    // Sentinel for the startup handshake awaited at the bottom of this
    // function (wait for t_value != -1).
    TCW_4( __kmp_global.g.g_time.dt.t_value, -1 );
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
    if ( __kmp_monitor_stksize == 0 ) {
        // No user-specified size: use the default and allow auto-adjustment.
        __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
        auto_adj_size = TRUE;
    }
    status = pthread_attr_init( &thread_attr );
    if ( status != 0 ) {
        __kmp_msg(
            kmp_ms_fatal,
            KMP_MSG( CantInitThreadAttrs ),
            KMP_ERR( status ),
            __kmp_msg_null
        );
    };
    // Joinable so __kmp_reap_monitor can pthread_join it at shutdown.
    status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE );
    if ( status != 0 ) {
        __kmp_msg(
            kmp_ms_fatal,
            KMP_MSG( CantSetMonitorState ),
            KMP_ERR( status ),
            __kmp_msg_null
        );
    };

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    status = pthread_attr_getstacksize( & thread_attr, & size );
    KMP_CHECK_SYSFAIL( "pthread_attr_getstacksize", status );
#else
    size = __kmp_sys_min_stksize;
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

    // Clamp the requested size to at least the system minimum.
    if ( __kmp_monitor_stksize == 0 ) {
        __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    }
    if ( __kmp_monitor_stksize < __kmp_sys_min_stksize ) {
        __kmp_monitor_stksize = __kmp_sys_min_stksize;
    }

    KA_TRACE( 10, ( "__kmp_create_monitor: default stacksize = %lu bytes,"
                    "requested stacksize = %lu bytes\n",
                    size, __kmp_monitor_stksize ) );

    retry:
    /* Apply the stack size; on failure with auto-adjust, double and retry. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    KA_TRACE( 10, ( "__kmp_create_monitor: setting stacksize = %lu bytes,",
                    __kmp_monitor_stksize ) );
    status = pthread_attr_setstacksize( & thread_attr, __kmp_monitor_stksize );
    if ( status != 0 ) {
        if ( auto_adj_size ) {
            __kmp_monitor_stksize *= 2;
            goto retry;
        }
        // Non-fatal: proceed with whatever size the attr currently holds.
        __kmp_msg(
            kmp_ms_warning,
            KMP_MSG( CantSetMonitorStackSize, (long int) __kmp_monitor_stksize ),
            KMP_ERR( status ),
            KMP_HNT( ChangeMonitorStackSize ),
            __kmp_msg_null
        );
    };
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

    TCW_4( __kmp_global.g.g_time.dt.t_value, 0 );

    status = pthread_create( &handle, & thread_attr, __kmp_launch_monitor, (void *) th );

    if ( status != 0 ) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
        if ( status == EINVAL ) {
            // Stack size rejected at create time; grow (up to 1 GB) and retry
            // when auto-adjusting, else fail fatally.
            if ( auto_adj_size && ( __kmp_monitor_stksize < (size_t)0x40000000 ) ) {
                __kmp_monitor_stksize *= 2;
                goto retry;
            }
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetMonitorStackSize, __kmp_monitor_stksize ),
                KMP_ERR( status ),
                KMP_HNT( IncreaseMonitorStackSize ),
                __kmp_msg_null
            );
        };
        if ( status == ENOMEM ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetMonitorStackSize, __kmp_monitor_stksize ),
                KMP_ERR( status ),
                KMP_HNT( DecreaseMonitorStackSize ),
                __kmp_msg_null
            );
        };
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
        if ( status == EAGAIN ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( NoResourcesForMonitorThread ),
                KMP_ERR( status ),
                KMP_HNT( DecreaseNumberOfThreadsInUse ),
                __kmp_msg_null
            );
        };
        KMP_SYSFAIL( "pthread_create", status );
    };

    th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX
    // Busy-wait until t_value changes from -1, i.e. the monitor has started.
    // NOTE(review): t_value is also stored as 0 just before pthread_create
    // above — confirm the intended handshake ordering here.
    KMP_DEBUG_ASSERT( sizeof( kmp_uint32 ) == sizeof( __kmp_global.g.g_time.dt.t_value ) );
    __kmp_wait_yield_4(
        (kmp_uint32 volatile *) & __kmp_global.g.g_time.dt.t_value, -1, & __kmp_neq_4, NULL
    );
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
    status = pthread_attr_destroy( & thread_attr );
    if ( status != 0 ) {
        // Non-fatal: the monitor was created; only the attr leaks.
        __kmp_msg(
            kmp_ms_warning,
            KMP_MSG( CantDestroyThreadAttrs ),
            KMP_ERR( status ),
            __kmp_msg_null
        );
    };
#endif /* KMP_THREAD_ATTR */

    KMP_MB();       /* Flush all pending memory write invalidates.  */

    KA_TRACE( 10, ( "__kmp_create_monitor: monitor created %#.8lx\n", th->th.th_info.ds.ds_thread ) );

}
01225
/*
 * Terminate the calling thread, making exit_status available to any joiner
 * (retrieved via pthread_join in __kmp_reap_worker / __kmp_reap_monitor).
 */
void
__kmp_exit_thread(
    int exit_status
) {
    // Widen through intptr_t before converting to a pointer: a direct
    // int -> void* cast is implementation-defined and warns on LP64 targets.
    pthread_exit( (void *)(intptr_t) exit_status );
}
01232
01233 void
01234 __kmp_reap_monitor( kmp_info_t *th )
01235 {
01236 int status, i;
01237 void *exit_val;
01238
01239 KA_TRACE( 10, ("__kmp_reap_monitor: try to reap monitor thread with handle %#.8lx\n",
01240 th->th.th_info.ds.ds_thread ) );
01241
01242
01243
01244
01245 KMP_DEBUG_ASSERT( th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid );
01246 if ( th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR ) {
01247 return;
01248 };
01249
01250 KMP_MB();
01251
01252
01253
01254
01255
01256
01257 status = pthread_kill( th->th.th_info.ds.ds_thread, 0 );
01258 if (status == ESRCH) {
01259
01260 KA_TRACE( 10, ("__kmp_reap_monitor: monitor does not exist, returning\n") );
01261
01262 } else
01263 {
01264 status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val);
01265 if (exit_val != th) {
01266 __kmp_msg(
01267 kmp_ms_fatal,
01268 KMP_MSG( ReapMonitorError ),
01269 KMP_ERR( status ),
01270 __kmp_msg_null
01271 );
01272 }
01273 }
01274
01275 th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
01276 th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;
01277
01278 KA_TRACE( 10, ("__kmp_reap_monitor: done reaping monitor thread with handle %#.8lx\n",
01279 th->th.th_info.ds.ds_thread ) );
01280
01281 KMP_MB();
01282
01283 }
01284
/* Join (reap) an exiting worker thread.  In debug builds, verify that the
   join succeeded and that the worker exited with its own kmp_info_t pointer
   as the exit value. */
void
__kmp_reap_worker( kmp_info_t *th )
{
    int status;
    void *exit_val;

    KMP_MB();               /* Flush all pending memory write invalidates. */

    KA_TRACE( 10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid ) );

    /* First check whether the worker thread still exists: pthread_kill with
       signal 0 performs only error checking, delivering no signal.  This
       avoids blocking in pthread_join on a thread that is already gone
       (though a race remains if the worker dies between the two calls). */
    {
        status = pthread_kill( th->th.th_info.ds.ds_thread, 0 );
        if (status == ESRCH) {
            KA_TRACE( 10, ("__kmp_reap_worker: worker T#%d does not exist, returning\n",
                           th->th.th_info.ds.ds_gtid ) );
        }
        else {
            KA_TRACE( 10, ("__kmp_reap_worker: try to join with worker T#%d\n",
                           th->th.th_info.ds.ds_gtid ) );

            status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val);
#ifdef KMP_DEBUG
            /* Debug-only diagnostics: a failed join is fatal, and a worker
               that exits with any value other than its own th pointer is
               reported (trace only). */
            if ( status != 0 ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( ReapWorkerError ),
                    KMP_ERR( status ),
                    __kmp_msg_null
                );
            }
            if ( exit_val != th ) {
                KA_TRACE( 10, ( "__kmp_reap_worker: worker T#%d did not reap properly, "
                                "exit_val = %p\n",
                                th->th.th_info.ds.ds_gtid, exit_val ) );
            }
#endif
        }
    }

    KA_TRACE( 10, ("__kmp_reap_worker: done reaping T#%d\n", th->th.th_info.ds.ds_gtid ) );

    KMP_MB();       /* Flush all pending memory write invalidates. */
}
01333
01334
01335
01336
01337
01338 #if KMP_HANDLE_SIGNALS
01339
01340
/* Deliberately empty signal handler: installed where the runtime wants a
   signal to be caught (so it is not fatal) but otherwise ignored. */
static void
__kmp_null_handler( int signo )
{
    (void) signo;   // intentionally unused; handler does nothing
}
01346
01347
/* Process-wide signal handler installed by the runtime.  On a fatal or
   termination signal it requests library shutdown by setting the global
   abort/done flags; the actual teardown is performed elsewhere.  Only the
   first such signal is acted on (g_abort stays nonzero afterwards). */
static void
__kmp_team_handler( int signo )
{
    if ( __kmp_global.g.g_abort == 0 ) {
        /* first time this handler fires for the process */
#ifdef KMP_DEBUG
        __kmp_debug_printf( "__kmp_team_handler: caught signal = %d\n", signo );
#endif
        switch ( signo ) {
            case SIGHUP  :
            case SIGINT  :
            case SIGQUIT :
            case SIGILL  :
            case SIGABRT :
            case SIGFPE  :
            case SIGBUS  :
            case SIGSEGV :
#ifdef SIGSYS
            case SIGSYS  :
#endif
            case SIGTERM :
                if ( __kmp_debug_buf ) {
                    // Dump the debug trace buffer before the process dies.
                    __kmp_dump_debug_buffer( );
                };
                KMP_MB();       // Flush all pending memory write invalidates.
                TCW_4( __kmp_global.g.g_abort, signo );   // record which signal aborted us
                KMP_MB();       // Flush all pending memory write invalidates.
                TCW_4( __kmp_global.g.g_done, TRUE );     // tell worker loops to stop
                KMP_MB();       // Flush all pending memory write invalidates.
                break;
            default:
#ifdef KMP_DEBUG
                __kmp_debug_printf( "__kmp_team_handler: unknown signal type" );
#endif
                break;
        };
    };
}
01386
01387
/* Checked wrapper around sigaction(2): on failure the runtime aborts with a
   system-error message (errno-based). */
static
void __kmp_sigaction( int signum, const struct sigaction * act, struct sigaction * oldact ) {
    int err = sigaction( signum, act, oldact );
    KMP_CHECK_SYSFAIL_ERRNO( "sigaction", err );
}
01393
01394
/* Install (parallel_init != 0) or record (parallel_init == 0) the handler
   for one signal.
   - At library init time (parallel_init == 0): save the current/system
     handler into __kmp_sighldrs[sig] for later comparison and restoration.
   - At parallel init time: install handler_func, but only take ownership if
     the handler found is still the one we saved earlier; otherwise the user
     installed their own handler in the meantime, so put theirs back. */
static void
__kmp_install_one_handler( int sig, sig_func_t handler_func, int parallel_init )
{
    KMP_MB();       // Flush all pending memory write invalidates.
    KB_TRACE( 60, ( "__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init ) );
    if ( parallel_init ) {
        struct sigaction new_action;
        struct sigaction old_action;
        new_action.sa_handler = handler_func;
        new_action.sa_flags = 0;
        sigfillset( & new_action.sa_mask );   // block all signals while our handler runs
        __kmp_sigaction( sig, & new_action, & old_action );
        if ( old_action.sa_handler == __kmp_sighldrs[ sig ].sa_handler ) {
            // Still the handler we saw at init: we now own this signal.
            sigaddset( & __kmp_sigset, sig );
        } else {
            // User changed the handler after init — restore theirs and do
            // not record ownership in __kmp_sigset.
            __kmp_sigaction( sig, & old_action, NULL );
        };
    } else {
        // Save the initial/system signal handler for later comparison.
        __kmp_sigaction( sig, NULL, & __kmp_sighldrs[ sig ] );
    };
    KMP_MB();       // Flush all pending memory write invalidates.
}
01419
01420
/* Undo __kmp_install_one_handler for one signal: restore the saved handler,
   but only if we recorded ownership of the signal in __kmp_sigset.  If the
   handler found is not ours, the user installed their own after us — put it
   back rather than clobbering it. */
static void
__kmp_remove_one_handler( int sig )
{
    KB_TRACE( 60, ( "__kmp_remove_one_handler( %d )\n", sig ) );
    if ( sigismember( & __kmp_sigset, sig ) ) {
        struct sigaction old;
        KMP_MB();       // Flush all pending memory write invalidates.
        __kmp_sigaction( sig, & __kmp_sighldrs[ sig ], & old );
        if ( ( old.sa_handler != __kmp_team_handler ) && ( old.sa_handler != __kmp_null_handler ) ) {
            // Restore the user's own handler that replaced ours.
            KB_TRACE( 10, ( "__kmp_remove_one_handler: oops, not our handler, restoring: sig=%d\n", sig ) );
            __kmp_sigaction( sig, & old, NULL );
        };
        sigdelset( & __kmp_sigset, sig );   // forget ownership either way
        KMP_MB();       // Flush all pending memory write invalidates.
    };
}
01438
01439
01440 void
01441 __kmp_install_signals( int parallel_init )
01442 {
01443 KB_TRACE( 10, ( "__kmp_install_signals( %d )\n", parallel_init ) );
01444 if ( __kmp_handle_signals || ! parallel_init ) {
01445
01446
01447 sigemptyset( & __kmp_sigset );
01448 __kmp_install_one_handler( SIGHUP, __kmp_team_handler, parallel_init );
01449 __kmp_install_one_handler( SIGINT, __kmp_team_handler, parallel_init );
01450 __kmp_install_one_handler( SIGQUIT, __kmp_team_handler, parallel_init );
01451 __kmp_install_one_handler( SIGILL, __kmp_team_handler, parallel_init );
01452 __kmp_install_one_handler( SIGABRT, __kmp_team_handler, parallel_init );
01453 __kmp_install_one_handler( SIGFPE, __kmp_team_handler, parallel_init );
01454 __kmp_install_one_handler( SIGBUS, __kmp_team_handler, parallel_init );
01455 __kmp_install_one_handler( SIGSEGV, __kmp_team_handler, parallel_init );
01456 #ifdef SIGSYS
01457 __kmp_install_one_handler( SIGSYS, __kmp_team_handler, parallel_init );
01458 #endif // SIGSYS
01459 __kmp_install_one_handler( SIGTERM, __kmp_team_handler, parallel_init );
01460 #ifdef SIGPIPE
01461 __kmp_install_one_handler( SIGPIPE, __kmp_team_handler, parallel_init );
01462 #endif // SIGPIPE
01463 };
01464 }
01465
01466
01467 void
01468 __kmp_remove_signals( void )
01469 {
01470 int sig;
01471 KB_TRACE( 10, ( "__kmp_remove_signals()\n" ) );
01472 for ( sig = 1; sig < NSIG; ++ sig ) {
01473 __kmp_remove_one_handler( sig );
01474 };
01475 }
01476
01477
01478 #endif // KMP_HANDLE_SIGNALS
01479
01480
01481
01482
/* Re-enable pthread cancellation, setting it to new_state (expected to be
   the state previously saved by __kmp_disable()).  No-op unless the library
   is built with KMP_CANCEL_THREADS. */
void
__kmp_enable( int new_state )
{
#ifdef KMP_CANCEL_THREADS
    int status, old_state;
    status = pthread_setcancelstate( new_state, & old_state );
    KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
    // Enable/disable calls must pair up: we must have been disabled.
    KMP_DEBUG_ASSERT( old_state == PTHREAD_CANCEL_DISABLE );
#endif
}
01493
/* Disable pthread cancellation for the calling thread, returning the prior
   cancel state through *old_state so __kmp_enable() can restore it.  No-op
   unless built with KMP_CANCEL_THREADS. */
void
__kmp_disable( int * old_state )
{
#ifdef KMP_CANCEL_THREADS
    int status;
    status = pthread_setcancelstate( PTHREAD_CANCEL_DISABLE, old_state );
    KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
#endif
}
01503
01504
01505
01506
/* pthread_atfork() "prepare" callback.  Currently nothing needs to be done
   before fork(); kept (rather than passing NULL) so the three callbacks
   registered in __kmp_register_atfork stay symmetric. */
static void
__kmp_atfork_prepare (void)
{
    /* nothing to do */
}
01512
/* pthread_atfork() "parent" callback.  The parent's runtime state is left
   untouched after fork(), so there is nothing to do here. */
static void
__kmp_atfork_parent (void)
{
    /* nothing to do */
}
01518
01519
01520
01521
01522
01523
/* pthread_atfork() "child" callback: reset the library so the child process
   starts over with clean data structures.  Memory inherited from the parent
   is deliberately NOT freed — the child only resets flags/tables so the
   next OpenMP construct triggers full re-initialization. */
static void
__kmp_atfork_child (void)
{
    /* Count forks so per-thread objects created before the fork can be
       detected as stale (see __kmp_suspend_initialize_thread). */
    ++__kmp_fork_count;

    // Drop every initialization flag back to its pre-init value so the
    // serial/middle/parallel init sequence runs again in the child.
    __kmp_init_runtime = FALSE;
    __kmp_init_monitor = 0;
    __kmp_init_parallel = FALSE;
    __kmp_init_middle = FALSE;
    __kmp_init_serial = FALSE;
    TCW_4(__kmp_init_gtid, FALSE);
    __kmp_init_common = FALSE;

    // Discard the user-lock bookkeeping; the parent's lock memory is
    // abandoned (not freed) in the child.
    TCW_4(__kmp_init_user_locks, FALSE);
    __kmp_user_lock_table.used = 0;
    __kmp_user_lock_table.allocated = 0;
    __kmp_user_lock_table.table = NULL;
    __kmp_lock_blocks = NULL;

    // The child starts with no registered OpenMP threads.
    __kmp_all_nth = 0;
    TCW_4(__kmp_nth, 0);

    /* Zero every registered threadprivate cache so stale per-thread data
       from the parent cannot be observed in the child. */
    KA_TRACE( 10, ( "__kmp_atfork_child: checking cache address list %p\n",
                    __kmp_threadpriv_cache_list ) );

    while ( __kmp_threadpriv_cache_list != NULL ) {

        if ( *__kmp_threadpriv_cache_list -> addr != NULL ) {
            KC_TRACE( 50, ( "__kmp_atfork_child: zeroing cache at address %p\n",
                            &(*__kmp_threadpriv_cache_list -> addr) ) );

            *__kmp_threadpriv_cache_list -> addr = NULL;
        }
        __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list -> next;
    }

    __kmp_init_runtime = FALSE;     // reset again after walking the cache list

    /* Re-initialize the bootstrap locks: fork() may have happened while the
       parent held one of them, which would leave the child's copy locked
       forever. */
    __kmp_init_bootstrap_lock( &__kmp_initz_lock );
    __kmp_init_bootstrap_lock( &__kmp_stdio_lock );
    __kmp_init_bootstrap_lock( &__kmp_console_lock );

    /* Nothing else is reset here; remaining state is rebuilt lazily by the
       normal initialization path on the child's first OpenMP construct. */
}
01584
01585 void
01586 __kmp_register_atfork(void) {
01587 if ( __kmp_need_register_atfork ) {
01588 int status = pthread_atfork( __kmp_atfork_prepare, __kmp_atfork_parent, __kmp_atfork_child );
01589 KMP_CHECK_SYSFAIL( "pthread_atfork", status );
01590 __kmp_need_register_atfork = FALSE;
01591 }
01592 }
01593
/* One-time process-level setup for the suspend/resume machinery: create the
   (default) attribute objects later used to initialize each thread's
   suspend mutex and condition variable. */
void
__kmp_suspend_initialize( void )
{
    int status;
    status = pthread_mutexattr_init( &__kmp_suspend_mutex_attr );
    KMP_CHECK_SYSFAIL( "pthread_mutexattr_init", status );
    status = pthread_condattr_init( &__kmp_suspend_cond_attr );
    KMP_CHECK_SYSFAIL( "pthread_condattr_init", status );
}
01603
/* Lazily (re)create this thread's suspend condition variable and mutex.
   th_suspend_init_count lagging __kmp_fork_count means the objects were
   created before the last fork() (or never), so they must be initialized
   for this instance of the process. */
static void
__kmp_suspend_initialize_thread( kmp_info_t *th )
{
    if ( th->th.th_suspend_init_count <= __kmp_fork_count ) {
        /* this means we haven't initialized the suspension pthread objects
           for this thread in this instance of the process */
        int status;
        status = pthread_cond_init( &th->th.th_suspend_cv.c_cond, &__kmp_suspend_cond_attr );
        KMP_CHECK_SYSFAIL( "pthread_cond_init", status );
        status = pthread_mutex_init( &th->th.th_suspend_mx.m_mutex, & __kmp_suspend_mutex_attr );
        KMP_CHECK_SYSFAIL( "pthread_mutex_init", status );
        // Volatile write: other threads test this counter to decide whether
        // the suspend objects are usable.
        *(volatile int*)&th->th.th_suspend_init_count = __kmp_fork_count + 1;
    };
}
01618
/* Destroy this thread's suspend condition variable and mutex, but only if
   they were initialized in this instance of the process (init count ahead
   of fork count).  EBUSY is tolerated: another thread may still be blocked
   on the objects during shutdown. */
void
__kmp_suspend_uninitialize_thread( kmp_info_t *th )
{
    if(th->th.th_suspend_init_count > __kmp_fork_count) {
        /* this means we have initialized the suspension pthread objects for
           this thread in this instance of the process */
        int status;

        status = pthread_cond_destroy( &th->th.th_suspend_cv.c_cond );
        if ( status != 0 && status != EBUSY ) {
            KMP_SYSFAIL( "pthread_cond_destroy", status );
        };
        status = pthread_mutex_destroy( &th->th.th_suspend_mx.m_mutex );
        if ( status != 0 && status != EBUSY ) {
            KMP_SYSFAIL( "pthread_mutex_destroy", status );
        };
        --th->th.th_suspend_init_count;
        // Counter must be back in sync with the fork count after teardown.
        KMP_DEBUG_ASSERT(th->th.th_suspend_init_count == __kmp_fork_count);
    }
}
01639
01640
01641
01642
01643
01644
01645 void
01646 __kmp_suspend( int th_gtid, volatile kmp_uint *spinner, kmp_uint checker )
01647 {
01648 kmp_info_t *th = __kmp_threads[th_gtid];
01649 int status;
01650 kmp_uint old_spin;
01651
01652 KF_TRACE( 30, ("__kmp_suspend: T#%d enter for spin = %p\n", th_gtid, spinner ) );
01653
01654 __kmp_suspend_initialize_thread( th );
01655
01656 status = pthread_mutex_lock( &th->th.th_suspend_mx.m_mutex );
01657 KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
01658
01659 KF_TRACE( 10, ( "__kmp_suspend: T#%d setting sleep bit for spin(%p)\n",
01660 th_gtid, spinner ) );
01661
01662
01663
01664
01665 old_spin = __kmp_test_then_or32( (volatile kmp_int32 *) spinner,
01666 KMP_BARRIER_SLEEP_STATE );
01667
01668 KF_TRACE( 5, ( "__kmp_suspend: T#%d set sleep bit for spin(%p)==%d\n",
01669 th_gtid, spinner, *spinner ) );
01670
01671 if ( old_spin == checker ) {
01672 __kmp_test_then_and32( (volatile kmp_int32 *) spinner, ~(KMP_BARRIER_SLEEP_STATE) );
01673
01674 KF_TRACE( 5, ( "__kmp_suspend: T#%d false alarm, reset sleep bit for spin(%p)\n",
01675 th_gtid, spinner) );
01676 } else {
01677
01678
01679
01680
01681
01682 int deactivated = FALSE;
01683 TCW_PTR(th->th.th_sleep_loc, spinner);
01684 while ( TCR_4( *spinner ) & KMP_BARRIER_SLEEP_STATE ) {
01685 #ifdef DEBUG_SUSPEND
01686 char buffer[128];
01687 __kmp_suspend_count++;
01688 __kmp_print_cond( buffer, &th->th.th_suspend_cv );
01689 __kmp_printf( "__kmp_suspend: suspending T#%d: %s\n", th_gtid, buffer );
01690 #endif
01691
01692
01693
01694
01695
01696 if ( ! deactivated ) {
01697 th->th.th_active = FALSE;
01698 if ( th->th.th_active_in_pool ) {
01699 th->th.th_active_in_pool = FALSE;
01700 KMP_TEST_THEN_DEC32(
01701 (kmp_int32 *) &__kmp_thread_pool_active_nth );
01702 KMP_DEBUG_ASSERT( TCR_4(__kmp_thread_pool_active_nth) >= 0 );
01703 }
01704 deactivated = TRUE;
01705
01706
01707 }
01708
01709 #if USE_SUSPEND_TIMEOUT
01710 struct timespec now;
01711 struct timeval tval;
01712 int msecs;
01713
01714 status = gettimeofday( &tval, NULL );
01715 KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
01716 TIMEVAL_TO_TIMESPEC( &tval, &now );
01717
01718 msecs = (4*__kmp_dflt_blocktime) + 200;
01719 now.tv_sec += msecs / 1000;
01720 now.tv_nsec += (msecs % 1000)*1000;
01721
01722 KF_TRACE( 15, ( "__kmp_suspend: T#%d about to perform pthread_cond_timedwait\n",
01723 th_gtid ) );
01724 status = pthread_cond_timedwait( &th->th.th_suspend_cv.c_cond, &th->th.th_suspend_mx.m_mutex, & now );
01725 #else
01726 KF_TRACE( 15, ( "__kmp_suspend: T#%d about to perform pthread_cond_wait\n",
01727 th_gtid ) );
01728
01729 status = pthread_cond_wait( &th->th.th_suspend_cv.c_cond, &th->th.th_suspend_mx.m_mutex );
01730 #endif
01731
01732 if ( (status != 0) && (status != EINTR) && (status != ETIMEDOUT) ) {
01733 KMP_SYSFAIL( "pthread_cond_wait", status );
01734 }
01735 #ifdef KMP_DEBUG
01736 if (status == ETIMEDOUT) {
01737 if ( (*spinner) & KMP_BARRIER_SLEEP_STATE ) {
01738 KF_TRACE( 100, ( "__kmp_suspend: T#%d timeout wakeup\n", th_gtid ) );
01739 } else {
01740 KF_TRACE( 2, ( "__kmp_suspend: T#%d timeout wakeup, sleep bit not set!\n",
01741 th_gtid ) );
01742 }
01743 } else if ( (*spinner) & KMP_BARRIER_SLEEP_STATE ) {
01744 KF_TRACE( 100, ( "__kmp_suspend: T#%d spurious wakeup\n", th_gtid ) );
01745 }
01746 #endif
01747
01748 }
01749
01750
01751
01752
01753
01754 if ( deactivated ) {
01755 th->th.th_active = TRUE;
01756 if ( TCR_4(th->th.th_in_pool) ) {
01757 KMP_TEST_THEN_INC32(
01758 (kmp_int32 *) &__kmp_thread_pool_active_nth );
01759 th->th.th_active_in_pool = TRUE;
01760 }
01761 }
01762 }
01763
01764 #ifdef DEBUG_SUSPEND
01765 {
01766 char buffer[128];
01767 __kmp_print_cond( buffer, &th->th.th_suspend_cv);
01768 __kmp_printf( "__kmp_suspend: T#%d has awakened: %s\n", th_gtid, buffer );
01769 }
01770 #endif
01771
01772
01773 status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
01774 KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
01775
01776 KF_TRACE( 30, ("__kmp_suspend: T#%d exit\n", th_gtid ) );
01777 }
01778
01779
01780
01781
01782
01783
01784
/* Wake thread target_gtid sleeping in __kmp_suspend.  `spin` may be NULL, in
   which case the sleeper's published sleep location (th_sleep_loc) is used.
   All flag manipulation happens under the target's suspend mutex so it
   cannot race with the sleeper's own checks. */
void
__kmp_resume( int target_gtid, volatile kmp_uint *spin )
{
    kmp_info_t *th = __kmp_threads[target_gtid];
    int status;
    kmp_uint old_spin;

#ifdef KMP_DEBUG
    int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
#endif

    // NOTE(review): `gtid` is only defined under KMP_DEBUG; the KF_TRACE /
    // KMP_DEBUG_ASSERT uses below presumably compile away in release builds
    // — confirm against the trace macro definitions.
    KF_TRACE( 30, ( "__kmp_resume: T#%d wants to wakeup T#%d enter\n",
                    gtid, target_gtid ) );

    KMP_DEBUG_ASSERT( gtid != target_gtid );

    __kmp_suspend_initialize_thread( th );

    status = pthread_mutex_lock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
    if ( spin == NULL ) {
        // Caller did not say which flag the target sleeps on — use the
        // location the sleeper published in __kmp_suspend.
        spin = (volatile kmp_uint *)TCR_PTR(th->th.th_sleep_loc);
        if ( spin == NULL ) {
            // No sleep location: the target is not (or no longer) asleep.
            KF_TRACE( 5, ( "__kmp_resume: T#%d exiting, thread T#%d already awake - spin(%p)\n",
                           gtid, target_gtid, spin ) );

            status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
            KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
            return;
        }
    }

    // Clear the sleep bit; the pre-AND value tells us whether the target
    // was actually in (or entering) the sleep state.
    old_spin = __kmp_test_then_and32( (kmp_int32 volatile *) spin,
                                      ~( KMP_BARRIER_SLEEP_STATE ) );
    if ( ( old_spin & KMP_BARRIER_SLEEP_STATE ) == 0 ) {
        // Sleep bit was not set: nothing to signal.
        KF_TRACE( 5, ( "__kmp_resume: T#%d exiting, thread T#%d already awake - spin(%p): "
                       "%u => %u\n",
                       gtid, target_gtid, spin, old_spin, *spin ) );

        status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
        return;
    }
    TCW_PTR(th->th.th_sleep_loc, NULL);  // sleeper is being woken; clear its location

    KF_TRACE( 5, ( "__kmp_resume: T#%d about to wakeup T#%d, reset sleep bit for spin(%p): "
                   "%u => %u\n",
                   gtid, target_gtid, spin, old_spin, *spin ) );

#ifdef DEBUG_SUSPEND
    {
        char buffer[128];
        __kmp_print_cond( buffer, &th->th.th_suspend_cv );
        __kmp_printf( "__kmp_resume: T#%d resuming T#%d: %s\n", gtid, target_gtid, buffer );
    }
#endif

    // Signal while still holding the mutex so the wakeup cannot be lost.
    status = pthread_cond_signal( &th->th.th_suspend_cv.c_cond );
    KMP_CHECK_SYSFAIL( "pthread_cond_signal", status );
    status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
    KF_TRACE( 30, ( "__kmp_resume: T#%d exiting after signaling wake up for T#%d\n",
                    gtid, target_gtid ) );
}
01850
01851
01852
01853
01854
01855 void
01856 __kmp_yield( int cond )
01857 {
01858 if (cond && __kmp_yielding_on) {
01859 sched_yield();
01860 }
01861 }
01862
01863
01864
01865
/* Store this thread's global thread id in thread-local storage.  The value
   is biased by +1 so that pthread_getspecific's "no value" result (NULL,
   i.e. 0) can be distinguished from a legitimate gtid of 0. */
void
__kmp_gtid_set_specific( int gtid )
{
    int status;
    KMP_ASSERT( __kmp_init_runtime );
    status = pthread_setspecific( __kmp_gtid_threadprivate_key, (void*)(gtid+1) );
    KMP_CHECK_SYSFAIL( "pthread_setspecific", status );
}
01874
01875 int
01876 __kmp_gtid_get_specific()
01877 {
01878 int gtid;
01879 if ( !__kmp_init_runtime ) {
01880 KA_TRACE( 50, ("__kmp_get_specific: runtime shutdown, returning KMP_GTID_SHUTDOWN\n" ) );
01881 return KMP_GTID_SHUTDOWN;
01882 }
01883 gtid = (int)(size_t)pthread_getspecific( __kmp_gtid_threadprivate_key );
01884 if ( gtid == 0 ) {
01885 gtid = KMP_GTID_DNE;
01886 }
01887 else {
01888 gtid--;
01889 }
01890 KA_TRACE( 50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
01891 __kmp_gtid_threadprivate_key, gtid ));
01892 return gtid;
01893 }
01894
01895
01896
01897
/* Return the CPU time (user time of this process plus user time of reaped
   children) in seconds. */
double
__kmp_read_cpu_time( void )
{
    struct tms buffer;
    long ticks_per_sec;

    times( & buffer );

    // BUGFIX: times() reports values in clock ticks whose rate is
    // sysconf(_SC_CLK_TCK) — NOT CLOCKS_PER_SEC, which is the unit of
    // clock() and is fixed at 1000000 by POSIX.  Dividing by CLOCKS_PER_SEC
    // under-reported CPU time by ~10000x on typical Linux (_SC_CLK_TCK=100).
    ticks_per_sec = sysconf( _SC_CLK_TCK );
    if ( ticks_per_sec <= 0 ) {
        // Defensive fallback to the historical divisor if sysconf fails.
        ticks_per_sec = CLOCKS_PER_SEC;
    }
    return (buffer.tms_utime + buffer.tms_cutime) / (double) ticks_per_sec;
}
01908
/* Fill *info with this process' resource usage as reported by
   getrusage(RUSAGE_SELF).  Returns 0 on success, nonzero otherwise. */
int
__kmp_read_system_info( struct kmp_sys_info *info )
{
    int status;
    struct rusage r_usage;

    memset( info, 0, sizeof( *info ) );

    status = getrusage( RUSAGE_SELF, &r_usage);
    KMP_CHECK_SYSFAIL_ERRNO( "getrusage", status );

    info->maxrss = r_usage.ru_maxrss;       // maximum resident set size
    info->minflt = r_usage.ru_minflt;       // page faults serviced without I/O
    info->majflt = r_usage.ru_majflt;       // page faults serviced with I/O
    info->nswap = r_usage.ru_nswap;         // times swapped out of memory
    info->inblock = r_usage.ru_inblock;     // block input operations
    info->oublock = r_usage.ru_oublock;     // block output operations
    info->nvcsw = r_usage.ru_nvcsw;         // voluntary context switches
    info->nivcsw = r_usage.ru_nivcsw;       // involuntary context switches

    return (status != 0);
}
01931
01932
01933
01934
01935
01936 void
01937 __kmp_read_system_time( double *delta )
01938 {
01939 double t_ns;
01940 struct timeval tval;
01941 struct timespec stop;
01942 int status;
01943
01944 status = gettimeofday( &tval, NULL );
01945 KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
01946 TIMEVAL_TO_TIMESPEC( &tval, &stop );
01947 t_ns = TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start);
01948 *delta = (t_ns * 1e-9);
01949 }
01950
01951 void
01952 __kmp_clear_system_time( void )
01953 {
01954 struct timeval tval;
01955 int status;
01956 status = gettimeofday( &tval, NULL );
01957 KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
01958 TIMEVAL_TO_TIMESPEC( &tval, &__kmp_sys_timer_data.start );
01959 }
01960
01961
01962
01963
01964 #ifdef BUILD_TV
01965
/* Record a (global address, per-thread address) threadprivate pair for
   debugger inspection; entries form a per-thread linked list rooted in
   th_local.tv_data and published via the __kmp_tv_key TLS key. */
void
__kmp_tv_threadprivate_store( kmp_info_t *th, void *global_addr, void *thread_addr )
{
    struct tv_data *p;

    p = (struct tv_data *) __kmp_allocate( sizeof( *p ) );

    p->u.tp.global_addr = global_addr;
    p->u.tp.thread_addr = thread_addr;

    // NOTE(review): type tag 1 presumably marks "threadprivate" entries —
    // confirm against the tv_data consumers.
    p->type = (void *) 1;

    // Push onto this thread's list.
    p->next = th->th.th_local.tv_data;
    th->th.th_local.tv_data = p;

    if ( p->next == 0 ) {
        // First entry for this thread: publish the list head in TLS so the
        // debugger can locate it through __kmp_tv_key.
        int rc = pthread_setspecific( __kmp_tv_key, p );
        KMP_CHECK_SYSFAIL( "pthread_setspecific", rc );
    }
}
01986
01987 #endif
01988
01989
01990
01991
/* Return the number of logical processors currently online, falling back to
   2 when the platform query fails or reports a nonsensical value. */
static int
__kmp_get_xproc( void ) {

    int r = 0;

#if KMP_OS_LINUX

    r = sysconf( _SC_NPROCESSORS_ONLN );

#elif KMP_OS_DARWIN

    // Query Mach host info; info.avail_cpus is the number of CPUs available
    // to the host.
    kern_return_t rc;
    host_basic_info_data_t info;
    mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
    rc = host_info( mach_host_self(), HOST_BASIC_INFO, (host_info_t) & info, & num );
    if ( rc == 0 && num == HOST_BASIC_INFO_COUNT ) {
        r = info.avail_cpus;
    } else {
        // Query failed: warn and fall through to the assumed CPU count.
        KMP_WARNING( CantGetNumAvailCPU );
        KMP_INFORM( AssumedNumCPU );
    };

#else

    #error "Unknown or unsupported OS."

#endif

    return r > 0 ? r : 2;   /* guard against bad values; assume at least 2 CPUs */

}
02028
02029
02030
02031
02032
02033 static
02034 kmp_uint64
02035 __kmp_get_frequency_from_proc(
02036 ) {
02037
02038 kmp_uint64 result = ~ 0;
02039 FILE * file = NULL;
02040 double freq = HUGE_VAL;
02041 int rc;
02042
02043
02044
02045
02046 file = fopen( "/proc/cpuinfo", "r" );
02047 if ( file == NULL ) {
02048 return result;
02049 };
02050 for ( ; ; ) {
02051 rc = fscanf( file, "cpu MHz : %lf\n", & freq );
02052 if ( rc == 1 ) {
02053 break;
02054 };
02055 fscanf( file, "%*[^\n]\n" );
02056 };
02057 fclose( file );
02058 if ( freq == HUGE_VAL || freq <= 0 ) {
02059 return result;
02060 };
02061 result = (kmp_uint64)( freq * 1.0E+6 );
02062 KA_TRACE( 5, ( "cpu frequency from /proc/cpuinfo: %" KMP_UINT64_SPEC "\n", result ) );
02063 return result;
02064 }
02065
02066
02067 void
02068 __kmp_runtime_initialize( void )
02069 {
02070 int status;
02071 pthread_mutexattr_t mutex_attr;
02072 pthread_condattr_t cond_attr;
02073
02074 if ( __kmp_init_runtime ) {
02075 return;
02076 };
02077
02078 #if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 )
02079 if ( ! __kmp_cpuinfo.initialized ) {
02080 __kmp_query_cpuid( &__kmp_cpuinfo );
02081 };
02082 #endif
02083
02084 if ( __kmp_cpu_frequency == 0 ) {
02085
02086 __kmp_cpu_frequency = __kmp_cpuinfo.frequency;
02087 if ( __kmp_cpu_frequency == 0 || __kmp_cpu_frequency == ~ 0 ) {
02088
02089 __kmp_cpu_frequency = __kmp_get_frequency_from_proc();
02090 };
02091 };
02092
02093 __kmp_xproc = __kmp_get_xproc();
02094
02095 if ( sysconf( _SC_THREADS ) ) {
02096
02097
02098 __kmp_sys_max_nth = sysconf( _SC_THREAD_THREADS_MAX );
02099 if ( __kmp_sys_max_nth == -1 ) {
02100
02101 __kmp_sys_max_nth = INT_MAX;
02102 }
02103 else if ( __kmp_sys_max_nth <= 1 ) {
02104
02105 __kmp_sys_max_nth = KMP_MAX_NTH;
02106 }
02107
02108
02109 __kmp_sys_min_stksize = sysconf( _SC_THREAD_STACK_MIN );
02110 if ( __kmp_sys_min_stksize <= 1 ) {
02111 __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
02112 }
02113 }
02114
02115
02116 __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;
02117
02118
02119 #ifdef BUILD_TV
02120 {
02121 int rc = pthread_key_create( & __kmp_tv_key, 0 );
02122 KMP_CHECK_SYSFAIL( "pthread_key_create", rc );
02123 }
02124 #endif
02125
02126 status = pthread_key_create( &__kmp_gtid_threadprivate_key, __kmp_internal_end_dest );
02127 KMP_CHECK_SYSFAIL( "pthread_key_create", status );
02128 status = pthread_mutexattr_init( & mutex_attr );
02129 KMP_CHECK_SYSFAIL( "pthread_mutexattr_init", status );
02130 status = pthread_mutex_init( & __kmp_wait_mx.m_mutex, & mutex_attr );
02131 KMP_CHECK_SYSFAIL( "pthread_mutex_init", status );
02132 status = pthread_condattr_init( & cond_attr );
02133 KMP_CHECK_SYSFAIL( "pthread_condattr_init", status );
02134 status = pthread_cond_init( & __kmp_wait_cv.c_cond, & cond_attr );
02135 KMP_CHECK_SYSFAIL( "pthread_cond_init", status );
02136
02137 __kmp_init_runtime = TRUE;
02138 }
02139
/* Tear down the process-level objects created by __kmp_runtime_initialize:
   TLS keys, the global wait mutex/CV, and (Linux) affinity state.
   Idempotent: returns immediately when the runtime is not initialized. */
void
__kmp_runtime_destroy( void )
{
    int status;

    if ( ! __kmp_init_runtime ) {
        return; // Nothing to do.
    };

    status = pthread_key_delete( __kmp_gtid_threadprivate_key );
    KMP_CHECK_SYSFAIL( "pthread_key_delete", status );
#ifdef BUILD_TV
    status = pthread_key_delete( __kmp_tv_key );
    KMP_CHECK_SYSFAIL( "pthread_key_delete", status );
#endif

    // EBUSY is tolerated on both destroys: a thread may still be blocked on
    // these objects during shutdown.
    status = pthread_mutex_destroy( & __kmp_wait_mx.m_mutex );
    if ( status != 0 && status != EBUSY ) {
        KMP_SYSFAIL( "pthread_mutex_destroy", status );
    }
    status = pthread_cond_destroy( & __kmp_wait_cv.c_cond );
    if ( status != 0 && status != EBUSY ) {
        KMP_SYSFAIL( "pthread_cond_destroy", status );
    }
#if KMP_OS_LINUX
    __kmp_affinity_uninitialize();
#elif KMP_OS_DARWIN
    // Affinity is not supported on Darwin — nothing to clean up.
#else
    #error "Unknown or unsupported OS"
#endif

    __kmp_init_runtime = FALSE;
}
02175
02176
02177
02178
/* Put the calling thread to sleep for approximately `millis` milliseconds.
   FIX: the old implementation did sleep((millis+500)/1000), which rounds to
   whole seconds and turns any request below 500 ms into a no-op; keep the
   whole-second part in sleep() and handle the sub-second remainder with
   usleep() (remainder is < 1,000,000 us, within usleep's portable range). */
void
__kmp_thread_sleep( int millis )
{
    if ( millis <= 0 ) {
        return;
    }
    sleep( millis / 1000 );
    usleep( ( millis % 1000 ) * 1000 );
}
02184
02185
/* Store in *t an elapsed-time reading in seconds (arbitrary epoch).  The
   common path uses gettimeofday (wall clock); the FIX_SGI_CLOCK build uses
   clock_gettime with CLOCK_PROCESS_CPUTIME_ID, i.e. process CPU time. */
void
__kmp_elapsed( double *t )
{
    int status;
# ifdef FIX_SGI_CLOCK
    struct timespec ts;

    status = clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &ts );
    KMP_CHECK_SYSFAIL_ERRNO( "clock_gettime", status );
    *t = (double) ts.tv_nsec * (1.0 / (double) NSEC_PER_SEC) +
        (double) ts.tv_sec;
# else
    struct timeval tv;

    status = gettimeofday( & tv, NULL );
    KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
    *t = (double) tv.tv_usec * (1.0 / (double) USEC_PER_SEC) +
        (double) tv.tv_sec;
# endif
}
02206
02207
/* Store in *t the resolution (tick size) of __kmp_elapsed(), in seconds. */
void
__kmp_elapsed_tick( double *t )
{
    *t = 1.0 / (double) CLOCKS_PER_SEC;
}
02213
02214
02215
02216
02217
/* Return 1 if addr falls in an accessible mapping of the current process,
   0 otherwise.  On Linux this additionally requires the mapping to be both
   readable and writable; the Darwin path only verifies readability via
   vm_read_overwrite. */
int
__kmp_is_address_mapped( void * addr ) {

    int found = 0;
    int rc;

#if KMP_OS_LINUX

    /* Parse /proc/<pid>/maps.  Each line has the form
       "beginning-ending perms offset dev inode pathname"; only the address
       range and the 4-character permission field are read. */
    char * name = __kmp_str_format( "/proc/%d/maps", getpid() );
    FILE * file = NULL;

    file = fopen( name, "r" );
    KMP_ASSERT( file != NULL );

    for ( ; ; ) {

        void * beginning = NULL;
        void * ending = NULL;
        char perms[ 5 ];

        rc = fscanf( file, "%p-%p %4s %*[^\n]\n", & beginning, & ending, perms );
        if ( rc == EOF ) {
            break;
        };
        // All three fields must parse; perms is always 4 chars in maps.
        KMP_ASSERT( rc == 3 && strlen( perms ) == 4 );

        // Ending address is not included in the region, but beginning is.
        if ( ( addr >= beginning ) && ( addr < ending ) ) {
            // Keep only the first two permission characters and require "rw".
            perms[ 2 ] = 0;
            if ( strcmp( perms, "rw" ) == 0 ) {
                // Memory we are looking for should be readable and writable.
                found = 1;
            };
            break;
        };

    };

    // Free resources.
    fclose( file );
    KMP_INTERNAL_FREE( name );

#elif KMP_OS_DARWIN

    /* On Darwin the /proc pseudo-filesystem is unavailable.  Probe the
       address by attempting to read one byte through the Mach VM interface;
       success implies the address is mapped (and readable — writability is
       not checked here). */
    int buffer;
    vm_size_t count;
    rc =
        vm_read_overwrite(
            mach_task_self(),           // task to read memory of
            (vm_address_t)( addr ),     // address to read from
            1,                          // number of bytes to read
            (vm_address_t)( & buffer ), // address of buffer to save read bytes in
            & count                     // address of var to save number of read bytes in
        );
    if ( rc == 0 ) {
        // Read completed successfully: address is mapped.
        found = 1;
    };

#else

    #error "Unknown or unsupported OS"

#endif

    return found;

}
02296
02297 #ifdef USE_LOAD_BALANCE
02298
02299
02300 # if KMP_OS_DARWIN
02301
02302
02303
02304
02305
02306
02307 int
02308 __kmp_get_load_balance( int max )
02309 {
02310 double averages[3];
02311 int ret_avg = 0;
02312
02313 int res = getloadavg( averages, 3 );
02314
02315
02316
02317
02318 if ( __kmp_load_balance_interval < 180 && ( res >= 1 ) ) {
02319 ret_avg = averages[0];
02320 } else if ( ( __kmp_load_balance_interval >= 180
02321 && __kmp_load_balance_interval < 600 ) && ( res >= 2 ) ) {
02322 ret_avg = averages[1];
02323 } else if ( ( __kmp_load_balance_interval >= 600 ) && ( res == 3 ) ) {
02324 ret_avg = averages[2];
02325 } else {
02326 return -1;
02327 }
02328
02329 return ret_avg;
02330 }
02331
02332 # else // Linux* OS
02333
02334
02335
02336
02337 int
02338 __kmp_get_load_balance( int max )
02339 {
02340 static int permanent_error = 0;
02341
02342 static int glb_running_threads = 0;
02343 static double glb_call_time = 0;
02344
02345 int running_threads = 0;
02346
02347 DIR * proc_dir = NULL;
02348 struct dirent * proc_entry = NULL;
02349
02350 kmp_str_buf_t task_path;
02351 DIR * task_dir = NULL;
02352 struct dirent * task_entry = NULL;
02353 int task_path_fixed_len;
02354
02355 kmp_str_buf_t stat_path;
02356 int stat_file = -1;
02357 int stat_path_fixed_len;
02358
02359 int total_processes = 0;
02360 int total_threads = 0;
02361
02362 double call_time = 0.0;
02363
02364 __kmp_str_buf_init( & task_path );
02365 __kmp_str_buf_init( & stat_path );
02366
02367 __kmp_elapsed( & call_time );
02368
02369 if ( glb_call_time &&
02370 ( call_time - glb_call_time < __kmp_load_balance_interval ) ) {
02371 running_threads = glb_running_threads;
02372 goto finish;
02373 }
02374
02375 glb_call_time = call_time;
02376
02377
02378 if ( permanent_error ) {
02379 running_threads = -1;
02380 goto finish;
02381 };
02382
02383 if ( max <= 0 ) {
02384 max = INT_MAX;
02385 };
02386
02387
02388 proc_dir = opendir( "/proc" );
02389 if ( proc_dir == NULL ) {
02390
02391
02392 running_threads = -1;
02393 permanent_error = 1;
02394 goto finish;
02395 };
02396
02397
02398 __kmp_str_buf_cat( & task_path, "/proc/", 6 );
02399 task_path_fixed_len = task_path.used;
02400
02401 proc_entry = readdir( proc_dir );
02402 while ( proc_entry != NULL ) {
02403
02404
02405 if ( proc_entry->d_type == DT_DIR && isdigit( proc_entry->d_name[ 0 ] ) ) {
02406
02407 ++ total_processes;
02408
02409
02410
02411
02412
02413 KMP_DEBUG_ASSERT( total_processes != 1 || strcmp( proc_entry->d_name, "1" ) == 0 );
02414
02415
02416 task_path.used = task_path_fixed_len;
02417 __kmp_str_buf_cat( & task_path, proc_entry->d_name, strlen( proc_entry->d_name ) );
02418 __kmp_str_buf_cat( & task_path, "/task", 5 );
02419
02420 task_dir = opendir( task_path.str );
02421 if ( task_dir == NULL ) {
02422
02423
02424
02425
02426
02427
02428
02429 if ( strcmp( proc_entry->d_name, "1" ) == 0 ) {
02430 running_threads = -1;
02431 permanent_error = 1;
02432 goto finish;
02433 };
02434 } else {
02435
02436 __kmp_str_buf_clear( & stat_path );
02437 __kmp_str_buf_cat( & stat_path, task_path.str, task_path.used );
02438 __kmp_str_buf_cat( & stat_path, "/", 1 );
02439 stat_path_fixed_len = stat_path.used;
02440
02441 task_entry = readdir( task_dir );
02442 while ( task_entry != NULL ) {
02443
02444 if ( proc_entry->d_type == DT_DIR && isdigit( task_entry->d_name[ 0 ] ) ) {
02445
02446 ++ total_threads;
02447
02448
02449
02450
02451 stat_path.used = stat_path_fixed_len;
02452 __kmp_str_buf_cat( & stat_path, task_entry->d_name, strlen( task_entry->d_name ) );
02453 __kmp_str_buf_cat( & stat_path, "/stat", 5 );
02454
02455
02456
02457 stat_file = open( stat_path.str, O_RDONLY );
02458 if ( stat_file == -1 ) {
02459
02460
02461 } else {
02462
02463
02464
02465
02466
02467
02468
02469
02470
02471
02472
02473
02474
02475
02476
02477
02478
02479
02480
02481
02482
02483
02484
02485
02486
02487
02488
02489 char buffer[ 65 ];
02490 int len;
02491 len = read( stat_file, buffer, sizeof( buffer ) - 1 );
02492 if ( len >= 0 ) {
02493 buffer[ len ] = 0;
02494
02495
02496
02497
02498 char * close_parent = strstr( buffer, ") " );
02499 if ( close_parent != NULL ) {
02500 char state = * ( close_parent + 2 );
02501 if ( state == 'R' ) {
02502 ++ running_threads;
02503 if ( running_threads >= max ) {
02504 goto finish;
02505 };
02506 };
02507 };
02508 };
02509 close( stat_file );
02510 stat_file = -1;
02511 };
02512 };
02513 task_entry = readdir( task_dir );
02514 };
02515 closedir( task_dir );
02516 task_dir = NULL;
02517 };
02518 };
02519 proc_entry = readdir( proc_dir );
02520 };
02521
02522
02523
02524
02525
02526
02527 KMP_DEBUG_ASSERT( running_threads > 0 );
02528 if ( running_threads <= 0 ) {
02529 running_threads = 1;
02530 }
02531
02532 finish:
02533 if ( proc_dir != NULL ) {
02534 closedir( proc_dir );
02535 };
02536 __kmp_str_buf_free( & task_path );
02537 if ( task_dir != NULL ) {
02538 closedir( task_dir );
02539 };
02540 __kmp_str_buf_free( & stat_path );
02541 if ( stat_file != -1 ) {
02542 close( stat_file );
02543 };
02544
02545 glb_running_threads = running_threads;
02546
02547 return running_threads;
02548
02549 }
02550
02551 # endif // KMP_OS_DARWIN
02552
02553 #endif // USE_LOAD_BALANCE
02554
02555
02556