#ifndef KMP_LOCK_H
#define KMP_LOCK_H

#include <limits.h>
#include <stddef.h>

#include "kmp_os.h"
#include "kmp_debug.h"

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

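// KMP_PAD() rounds sizeof(type) up so a padded object occupies a whole number
// of chunks of size sz (used below to pad locks out to a full cache line).
// KMP_GTID_DNE ("does not exist") is the gtid value used when a lock is taken
// by code that does not yet have a valid global thread id.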
#define KMP_PAD(type, sz) (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
#define KMP_GTID_DNE (-2)

struct ident;
typedef struct ident ident_t;
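
// Sizes the user-visible OMP lock objects may occupy.  With GOMP compatibility
// on Linux, omp_lock_t must match libgomp's int-sized layout; otherwise both
// simple and nestable locks get a pointer-sized slot.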
#if KMP_OS_LINUX && defined(KMP_GOMP_COMPAT)
# define OMP_LOCK_T_SIZE sizeof(int)
# define OMP_NEST_LOCK_T_SIZE sizeof(void *)
#else
# define OMP_LOCK_T_SIZE sizeof(void *)
# define OMP_NEST_LOCK_T_SIZE sizeof(void *)
#endif

#define OMP_CRITICAL_SIZE sizeof(void *)
#define INTEL_CRITICAL_SIZE 32

typedef kmp_uint32 kmp_lock_flags_t;

#define kmp_lf_critical_section 1
#define kmp_lf_atomic 2

typedef kmp_uint32 kmp_lock_index_t;

// Freed user locks are threaded onto a free list through this pool record,
// which overlays the lock storage itself.
struct kmp_lock_pool {
    union kmp_user_lock *next;
    kmp_lock_index_t index;
};

typedef struct kmp_lock_pool kmp_lock_pool_t;

extern void __kmp_validate_locks( void );
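
// ----------------------------------------------------------------------------
// Test-and-set (TAS) locks.
//
// poll is 0 while the lock is free and (gtid + 1) of the owner while it is
// held; depth_locked is the nesting level for nestable locks and -1 for
// simple locks.
// ----------------------------------------------------------------------------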
struct kmp_base_tas_lock {
    volatile kmp_int32 poll;     // 0 => unlocked; otherwise (gtid + 1) of owning thread
    kmp_int32 depth_locked;      // depth locked, for nestable locks only
};

typedef struct kmp_base_tas_lock kmp_base_tas_lock_t;

union kmp_tas_lock {
    kmp_base_tas_lock_t lk;
    kmp_lock_pool_t pool;        // make certain the union is large enough to recycle
    double lk_align;             // worst-case alignment; no cache-line padding
};

typedef union kmp_tas_lock kmp_tas_lock_t;

// Static initializer for TAS lock variables:
//    kmp_tas_lock_t xlock = KMP_TAS_LOCK_INITIALIZER( xlock );
#define KMP_TAS_LOCK_INITIALIZER( lock ) { { 0, 0 } }

extern void __kmp_acquire_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_tas_lock( kmp_tas_lock_t *lck );
extern void __kmp_destroy_tas_lock( kmp_tas_lock_t *lck );

extern void __kmp_acquire_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern int __kmp_release_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_tas_lock( kmp_tas_lock_t *lck );
extern void __kmp_destroy_nested_tas_lock( kmp_tas_lock_t *lck );

// Values returned by the nested release routines: RELEASED when the nesting
// count reached zero and the lock was actually released, HELD when the caller
// still holds it at an inner nesting level.
#define KMP_NESTED_LOCK_RELEASED 1
#define KMP_NESTED_LOCK_HELD 0
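
// Minimal usage sketch for the simple TAS API (hypothetical caller; `l' and
// `gtid' are illustrative names):
//    kmp_tas_lock_t l;
//    __kmp_init_tas_lock( &l );
//    __kmp_acquire_tas_lock( &l, gtid );
//    /* ... critical section ... */
//    __kmp_release_tas_lock( &l, gtid );
//    __kmp_destroy_tas_lock( &l );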

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)

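// Futex locks: like TAS locks, but contended waiters block in the kernel via
// the Linux futex syscall instead of spinning, so they are only available on
// Linux (x86 / x86_64 here).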
struct kmp_base_futex_lock {
    volatile kmp_int32 poll;     // 0 => unlocked; nonzero while held
    kmp_int32 depth_locked;      // depth locked, for nestable locks only
};

typedef struct kmp_base_futex_lock kmp_base_futex_lock_t;

union kmp_futex_lock {
    kmp_base_futex_lock_t lk;
    kmp_lock_pool_t pool;        // make certain the union is large enough to recycle
    double lk_align;             // worst-case alignment; no cache-line padding
};

typedef union kmp_futex_lock kmp_futex_lock_t;

// Static initializer for futex lock variables:
//    kmp_futex_lock_t xlock = KMP_FUTEX_LOCK_INITIALIZER( xlock );
#define KMP_FUTEX_LOCK_INITIALIZER( lock ) { { 0, 0 } }

extern void __kmp_acquire_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_futex_lock( kmp_futex_lock_t *lck );
extern void __kmp_destroy_futex_lock( kmp_futex_lock_t *lck );

extern void __kmp_acquire_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern int __kmp_release_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_futex_lock( kmp_futex_lock_t *lck );
extern void __kmp_destroy_nested_futex_lock( kmp_futex_lock_t *lck );

#endif // KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
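
// ----------------------------------------------------------------------------
// Ticket locks (FIFO): an acquiring thread takes a ticket from next_ticket and
// spins until now_serving reaches that ticket, so acquisition order is first
// come, first served.
// ----------------------------------------------------------------------------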
struct kmp_base_ticket_lock {
    // `initialized' must be the first entry in the lock data structure!
    volatile union kmp_ticket_lock * initialized;  // points to the lock union if in initialized state
    ident_t const * location;                      // source code location of omp_init_lock()
    volatile kmp_uint32 next_ticket;               // ticket number to give to the next acquiring thread
    volatile kmp_uint32 now_serving;               // ticket number of the thread holding the lock
    volatile kmp_int32 owner_id;                   // (gtid + 1) of owning thread, 0 if unlocked
    kmp_int32 depth_locked;                        // depth locked, for nestable locks only
    kmp_lock_flags_t flags;                        // lock specifics, e.g. critical-section lock
};

typedef struct kmp_base_ticket_lock kmp_base_ticket_lock_t;

union KMP_ALIGN_CACHE kmp_ticket_lock {
    kmp_base_ticket_lock_t lk;
    kmp_lock_pool_t pool;
    double lk_align;                               // worst-case alignment
    char lk_pad[ KMP_PAD( kmp_base_ticket_lock_t, CACHE_LINE ) ];  // pad to a full cache line
};

typedef union kmp_ticket_lock kmp_ticket_lock_t;

// Static initializer for ticket lock variables:
//    kmp_ticket_lock_t xlock = KMP_TICKET_LOCK_INITIALIZER( xlock );
#define KMP_TICKET_LOCK_INITIALIZER( lock ) { { (kmp_ticket_lock_t *) & (lock), NULL, 0, 0, 0, -1 } }

extern void __kmp_acquire_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_ticket_lock_with_cheks( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_ticket_lock( kmp_ticket_lock_t *lck );
extern void __kmp_destroy_ticket_lock( kmp_ticket_lock_t *lck );

extern void __kmp_acquire_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern int __kmp_release_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_ticket_lock( kmp_ticket_lock_t *lck );
extern void __kmp_destroy_nested_ticket_lock( kmp_ticket_lock_t *lck );
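
// ----------------------------------------------------------------------------
// Queuing locks: waiting threads are linked into a wait queue through head_id
// and tail_id ((gtid + 1) of the head and tail waiters, 0 when the queue is
// empty), so each waiter spins on its own location rather than on one shared
// word.
// ----------------------------------------------------------------------------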
struct kmp_base_queuing_lock {

    // `initialized' must be the first entry in the lock data structure!
    volatile union kmp_queuing_lock *initialized;  // points to the lock union if in initialized state

    ident_t const * location;        // source code location of omp_init_lock()

    KMP_ALIGN( 8 )                   // tail_id must be 8-byte aligned!

    volatile kmp_int32 tail_id;      // (gtid + 1) of thread at tail of wait queue, 0 if empty
                                     // no padding allowed here: head_id/tail_id form one 8-byte CAS target
    volatile kmp_int32 head_id;      // (gtid + 1) of thread at head of wait queue, 0 if empty

    volatile kmp_uint32 next_ticket; // ticket number to give to the next acquiring thread
    volatile kmp_uint32 now_serving; // ticket number of the thread holding the lock
    volatile kmp_int32 owner_id;     // (gtid + 1) of owning thread, 0 if unlocked
    kmp_int32 depth_locked;          // depth locked, for nestable locks only

    kmp_lock_flags_t flags;          // lock specifics, e.g. critical-section lock
};

typedef struct kmp_base_queuing_lock kmp_base_queuing_lock_t;

// Verify at compile time that tail_id is 8-byte aligned within the struct.
KMP_BUILD_ASSERT( offsetof( kmp_base_queuing_lock_t, tail_id ) % 8 == 0 );

union KMP_ALIGN_CACHE kmp_queuing_lock {
    kmp_base_queuing_lock_t lk;
    kmp_lock_pool_t pool;
    double lk_align;
    char lk_pad[ KMP_PAD( kmp_base_queuing_lock_t, CACHE_LINE ) ];
};

typedef union kmp_queuing_lock kmp_queuing_lock_t;

extern void __kmp_acquire_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_queuing_lock( kmp_queuing_lock_t *lck );
extern void __kmp_destroy_queuing_lock( kmp_queuing_lock_t *lck );

// OMPT_SUPPORT is normally set by the build system; default it only if unset
// so a build-provided value is never overridden here.
#ifndef OMPT_SUPPORT
# define OMPT_SUPPORT 1
#endif
#if OMPT_SUPPORT
extern void __kmp_set_queuing_lock_flags( kmp_queuing_lock_t *lck, kmp_lock_flags_t flags );
#endif

extern void __kmp_acquire_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern int __kmp_release_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_queuing_lock( kmp_queuing_lock_t *lck );
extern void __kmp_destroy_nested_queuing_lock( kmp_queuing_lock_t *lck );
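
// ----------------------------------------------------------------------------
// DRDPA (dynamically reconfigurable distributed polling area) ticket locks:
// each waiting thread spins on its own entry of the polls[] array, selected as
// polls[ticket & mask], and the polling area is reallocated (old_polls,
// cleanup_ticket) as the number of contending threads grows.
// ----------------------------------------------------------------------------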
struct kmp_base_drdpa_lock {
    // The fields on the first cache line are written only when the lock is
    // initialized or the polling area is reconfigured, so they normally stay
    // resident in every waiter's cache.
    //
    // `initialized' must be the first entry in the lock data structure!
    KMP_ALIGN_CACHE

    volatile union kmp_drdpa_lock * initialized;   // points to the lock union if in initialized state
    ident_t const * location;                      // source code location of omp_init_lock()
    volatile struct kmp_lock_poll {
        kmp_uint64 poll;
    } * volatile polls;                            // one poll entry per potential waiter
    volatile kmp_uint64 mask;                      // num_polls - 1; used for the ticket -> poll mapping
    kmp_uint64 cleanup_ticket;                     // ticket value after which old_polls can be freed
    volatile struct kmp_lock_poll * old_polls;     // previous polling area awaiting deallocation
    kmp_uint32 num_polls;                          // number of entries in polls[]; a power of 2

    // next_ticket lives on its own cache line because it is invalidated every
    // time a thread takes a new ticket.
    KMP_ALIGN_CACHE

    volatile kmp_uint64 next_ticket;

    // now_serving holds the owner's ticket while the lock is held.
    KMP_ALIGN_CACHE

    kmp_uint64 now_serving;          // does not need to be volatile
    volatile kmp_uint32 owner_id;    // (gtid + 1) of owning thread, 0 if unlocked
    kmp_int32 depth_locked;          // depth locked, for nestable locks only
    kmp_lock_flags_t flags;          // lock specifics, e.g. critical-section lock
};

typedef struct kmp_base_drdpa_lock kmp_base_drdpa_lock_t;

union KMP_ALIGN_CACHE kmp_drdpa_lock {
    kmp_base_drdpa_lock_t lk;
    kmp_lock_pool_t pool;
    double lk_align;
    char lk_pad[ KMP_PAD( kmp_base_drdpa_lock_t, CACHE_LINE ) ];
};

typedef union kmp_drdpa_lock kmp_drdpa_lock_t;

extern void __kmp_acquire_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_drdpa_lock( kmp_drdpa_lock_t *lck );
extern void __kmp_destroy_drdpa_lock( kmp_drdpa_lock_t *lck );

extern void __kmp_acquire_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern int __kmp_release_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_drdpa_lock( kmp_drdpa_lock_t *lck );
extern void __kmp_destroy_nested_drdpa_lock( kmp_drdpa_lock_t *lck );
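
// ----------------------------------------------------------------------------
// Bootstrap locks: serialize runtime initialization work that may run before
// the calling thread has a global thread id, so they are acquired with
// KMP_GTID_DNE.  Ticket locks are used because they can be statically
// initialized and need no per-thread runtime state.
// ----------------------------------------------------------------------------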
typedef kmp_ticket_lock_t kmp_bootstrap_lock_t;

#define KMP_BOOTSTRAP_LOCK_INITIALIZER( lock ) KMP_TICKET_LOCK_INITIALIZER( (lock) )

static inline void
__kmp_acquire_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    __kmp_acquire_ticket_lock( lck, KMP_GTID_DNE );
}

static inline int
__kmp_test_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    return __kmp_test_ticket_lock( lck, KMP_GTID_DNE );
}

static inline void
__kmp_release_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    __kmp_release_ticket_lock( lck, KMP_GTID_DNE );
}

static inline void
__kmp_init_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    __kmp_init_ticket_lock( lck );
}

static inline void
__kmp_destroy_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    __kmp_destroy_ticket_lock( lck );
}
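
// Minimal usage sketch (hypothetical caller; `guard' is an illustrative name):
//    static kmp_bootstrap_lock_t guard = KMP_BOOTSTRAP_LOCK_INITIALIZER( guard );
//    __kmp_acquire_bootstrap_lock( &guard );
//    /* ... one-time initialization ... */
//    __kmp_release_bootstrap_lock( &guard );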
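
// ----------------------------------------------------------------------------
// Internal runtime locks: used by the runtime once threads have gtids;
// currently mapped onto ticket locks.
// ----------------------------------------------------------------------------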
typedef kmp_ticket_lock_t kmp_lock_t;

static inline void
__kmp_acquire_lock( kmp_lock_t *lck, kmp_int32 gtid )
{
    __kmp_acquire_ticket_lock( lck, gtid );
}

static inline int
__kmp_test_lock( kmp_lock_t *lck, kmp_int32 gtid )
{
    return __kmp_test_ticket_lock( lck, gtid );
}

static inline void
__kmp_release_lock( kmp_lock_t *lck, kmp_int32 gtid )
{
    __kmp_release_ticket_lock( lck, gtid );
}

static inline void
__kmp_init_lock( kmp_lock_t *lck )
{
    __kmp_init_ticket_lock( lck );
}

static inline void
__kmp_destroy_lock( kmp_lock_t *lck )
{
    __kmp_destroy_ticket_lock( lck );
}
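
// ----------------------------------------------------------------------------
// User locks: back omp_lock_t / omp_nest_lock_t and #pragma omp critical.  The
// lock kind is selected at runtime (__kmp_user_lock_kind) and every operation
// is dispatched through the function pointers installed by
// __kmp_set_user_lock_vptrs().
// ----------------------------------------------------------------------------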
enum kmp_lock_kind {
    lk_default = 0,
    lk_tas,
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
    lk_futex,
#endif
    lk_ticket,
    lk_queuing,
    lk_drdpa
};

typedef enum kmp_lock_kind kmp_lock_kind_t;

extern kmp_lock_kind_t __kmp_user_lock_kind;

union kmp_user_lock {
    kmp_tas_lock_t tas;
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
    kmp_futex_lock_t futex;
#endif
    kmp_ticket_lock_t ticket;
    kmp_queuing_lock_t queuing;
    kmp_drdpa_lock_t drdpa;
    kmp_lock_pool_t pool;
};

typedef union kmp_user_lock *kmp_user_lock_p;

extern size_t __kmp_base_user_lock_size;
extern size_t __kmp_user_lock_size;

extern kmp_int32 ( *__kmp_get_user_lock_owner_ )( kmp_user_lock_p lck );

static inline kmp_int32
__kmp_get_user_lock_owner( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_get_user_lock_owner_ != NULL );
    return ( *__kmp_get_user_lock_owner_ )( lck );
}

extern void ( *__kmp_acquire_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

// Synchronization-instrumentation hooks; provide no-op stubs only if real
// definitions have not already been supplied elsewhere.
#ifndef KMP_FSYNC_PREPARE
# define KMP_FSYNC_PREPARE(x) ((void)0)
#endif
#ifndef KMP_FSYNC_ACQUIRED
# define KMP_FSYNC_ACQUIRED(x) ((void)0)
#endif
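
// On Linux x86/x86_64 the acquire/test paths below are expanded as macros or
// inline functions that handle the common TAS case directly, avoiding an
// indirect call; all other targets always go through the function pointers.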
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)

#define __kmp_acquire_user_lock_with_checks(lck,gtid)                          \
    if (__kmp_user_lock_kind == lk_tas) {                                      \
        if ( __kmp_env_consistency_check ) {                                   \
            char const * const func = "omp_set_lock";                          \
            if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_LOCK_T_SIZE )              \
                && lck->tas.lk.depth_locked != -1 ) {                          \
                KMP_FATAL( LockNestableUsedAsSimple, func );                   \
            }                                                                  \
            if ( ( gtid >= 0 ) && ( lck->tas.lk.poll - 1 == gtid ) ) {         \
                KMP_FATAL( LockIsAlreadyOwned, func );                         \
            }                                                                  \
        }                                                                      \
        if ( __sync_val_compare_and_swap( &(lck->tas.lk.poll), 0, gtid + 1 ) ) { \
            kmp_uint32 spins;                                                  \
            KMP_FSYNC_PREPARE( lck );                                          \
            KMP_INIT_YIELD( spins );                                           \
            if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) { \
                KMP_YIELD( TRUE );                                             \
            } else {                                                           \
                KMP_YIELD_SPIN( spins );                                       \
            }                                                                  \
            while ( __sync_val_compare_and_swap( &(lck->tas.lk.poll), 0, gtid + 1 ) ) { \
                if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) { \
                    KMP_YIELD( TRUE );                                         \
                } else {                                                       \
                    KMP_YIELD_SPIN( spins );                                   \
                }                                                              \
            }                                                                  \
        }                                                                      \
        KMP_FSYNC_ACQUIRED( lck );                                             \
    } else {                                                                   \
        KMP_DEBUG_ASSERT( __kmp_acquire_user_lock_with_checks_ != NULL );      \
        ( *__kmp_acquire_user_lock_with_checks_ )( lck, gtid );                \
    }

#else
static inline void
__kmp_acquire_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_acquire_user_lock_with_checks_ != NULL );
    ( *__kmp_acquire_user_lock_with_checks_ )( lck, gtid );
}
#endif

extern int ( *__kmp_test_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)

#include "kmp_i18n.h"                    // for KMP_FATAL
extern int __kmp_env_consistency_check;  // also declared in kmp.h
static inline int
__kmp_test_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    if ( __kmp_user_lock_kind == lk_tas ) {
        if ( __kmp_env_consistency_check ) {
            char const * const func = "omp_test_lock";
            if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_LOCK_T_SIZE )
                && lck->tas.lk.depth_locked != -1 ) {
                KMP_FATAL( LockNestableUsedAsSimple, func );
            }
        }
        return !__sync_val_compare_and_swap( &(lck->tas.lk.poll), 0, gtid + 1 );
    } else {
        KMP_DEBUG_ASSERT( __kmp_test_user_lock_with_checks_ != NULL );
        return ( *__kmp_test_user_lock_with_checks_ )( lck, gtid );
    }
}
#else
static inline int
__kmp_test_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_test_user_lock_with_checks_ != NULL );
    return ( *__kmp_test_user_lock_with_checks_ )( lck, gtid );
}
#endif

extern void ( *__kmp_release_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

static inline void
__kmp_release_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_release_user_lock_with_checks_ != NULL );
    ( *__kmp_release_user_lock_with_checks_ )( lck, gtid );
}

extern void ( *__kmp_init_user_lock_with_checks_ )( kmp_user_lock_p lck );

static inline void
__kmp_init_user_lock_with_checks( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_init_user_lock_with_checks_ != NULL );
    ( *__kmp_init_user_lock_with_checks_ )( lck );
}

// A non-checking destroy is needed when the runtime performs cleanup itself,
// since at that point it cannot always tell whether a lock is nested or not.
extern void ( *__kmp_destroy_user_lock_ )( kmp_user_lock_p lck );

static inline void
__kmp_destroy_user_lock( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_destroy_user_lock_ != NULL );
    ( *__kmp_destroy_user_lock_ )( lck );
}

extern void ( *__kmp_destroy_user_lock_with_checks_ )( kmp_user_lock_p lck );

static inline void
__kmp_destroy_user_lock_with_checks( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_destroy_user_lock_with_checks_ != NULL );
    ( *__kmp_destroy_user_lock_with_checks_ )( lck );
}

extern void ( *__kmp_acquire_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)

#define __kmp_acquire_nested_user_lock_with_checks(lck,gtid)                   \
    if (__kmp_user_lock_kind == lk_tas) {                                      \
        if ( __kmp_env_consistency_check ) {                                   \
            char const * const func = "omp_set_nest_lock";                     \
            if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_NEST_LOCK_T_SIZE )         \
                && lck->tas.lk.depth_locked == -1 ) {                          \
                KMP_FATAL( LockSimpleUsedAsNestable, func );                   \
            }                                                                  \
        }                                                                      \
        if ( lck->tas.lk.poll - 1 == gtid ) {                                  \
            lck->tas.lk.depth_locked += 1;                                     \
        } else {                                                               \
            if ( __sync_val_compare_and_swap( &(lck->tas.lk.poll), 0, gtid + 1 ) ) { \
                kmp_uint32 spins;                                              \
                KMP_FSYNC_PREPARE( lck );                                      \
                KMP_INIT_YIELD( spins );                                       \
                if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) { \
                    KMP_YIELD( TRUE );                                         \
                } else {                                                       \
                    KMP_YIELD_SPIN( spins );                                   \
                }                                                              \
                while ( __sync_val_compare_and_swap( &(lck->tas.lk.poll), 0, gtid + 1 ) ) { \
                    if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) { \
                        KMP_YIELD( TRUE );                                     \
                    } else {                                                   \
                        KMP_YIELD_SPIN( spins );                               \
                    }                                                          \
                }                                                              \
            }                                                                  \
            lck->tas.lk.depth_locked = 1;                                      \
        }                                                                      \
        KMP_FSYNC_ACQUIRED( lck );                                             \
    } else {                                                                   \
        KMP_DEBUG_ASSERT( __kmp_acquire_nested_user_lock_with_checks_ != NULL ); \
        ( *__kmp_acquire_nested_user_lock_with_checks_ )( lck, gtid );         \
    }

#else
static inline void
__kmp_acquire_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_acquire_nested_user_lock_with_checks_ != NULL );
    ( *__kmp_acquire_nested_user_lock_with_checks_ )( lck, gtid );
}
#endif

extern int ( *__kmp_test_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
static inline int
__kmp_test_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    if ( __kmp_user_lock_kind == lk_tas ) {
        int retval;
        if ( __kmp_env_consistency_check ) {
            char const * const func = "omp_test_nest_lock";
            if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_NEST_LOCK_T_SIZE )
                && lck->tas.lk.depth_locked == -1 ) {
                KMP_FATAL( LockSimpleUsedAsNestable, func );
            }
        }
        KMP_DEBUG_ASSERT( gtid >= 0 );
        if ( lck->tas.lk.poll - 1 == gtid ) {   // caller already owns the lock
            return ++lck->tas.lk.depth_locked;  // just bump the nesting depth
        }
        retval = !__sync_val_compare_and_swap( &(lck->tas.lk.poll), 0, gtid + 1 );
        if ( retval ) {
            KMP_MB();
            lck->tas.lk.depth_locked = 1;
        }
        return retval;
    } else {
        KMP_DEBUG_ASSERT( __kmp_test_nested_user_lock_with_checks_ != NULL );
        return ( *__kmp_test_nested_user_lock_with_checks_ )( lck, gtid );
    }
}
#else
static inline int
__kmp_test_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_test_nested_user_lock_with_checks_ != NULL );
    return ( *__kmp_test_nested_user_lock_with_checks_ )( lck, gtid );
}
#endif

extern int ( *__kmp_release_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

static inline int
__kmp_release_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_release_nested_user_lock_with_checks_ != NULL );
    return ( *__kmp_release_nested_user_lock_with_checks_ )( lck, gtid );
}

extern void ( *__kmp_init_nested_user_lock_with_checks_ )( kmp_user_lock_p lck );

static inline void
__kmp_init_nested_user_lock_with_checks( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_init_nested_user_lock_with_checks_ != NULL );
    ( *__kmp_init_nested_user_lock_with_checks_ )( lck );
}

extern void ( *__kmp_destroy_nested_user_lock_with_checks_ )( kmp_user_lock_p lck );

static inline void
__kmp_destroy_nested_user_lock_with_checks( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_destroy_nested_user_lock_with_checks_ != NULL );
    ( *__kmp_destroy_nested_user_lock_with_checks_ )( lck );
}
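
// The accessors below do not exist for every lock kind.  The inline wrappers
// check for a NULL function pointer and degrade gracefully (return NULL or do
// nothing); callers that must distinguish "not implemented" from "no value"
// use the raw function pointers directly.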
extern int ( *__kmp_is_user_lock_initialized_ )( kmp_user_lock_p lck );

extern const ident_t * ( *__kmp_get_user_lock_location_ )( kmp_user_lock_p lck );

static inline const ident_t *
__kmp_get_user_lock_location( kmp_user_lock_p lck )
{
    if ( __kmp_get_user_lock_location_ != NULL ) {
        return ( *__kmp_get_user_lock_location_ )( lck );
    }
    else {
        return NULL;
    }
}

extern void ( *__kmp_set_user_lock_location_ )( kmp_user_lock_p lck, const ident_t *loc );

static inline void
__kmp_set_user_lock_location( kmp_user_lock_p lck, const ident_t *loc )
{
    if ( __kmp_set_user_lock_location_ != NULL ) {
        ( *__kmp_set_user_lock_location_ )( lck, loc );
    }
}

extern kmp_lock_flags_t ( *__kmp_get_user_lock_flags_ )( kmp_user_lock_p lck );

extern void ( *__kmp_set_user_lock_flags_ )( kmp_user_lock_p lck, kmp_lock_flags_t flags );

static inline void
__kmp_set_user_lock_flags( kmp_user_lock_p lck, kmp_lock_flags_t flags )
{
    if ( __kmp_set_user_lock_flags_ != NULL ) {
        ( *__kmp_set_user_lock_flags_ )( lck, flags );
    }
}

// Install the lock-kind-specific implementations behind the function pointers
// declared above.
extern void __kmp_set_user_lock_vptrs( kmp_lock_kind_t user_lock_kind );
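
// ----------------------------------------------------------------------------
// Managing memory for user locks.
//
// When the user's lock variable is too small to hold a pointer (e.g. the
// GOMP-compatible int-sized omp_lock_t), it instead holds a kmp_lock_index_t
// into __kmp_user_lock_table.  Freed locks are recycled through
// __kmp_lock_pool, and locks may be allocated in blocks of
// __kmp_num_locks_in_block entries tracked by __kmp_lock_blocks.
// ----------------------------------------------------------------------------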
struct kmp_lock_table {
    kmp_lock_index_t used;       // number of table entries in use
    kmp_lock_index_t allocated;  // number of table entries allocated
    kmp_user_lock_p * table;     // table of pointers to the allocated locks
};

typedef struct kmp_lock_table kmp_lock_table_t;

extern kmp_lock_table_t __kmp_user_lock_table;
extern kmp_user_lock_p __kmp_lock_pool;

struct kmp_block_of_locks {
    struct kmp_block_of_locks * next_block;
    void * locks;
};

typedef struct kmp_block_of_locks kmp_block_of_locks_t;

extern kmp_block_of_locks_t *__kmp_lock_blocks;
extern int __kmp_num_locks_in_block;

extern kmp_user_lock_p __kmp_user_lock_allocate( void **user_lock, kmp_int32 gtid, kmp_lock_flags_t flags = 0 );
extern void __kmp_user_lock_free( void **user_lock, kmp_int32 gtid, kmp_user_lock_p lck );
extern kmp_user_lock_p __kmp_lookup_user_lock( void **user_lock, char const *func );
extern void __kmp_cleanup_user_locks( void );

// One-time, double-checked initialization of the user-lock machinery, guarded
// by the bootstrap __kmp_initz_lock.
#define KMP_CHECK_USER_LOCK_INIT()                                  \
    {                                                               \
        if ( ! TCR_4( __kmp_init_user_locks ) ) {                   \
            __kmp_acquire_bootstrap_lock( &__kmp_initz_lock );      \
            if ( ! TCR_4( __kmp_init_user_locks ) ) {               \
                TCW_4( __kmp_init_user_locks, TRUE );               \
            }                                                       \
            __kmp_release_bootstrap_lock( &__kmp_initz_lock );      \
        }                                                           \
    }

#undef KMP_PAD
#undef KMP_GTID_DNE

#ifdef __cplusplus
}
#endif // __cplusplus

#endif // KMP_LOCK_H