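/*
 * kmp_threadprivate.c -- OpenMP threadprivate support (KMP runtime)
 */
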
#include "kmp.h"
#include "kmp_i18n.h"

#define USE_CHECKS_COMMON

#define KMP_INLINE_SUBR 1

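/* forward declarations of the insertion routines defined later in this file,
   and the global table mapping each threadprivate variable's serial address
   to its shared descriptor */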
void
kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size );
struct private_common *
kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size );

struct shared_table __kmp_threadprivate_d_table;

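/*
 * Find the thread-specific copy descriptor for pc_addr in the given
 * thread's private table; returns 0 if this thread has not privatized
 * the address yet.
 */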
static
#ifdef KMP_INLINE_SUBR
__forceinline
#endif
struct private_common *
__kmp_threadprivate_find_task_common( struct common_table *tbl, int gtid, void *pc_addr )
{
    struct private_common *tn;

#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, called with address %p\n",
                    gtid, pc_addr ) );
    dump_list();
#endif

    for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) {
        if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
            KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, found node %p on list\n",
                            gtid, pc_addr ) );
#endif
            return tn;
        }
    }

    return 0;
}

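/*
 * Find the shared descriptor (constructors, destructors, init template,
 * size) registered for pc_addr; returns 0 if none exists.
 */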
static
#ifdef KMP_INLINE_SUBR
__forceinline
#endif
struct shared_common *
__kmp_find_shared_task_common( struct shared_table *tbl, int gtid, void *pc_addr )
{
    struct shared_common *tn;

    for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) {
        if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
            KC_TRACE( 10, ( "__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
                            gtid, pc_addr ) );
#endif
            return tn;
        }
    }

    return 0;
}

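/*
 * Create a template for initializing later copies of the data: d->data is
 * left NULL when the original is all zeros (meaning "zero fill"), or holds
 * a private copy of the original bytes otherwise.
 */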
static struct private_data *
__kmp_init_common_data( void *pc_addr, size_t pc_size )
{
    struct private_data *d;
    size_t  i;
    char   *p;

    d = (struct private_data *) __kmp_allocate( sizeof( struct private_data ) );
    /* d->data and d->next are left NULL here: __kmp_allocate() is expected to
       return zeroed memory, and a NULL d->data marks "zero fill" for
       __kmp_copy_common_data(). */
    d->size = pc_size;
    d->more = 1;

    p = (char*)pc_addr;

    for (i = pc_size; i > 0; --i) {
        if (*p++ != '\0') {
            d->data = __kmp_allocate( pc_size );
            memcpy( d->data, pc_addr, pc_size );
            break;
        }
    }

    return d;
}

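/*
 * Initialize a thread's copy from the template built above: a NULL
 * d->data zero-fills the block, otherwise the template bytes are copied,
 * d->more times per node.
 */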
static void
__kmp_copy_common_data( void *pc_addr, struct private_data *d )
{
    char *addr = (char *) pc_addr;
    int   i, offset;

    for (offset = 0; d != 0; d = d->next) {
        for (i = d->more; i > 0; --i) {
            if (d->data == 0)
                memset( & addr[ offset ], '\0', d->size );
            else
                memcpy( & addr[ offset ], d->data, d->size );
            offset += d->size;
        }
    }
}

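/* Initialize the shared descriptor table; runs once per process. */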
void
__kmp_common_initialize( void )
{
    if( ! TCR_4(__kmp_init_common) ) {
        int q;
#ifdef KMP_DEBUG
        int gtid;
#endif

        __kmp_threadpriv_cache_list = NULL;

#ifdef KMP_DEBUG
        /* verify that no root thread has any private common data yet */
        for( gtid = 0; gtid < __kmp_threads_capacity; gtid++ )
            if( __kmp_root[gtid] ) {
                KMP_DEBUG_ASSERT( __kmp_root[gtid]->r.r_uber_thread );
                for ( q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
                    KMP_DEBUG_ASSERT( !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q] );
            }
#endif

        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
            __kmp_threadprivate_d_table.data[ q ] = 0;

        TCW_4(__kmp_init_common, TRUE);
    }
}

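/*
 * Run the registered destructor on every thread's copy of each
 * threadprivate variable, then empty the shared descriptor table.
 */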
void
__kmp_common_destroy( void )
{
    if( TCR_4(__kmp_init_common) ) {
        int q;

        TCW_4(__kmp_init_common, FALSE);

        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
            int gtid;
            struct private_common *tn;
            struct shared_common  *d_tn;

            /* C++ destructors are run once per thread; the uber (or, with
               foreign threadprivate, initial) thread's copy is the original
               variable and is skipped, while a master copy saved for the copy
               constructor (obj_init) is destroyed separately. */
            for (d_tn = __kmp_threadprivate_d_table.data[ q ]; d_tn; d_tn = d_tn->next) {
                if (d_tn->is_vec) {
                    if (d_tn->dt.dtorv != 0) {
                        for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
                            if( __kmp_threads[gtid] ) {
                                if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                                                         (! KMP_UBER_GTID (gtid)) ) {
                                    tn = __kmp_threadprivate_find_task_common( __kmp_threads[ gtid ]->th.th_pri_common,
                                                                               gtid, d_tn->gbl_addr );
                                    if (tn) {
                                        (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len);
                                    }
                                }
                            }
                        }
                        if (d_tn->obj_init != 0) {
                            (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len);
                        }
                    }
                } else {
                    if (d_tn->dt.dtor != 0) {
                        for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
                            if( __kmp_threads[gtid] ) {
                                if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                                                         (! KMP_UBER_GTID (gtid)) ) {
                                    tn = __kmp_threadprivate_find_task_common( __kmp_threads[ gtid ]->th.th_pri_common,
                                                                               gtid, d_tn->gbl_addr );
                                    if (tn) {
                                        (*d_tn->dt.dtor) (tn->par_addr);
                                    }
                                }
                            }
                        }
                        if (d_tn->obj_init != 0) {
                            (*d_tn->dt.dtor) (d_tn->obj_init);
                        }
                    }
                }
            }
            __kmp_threadprivate_d_table.data[ q ] = 0;
        }
    }
}

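/* Run the destructors for the threadprivate data of a single exiting thread. */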
void
__kmp_common_destroy_gtid( int gtid )
{
    struct private_common *tn;
    struct shared_common  *d_tn;

    KC_TRACE( 10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid ) );
    if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                             (! KMP_UBER_GTID (gtid)) ) {

        if( TCR_4(__kmp_init_common) ) {

            /* walk this thread's list of privatized variables and run the
               registered destructor (if any) on each copy */
            for (tn = __kmp_threads[ gtid ]->th.th_pri_head; tn; tn = tn->link) {

                d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                                      gtid, tn->gbl_addr );

                KMP_DEBUG_ASSERT( d_tn );

                if (d_tn->is_vec) {
                    if (d_tn->dt.dtorv != 0) {
                        (void) (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len);
                    }
                    if (d_tn->obj_init != 0) {
                        (void) (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len);
                    }
                } else {
                    if (d_tn->dt.dtor != 0) {
                        (void) (*d_tn->dt.dtor) (tn->par_addr);
                    }
                    if (d_tn->obj_init != 0) {
                        (void) (*d_tn->dt.dtor) (d_tn->obj_init);
                    }
                }
            }
            KC_TRACE( 30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors complete\n",
                           gtid ) );
        }
    }
}

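/* Debug-only dump of every thread's private common table. */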
#ifdef KMP_TASK_COMMON_DEBUG
static void
dump_list( void )
{
    int p, q;

    for (p = 0; p < __kmp_all_nth; ++p) {
        if( !__kmp_threads[p] ) continue;
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
            if (__kmp_threads[ p ]->th.th_pri_common->data[ q ]) {
                struct private_common *tn;

                KC_TRACE( 10, ( "\tdump_list: gtid:%d addresses\n", p ) );

                for (tn = __kmp_threads[ p ]->th.th_pri_common->data[ q ]; tn; tn = tn->next) {
                    KC_TRACE( 10, ( "\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                                    tn->gbl_addr, tn->par_addr ) );
                }
            }
        }
    }
}
#endif /* KMP_TASK_COMMON_DEBUG */

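/*
 * Record the initial value of a threadprivate variable so that later
 * copies can be initialized from it. NOTE: intended to be called only
 * from the serial part of the program (asserted below).
 */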
void
kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct shared_common **lnk_tn, *d_tn;
    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ] &&
                      __kmp_threads[ gtid ] -> th.th_root -> r.r_active == 0 );

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          gtid, pc_addr );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );

        d_tn->gbl_addr = pc_addr;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );
        /* the constructor, copy-constructor, destructor and obj_init fields
           are left zero: __kmp_allocate() is expected to return zeroed memory */
        d_tn->cmn_size = pc_size;

        __kmp_acquire_lock( &__kmp_global_lock, gtid );

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;

        __kmp_release_lock( &__kmp_global_lock, gtid );
    }
}

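/*
 * Allocate, initialize, and register this thread's private copy of a
 * threadprivate variable; returns the new private_common node.
 */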
struct private_common *
kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct private_common *tn, **tt;
    struct shared_common  *d_tn;

    /* critical section: serialize updates to the shared descriptor table */
    __kmp_acquire_lock( & __kmp_global_lock, gtid );

    tn = (struct private_common *) __kmp_allocate( sizeof (struct private_common) );

    tn->gbl_addr = pc_addr;

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          gtid, pc_addr );

    if (d_tn != 0) {
        /* This threadprivate variable has already been seen. */

        if ( d_tn->pod_init == 0 && d_tn->obj_init == 0 ) {
            d_tn->cmn_size = pc_size;

            if (d_tn->is_vec) {
                if (d_tn->ct.ctorv != 0) {
                    /* construct from scratch, so no master copy is needed */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctorv != 0) {
                    /* save a master copy so the copy constructor can be used */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctorv) (d_tn->obj_init, pc_addr, d_tn->vec_len);
                }
                else {
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            } else {
                if (d_tn->ct.ctor != 0) {
                    /* construct from scratch, so no master copy is needed */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctor != 0) {
                    /* save a master copy so the copy constructor can be used */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctor) (d_tn->obj_init, pc_addr);
                }
                else {
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            }
        }
    }
    else {
        struct shared_common **lnk_tn;

        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = pc_addr;
        d_tn->cmn_size = pc_size;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );
        /* the constructor, copy-constructor, destructor and obj_init fields
           are left zero: __kmp_allocate() is expected to return zeroed memory */

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }

    tn->cmn_size = d_tn->cmn_size;

    if ( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) ) {
        /* the master thread uses the original variable itself as its copy */
        tn->par_addr = (void *) pc_addr;
    }
    else {
        tn->par_addr = (void *) __kmp_allocate( tn->cmn_size );
    }

    __kmp_release_lock( & __kmp_global_lock, gtid );
    /* end of critical section */

#ifdef USE_CHECKS_COMMON
    if (pc_size > d_tn->cmn_size) {
        KC_TRACE( 10, ( "__kmp_threadprivate_insert: THREADPRIVATE: %p (%"
                        KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n",
                        pc_addr, pc_size, d_tn->cmn_size ) );
        KMP_FATAL( TPCommonBlocksInconsist );
    }
#endif /* USE_CHECKS_COMMON */

    tt = &(__kmp_threads[ gtid ]->th.th_pri_common->data[ KMP_HASH(pc_addr) ]);

#ifdef KMP_TASK_COMMON_DEBUG
    if (*tt != 0) {
        KC_TRACE( 10, ( "__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
                        gtid, pc_addr ) );
    }
#endif
    tn->next = *tt;
    *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
                    gtid, pc_addr ) );
    dump_list( );
#endif

    /* link the node into the thread's list so destructors can run at exit */
    tn->link = __kmp_threads[ gtid ]->th.th_pri_head;
    __kmp_threads[ gtid ]->th.th_pri_head = tn;

#ifdef BUILD_TV
    __kmp_tv_threadprivate_store( __kmp_threads[ gtid ], tn->gbl_addr, tn->par_addr );
#endif

    if( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) )
        return tn;

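    /*
     * Initialize the non-master copy: run the registered constructor if
     * there is one, else the copy constructor against the saved master
     * copy, else fall back to the POD template (zero fill or byte copy).
     */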
    if (d_tn->is_vec) {
        if ( d_tn->ct.ctorv != 0) {
            (void) (*d_tn->ct.ctorv) (tn->par_addr, d_tn->vec_len);
        } else if (d_tn->cct.cctorv != 0) {
            (void) (*d_tn->cct.cctorv) (tn->par_addr, d_tn->obj_init, d_tn->vec_len);
        } else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    } else {
        if ( d_tn->ct.ctor != 0 ) {
            (void) (*d_tn->ct.ctor) (tn->par_addr);
        } else if (d_tn->cct.cctor != 0) {
            (void) (*d_tn->cct.cctor) (tn->par_addr, d_tn->obj_init);
        } else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    }

    return tn;
}

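/*!
 @ingroup THREADPRIVATE
 @param loc   source location information
 @param data  pointer to data being privatized
 @param ctor  pointer to constructor function for data
 @param cctor pointer to copy constructor function for data
 @param dtor  pointer to destructor function for data

 Register constructors and destructors for thread private data.
*/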
void
__kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor)
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ("__kmpc_threadprivate_register: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* the copy constructor is expected to be NULL at registration time */
    KMP_ASSERT( cctor == 0);
#endif /* USE_CHECKS_COMMON */

    /* gtid -1: the gtid argument is only used for debug tracing */
    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, -1, data );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;

        d_tn->ct.ctor = ctor;
        d_tn->cct.cctor = cctor;
        d_tn->dt.dtor = dtor;
        /* is_vec, vec_len, obj_init and pod_init are left zero:
           __kmp_allocate() is expected to return zeroed memory */

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}

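/*
 * __kmpc_threadprivate() -- return the calling thread's copy of data,
 * allocating and initializing it on first use.
 */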
void *
__kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data, size_t size)
{
    void *ret;
    struct private_common *tn;

    KC_TRACE( 10, ("__kmpc_threadprivate: T#%d called\n", global_tid ) );

#ifdef USE_CHECKS_COMMON
    if (! __kmp_init_serial)
        KMP_FATAL( RTLNotInitialized );
#endif /* USE_CHECKS_COMMON */

    if ( ! __kmp_threads[global_tid] -> th.th_root -> r.r_active && ! __kmp_foreign_tp ) {
        /* serial portion of the program: record the initial value so later
           private copies can be initialized from it, and keep using the
           original address */
        KC_TRACE( 20, ("__kmpc_threadprivate: T#%d inserting private data\n", global_tid ) );
        kmp_threadprivate_insert_private_data( global_tid, data, data, size );

        ret = data;
    }
    else {
        KC_TRACE( 50, ("__kmpc_threadprivate: T#%d try to find private data at address %p\n",
                       global_tid, data ) );
        tn = __kmp_threadprivate_find_task_common( __kmp_threads[ global_tid ]->th.th_pri_common, global_tid, data );

        if ( tn ) {
            KC_TRACE( 20, ("__kmpc_threadprivate: T#%d found data\n", global_tid ) );
#ifdef USE_CHECKS_COMMON
            if ((size_t) size > tn->cmn_size) {
                KC_TRACE( 10, ( "THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n",
                                data, size, tn->cmn_size ) );
                KMP_FATAL( TPCommonBlocksInconsist );
            }
#endif /* USE_CHECKS_COMMON */
        }
        else {
            /* first touch by this thread: allocate and initialize a private copy */
            KC_TRACE( 20, ("__kmpc_threadprivate: T#%d inserting data\n", global_tid ) );
            tn = kmp_threadprivate_insert( global_tid, data, data, size );
        }

        ret = tn->par_addr;
    }
    KC_TRACE( 10, ("__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                   global_tid, ret ) );

    return ret;
}

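/*!
 @ingroup THREADPRIVATE
 @param loc source location information
 @param global_tid global thread number
 @param data pointer to data to privatize
 @param size size of data to privatize
 @param cache pointer to cache
 @return pointer to private storage

 Allocate private storage for threadprivate data.
*/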
void *
__kmpc_threadprivate_cached(
    ident_t *  loc,
    kmp_int32  global_tid,
    void *     data,
    size_t     size,
    void ***   cache
) {
    void *ret, **my_cache;

    KC_TRACE( 10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, address: %p, size: %"
                   KMP_SIZE_T_SPEC "\n",
                   global_tid, *cache, data, size ) );

    if ( TCR_PTR(*cache) == 0) {
        __kmp_acquire_lock( & __kmp_global_lock, global_tid );

        if ( TCR_PTR(*cache) == 0) { /* double-checked under the lock */
            kmp_cached_addr_t *tp_cache_addr;

            __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
            if(__kmp_threads_capacity > __kmp_tp_capacity)
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( ManyThreadsForTPDirective ),
                    KMP_HNT( Set_ALL_THREADPRIVATE, __kmp_threads_capacity),
                    __kmp_msg_null
                );
            __kmp_tp_cached = 1;
            __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);

            /* one allocation holds both the per-thread slots and the list node */
            my_cache = (void**)
                __kmp_allocate(
                    sizeof( void * ) * __kmp_tp_capacity + sizeof ( kmp_cached_addr_t )
                );

            KC_TRACE( 50, ("__kmpc_threadprivate_cached: T#%d allocated cache at address %p\n",
                           global_tid, my_cache ) );

            /* link the new cache into the global list so it can be found
               later; the kmp_cached_addr_t node lives just past the cache
               slots in the same allocation */
            tp_cache_addr = (kmp_cached_addr_t *) & my_cache[__kmp_tp_capacity];
            tp_cache_addr -> addr = my_cache;
            tp_cache_addr -> next = __kmp_threadpriv_cache_list;
            __kmp_threadpriv_cache_list = tp_cache_addr;

            KMP_MB();

            TCW_PTR( *cache, my_cache);

            KMP_MB();
        }

        __kmp_release_lock( & __kmp_global_lock, global_tid );
    }

    if ((ret = TCR_PTR((*cache)[ global_tid ])) == 0) {
        ret = __kmpc_threadprivate( loc, global_tid, data, (size_t) size);

        TCW_PTR( (*cache)[ global_tid ], ret);
    }
    KC_TRACE( 10, ("__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
                   global_tid, ret ) );

    return ret;
}

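/*!
 @ingroup THREADPRIVATE
 @param loc source location information
 @param data pointer to data being privatized
 @param ctor pointer to constructor function for data
 @param cctor pointer to copy constructor function for data
 @param dtor pointer to destructor function for data
 @param vector_length length of the vector

 Register vector constructors and destructors for thread private data.
*/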
void
__kmpc_threadprivate_register_vec( ident_t *loc, void *data, kmpc_ctor_vec ctor,
                                   kmpc_cctor_vec cctor, kmpc_dtor_vec dtor,
                                   size_t vector_length )
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ("__kmpc_threadprivate_register_vec: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* the copy constructor is expected to be NULL at registration time */
    KMP_ASSERT( cctor == 0);
#endif /* USE_CHECKS_COMMON */

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          -1, data );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;

        d_tn->ct.ctorv = ctor;
        d_tn->cct.cctorv = cctor;
        d_tn->dt.dtorv = dtor;
        d_tn->is_vec = TRUE;
        d_tn->vec_len = (size_t) vector_length;
        /* obj_init and pod_init are left zero:
           __kmp_allocate() is expected to return zeroed memory */

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}