kmp_threadprivate.c

/*
 * kmp_threadprivate.c -- OpenMP threadprivate support library
 * $Revision: 42178 $
 * $Date: 2013-03-22 07:07:59 -0500 (Fri, 22 Mar 2013) $
 */

/* <copyright>
    Copyright (c) 1997-2013 Intel Corporation.  All Rights Reserved.

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions
    are met:

      * Redistributions of source code must retain the above copyright
        notice, this list of conditions and the following disclaimer.
      * Redistributions in binary form must reproduce the above copyright
        notice, this list of conditions and the following disclaimer in the
        documentation and/or other materials provided with the distribution.
      * Neither the name of Intel Corporation nor the names of its
        contributors may be used to endorse or promote products derived
        from this software without specific prior written permission.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


------------------------------------------------------------------------

    Portions of this software are protected under the following patents:
        U.S. Patent 5,812,852
        U.S. Patent 6,792,599
        U.S. Patent 7,069,556
        U.S. Patent 7,328,433
        U.S. Patent 7,500,242

</copyright> */

#include "kmp.h"
#include "kmp_i18n.h"

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#define USE_CHECKS_COMMON

#define KMP_INLINE_SUBR         1


/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

void
kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size );
struct private_common *
kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size );

struct shared_table     __kmp_threadprivate_d_table;

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

static
#ifdef KMP_INLINE_SUBR
__forceinline
#endif
struct private_common *
__kmp_threadprivate_find_task_common( struct common_table *tbl, int gtid, void *pc_addr )

{
    struct private_common *tn;

#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, called with address %p\n",
                    gtid, pc_addr ) );
    dump_list();
#endif

    for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) {
        if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
            KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, found node %p on list\n",
                            gtid, pc_addr ) );
#endif
            return tn;
        }
    }
    return 0;
}

static
#ifdef KMP_INLINE_SUBR
__forceinline
#endif
struct shared_common *
__kmp_find_shared_task_common( struct shared_table *tbl, int gtid, void *pc_addr )
{
    struct shared_common *tn;

    for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) {
        if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
            KC_TRACE( 10, ( "__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
                            gtid, pc_addr ) );
#endif
            return tn;
        }
    }
    return 0;
}


/*
 *      Create a template for the data initialized storage.
 *      Either the template is NULL indicating zero fill,
 *      or the template is a copy of the original data.
 */

static struct private_data *
__kmp_init_common_data( void *pc_addr, size_t pc_size )
{
    struct private_data *d;
    size_t       i;
    char        *p;

    d = (struct private_data *) __kmp_allocate( sizeof( struct private_data ) );
/*
    d->data = 0;  // AC: commented out because __kmp_allocate zeroes the memory
    d->next = 0;
*/
    d->size = pc_size;
    d->more = 1;

    p = (char*)pc_addr;

    for (i = pc_size;  i > 0; --i) {
        if (*p++ != '\0') {
            d->data = __kmp_allocate( pc_size );
            memcpy( d->data, pc_addr, pc_size );
            break;
        }
    }

    return d;
}

/*
 *      Initialize the data area from the template.
 */

static void
__kmp_copy_common_data( void *pc_addr, struct private_data *d )
{
    char *addr = (char *) pc_addr;
    int   i, offset;

    for (offset = 0; d != 0; d = d->next) {
        for (i = d->more; i > 0; --i) {
            if (d->data == 0)
                memset( & addr[ offset ], '\0', d->size );
            else
                memcpy( & addr[ offset ], d->data, d->size );
            offset += d->size;
        }
    }
}
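
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * how the template pair above behaves for two hypothetical threadprivate
 * globals; "thread_copy" stands for a thread's freshly allocated block.
 *
 *     static int tp_counter = 0;   // all-zero image: __kmp_init_common_data
 *                                  // leaves d->data == NULL, so each thread's
 *                                  // copy is zero-filled with memset
 *     static int tp_seed    = 42;  // non-zero image: the template keeps a
 *                                  // byte copy, so each thread's copy is
 *                                  // initialized with memcpy
 *
 *     struct private_data *t = __kmp_init_common_data( &tp_seed, sizeof( tp_seed ) );
 *     __kmp_copy_common_data( thread_copy, t );   // replays the template into
 *                                                 // a thread's private block
 */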

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

/* we are called from __kmp_serial_initialize() with __kmp_initz_lock held. */
void
__kmp_common_initialize( void )
{
    if( ! TCR_4(__kmp_init_common) ) {
        int q;
#ifdef KMP_DEBUG
        int gtid;
#endif

        __kmp_threadpriv_cache_list = NULL;

#ifdef KMP_DEBUG
        /* verify the uber masters were initialized */
        for(gtid = 0 ; gtid < __kmp_threads_capacity; gtid++ )
            if( __kmp_root[gtid] ) {
                KMP_DEBUG_ASSERT( __kmp_root[gtid]->r.r_uber_thread );
                for ( q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
                    KMP_DEBUG_ASSERT( !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q] );
/*                    __kmp_root[ gtid ]-> r.r_uber_thread -> th.th_pri_common -> data[ q ] = 0;*/
            }
#endif /* KMP_DEBUG */

        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
            __kmp_threadprivate_d_table.data[ q ] = 0;

        TCW_4(__kmp_init_common, TRUE);
    }
}

/* Call all destructors for threadprivate data belonging to all threads.
   Currently unused! */
void
__kmp_common_destroy( void )
{
    if( TCR_4(__kmp_init_common) ) {
        int q;

        TCW_4(__kmp_init_common, FALSE);

        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
            int gtid;
            struct private_common *tn;
            struct shared_common  *d_tn;

            /*  C++ destructors need to be called once per thread before exiting  */
            /*  don't call destructors for master thread though unless we used copy constructor */

            for (d_tn = __kmp_threadprivate_d_table.data[ q ]; d_tn; d_tn = d_tn->next) {
                if (d_tn->is_vec) {
                    if (d_tn->dt.dtorv != 0) {
                        for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
                            if( __kmp_threads[gtid] ) {
                                if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                                                         (! KMP_UBER_GTID (gtid)) ) {
                                    tn = __kmp_threadprivate_find_task_common( __kmp_threads[ gtid ]->th.th_pri_common,
                                                                               gtid, d_tn->gbl_addr );
                                    if (tn) {
                                        (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len);
                                    }
                                }
                            }
                        }
                        if (d_tn->obj_init != 0) {
                            (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len);
                        }
                    }
                } else {
                    if (d_tn->dt.dtor != 0) {
                        for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
                            if( __kmp_threads[gtid] ) {
                                if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                                                         (! KMP_UBER_GTID (gtid)) ) {
                                    tn = __kmp_threadprivate_find_task_common( __kmp_threads[ gtid ]->th.th_pri_common,
                                                                               gtid, d_tn->gbl_addr );
                                    if (tn) {
                                        (*d_tn->dt.dtor) (tn->par_addr);
                                    }
                                }
                            }
                        }
                        if (d_tn->obj_init != 0) {
                            (*d_tn->dt.dtor) (d_tn->obj_init);
                        }
                    }
                }
            }
            __kmp_threadprivate_d_table.data[ q ] = 0;
        }
    }
}

/* Call all destructors for threadprivate data belonging to this thread */
void
__kmp_common_destroy_gtid( int gtid )
{
    struct private_common *tn;
    struct shared_common *d_tn;

    KC_TRACE( 10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid ) );
    if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                             (! KMP_UBER_GTID (gtid)) ) {

        if( TCR_4(__kmp_init_common) ) {

            /* Cannot do this here since not all threads have destroyed their data */
            /* TCW_4(__kmp_init_common, FALSE); */

            for (tn = __kmp_threads[ gtid ]->th.th_pri_head; tn; tn = tn->link) {

                d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                                      gtid, tn->gbl_addr );

                KMP_DEBUG_ASSERT( d_tn );

                if (d_tn->is_vec) {
                    if (d_tn->dt.dtorv != 0) {
                        (void) (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len);
                    }
                    if (d_tn->obj_init != 0) {
                        (void) (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len);
                    }
                } else {
                    if (d_tn->dt.dtor != 0) {
                        (void) (*d_tn->dt.dtor) (tn->par_addr);
                    }
                    if (d_tn->obj_init != 0) {
                        (void) (*d_tn->dt.dtor) (d_tn->obj_init);
                    }
                }
            }
            KC_TRACE( 30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors complete\n",
                           gtid ) );
        }
    }
}

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#ifdef KMP_TASK_COMMON_DEBUG
static void
dump_list( void )
{
    int p, q;

    for (p = 0; p < __kmp_all_nth; ++p) {
        if( !__kmp_threads[p] ) continue;
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
            if (__kmp_threads[ p ]->th.th_pri_common->data[ q ]) {
                struct private_common *tn;

                KC_TRACE( 10, ( "\tdump_list: gtid:%d addresses\n", p ) );

                for (tn = __kmp_threads[ p ]->th.th_pri_common->data[ q ]; tn; tn = tn->next) {
                    KC_TRACE( 10, ( "\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                                    tn->gbl_addr, tn->par_addr ) );
                }
            }
        }
    }
}
#endif /* KMP_TASK_COMMON_DEBUG */


/*
 * NOTE: this routine is to be called only from the serial part of the program.
 */

void
kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct shared_common **lnk_tn, *d_tn;
    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ] &&
            __kmp_threads[ gtid ] -> th.th_root -> r.r_active == 0 );

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          gtid, pc_addr );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );

        d_tn->gbl_addr = pc_addr;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );
/*
        d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->ct.ctor = 0;
        d_tn->cct.cctor = 0;
        d_tn->dt.dtor = 0;
        d_tn->is_vec = FALSE;
        d_tn->vec_len = 0L;
*/
        d_tn->cmn_size = pc_size;

        __kmp_acquire_lock( &__kmp_global_lock, gtid );

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;

        __kmp_release_lock( &__kmp_global_lock, gtid );
    }
}

struct private_common *
kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct private_common *tn, **tt;
    struct shared_common  *d_tn;

    /* +++++++++ START OF CRITICAL SECTION +++++++++ */

    __kmp_acquire_lock( & __kmp_global_lock, gtid );

    tn = (struct private_common *) __kmp_allocate( sizeof (struct private_common) );

    tn->gbl_addr = pc_addr;

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          gtid, pc_addr );     /* Only the MASTER data table exists. */

    if (d_tn != 0) {
        /* This threadprivate variable has already been seen. */

        if ( d_tn->pod_init == 0 && d_tn->obj_init == 0 ) {
            d_tn->cmn_size = pc_size;

            if (d_tn->is_vec) {
                if (d_tn->ct.ctorv != 0) {
                    /* Construct from scratch so no prototype exists */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctorv != 0) {
                    /* Now data initialize the prototype since it was previously registered */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctorv) (d_tn->obj_init, pc_addr, d_tn->vec_len);
                }
                else {
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            } else {
                if (d_tn->ct.ctor != 0) {
                    /* Construct from scratch so no prototype exists */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctor != 0) {
                    /* Now data initialize the prototype since it was previously registered */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctor) (d_tn->obj_init, pc_addr);
                }
                else {
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            }
        }
    }
    else {
        struct shared_common **lnk_tn;

        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = pc_addr;
        d_tn->cmn_size = pc_size;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );
/*
        d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->ct.ctor = 0;
        d_tn->cct.cctor = 0;
        d_tn->dt.dtor = 0;
        d_tn->is_vec = FALSE;
        d_tn->vec_len = 0L;
*/
        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }

    tn->cmn_size = d_tn->cmn_size;

    if ( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) ) {
        tn->par_addr = (void *) pc_addr;
    }
    else {
        tn->par_addr = (void *) __kmp_allocate( tn->cmn_size );
    }

    __kmp_release_lock( & __kmp_global_lock, gtid );

    /* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
    if (pc_size > d_tn->cmn_size) {
        KC_TRACE( 10, ( "__kmp_threadprivate_insert: THREADPRIVATE: %p (%"
                        KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n",
                        pc_addr, pc_size, d_tn->cmn_size ) );
        KMP_FATAL( TPCommonBlocksInconsist );
    }
#endif /* USE_CHECKS_COMMON */

    tt = &(__kmp_threads[ gtid ]->th.th_pri_common->data[ KMP_HASH(pc_addr) ]);

#ifdef KMP_TASK_COMMON_DEBUG
    if (*tt != 0) {
        KC_TRACE( 10, ( "__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
                        gtid, pc_addr ) );
    }
#endif
    tn->next = *tt;
    *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
                    gtid, pc_addr ) );
    dump_list( );
#endif

    /* Link the node into a simple list */

    tn->link = __kmp_threads[ gtid ]->th.th_pri_head;
    __kmp_threads[ gtid ]->th.th_pri_head = tn;

#ifdef BUILD_TV
    __kmp_tv_threadprivate_store( __kmp_threads[ gtid ], tn->gbl_addr, tn->par_addr );
#endif

    if( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) )
        return tn;

    /*
     * if C++ object with copy constructor, use it;
     * else if C++ object with constructor, use it for the non-master copies only;
     * else use pod_init and memcpy
     *
     * C++ constructors need to be called once for each non-master thread on allocate
     * C++ copy constructors need to be called once for each thread on allocate
     */

    /*
     * C++ object with constructors/destructors;
     * don't call constructors for master thread though
     */
    if (d_tn->is_vec) {
        if ( d_tn->ct.ctorv != 0) {
            (void) (*d_tn->ct.ctorv) (tn->par_addr, d_tn->vec_len);
        } else if (d_tn->cct.cctorv != 0) {
            (void) (*d_tn->cct.cctorv) (tn->par_addr, d_tn->obj_init, d_tn->vec_len);
        } else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    } else {
        if ( d_tn->ct.ctor != 0 ) {
            (void) (*d_tn->ct.ctor) (tn->par_addr);
        } else if (d_tn->cct.cctor != 0) {
            (void) (*d_tn->cct.cctor) (tn->par_addr, d_tn->obj_init);
        } else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    }
/* !BUILD_OPENMP_C
    if (tn->par_addr != tn->gbl_addr)
        __kmp_copy_common_data( tn->par_addr, d_tn->pod_init ); */

    return tn;
}
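
/*
 * Editorial note (not in the original source): for the master/uber thread the
 * node created above aliases the original global (tn->par_addr == tn->gbl_addr),
 * so no constructor or template copy runs for it.  Every other thread receives
 * a freshly allocated block initialized by, in order of preference, the
 * registered constructor, the registered copy constructor applied to obj_init,
 * or the POD template via __kmp_copy_common_data().
 */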

/* ------------------------------------------------------------------------ */
/* We are currently parallel, and we know the thread id.                    */
/* ------------------------------------------------------------------------ */

void
__kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor)
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ("__kmpc_threadprivate_register: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
    KMP_ASSERT( cctor == 0);
#endif /* USE_CHECKS_COMMON */

    /* Only the global data table exists. */
    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, -1, data );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;

        d_tn->ct.ctor = ctor;
        d_tn->cct.cctor = cctor;
        d_tn->dt.dtor = dtor;
/*
        d_tn->is_vec = FALSE;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->vec_len = 0L;
        d_tn->obj_init = 0;
        d_tn->pod_init = 0;
*/
        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}
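
/*
 * Illustrative sketch (hypothetical names, not part of this file): the kind of
 * registration call a compiler targeting this entry point might emit for a
 * C++ threadprivate object "tobj" of type T with a default constructor and a
 * destructor.
 *
 *     static void *tobj_ctor( void *p ) { return new ( p ) T; }
 *     static void  tobj_dtor( void *p ) { ( (T *) p )->~T(); }
 *
 *     __kmpc_threadprivate_register( &loc, (void *) &tobj,
 *                                    tobj_ctor, NULL, tobj_dtor );
 *
 * Passing NULL for the copy constructor matches the KMP_ASSERT( cctor == 0 )
 * check above.
 */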

void *
__kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data, size_t size)
{
    void *ret;
    struct private_common *tn;

    KC_TRACE( 10, ("__kmpc_threadprivate: T#%d called\n", global_tid ) );

#ifdef USE_CHECKS_COMMON
    if (! __kmp_init_serial)
        KMP_FATAL( RTLNotInitialized );
#endif /* USE_CHECKS_COMMON */

    if ( ! __kmp_threads[global_tid] -> th.th_root -> r.r_active && ! __kmp_foreign_tp ) {
        /* The parallel address will NEVER overlap with the data_address */
        /* dkp: 3rd arg to kmp_threadprivate_insert_private_data() is the data_address; use data_address = data */

        KC_TRACE( 20, ("__kmpc_threadprivate: T#%d inserting private data\n", global_tid ) );
        kmp_threadprivate_insert_private_data( global_tid, data, data, size );

        ret = data;
    }
    else {
        KC_TRACE( 50, ("__kmpc_threadprivate: T#%d try to find private data at address %p\n",
                       global_tid, data ) );
        tn = __kmp_threadprivate_find_task_common( __kmp_threads[ global_tid ]->th.th_pri_common, global_tid, data );

        if ( tn ) {
            KC_TRACE( 20, ("__kmpc_threadprivate: T#%d found data\n", global_tid ) );
#ifdef USE_CHECKS_COMMON
            if ((size_t) size > tn->cmn_size) {
                KC_TRACE( 10, ( "THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n",
                                data, size, tn->cmn_size ) );
                KMP_FATAL( TPCommonBlocksInconsist );
            }
#endif /* USE_CHECKS_COMMON */
        }
        else {
            /* The parallel address will NEVER overlap with the data_address */
            /* dkp: 3rd arg to kmp_threadprivate_insert() is the data_address; use data_address = data */
            KC_TRACE( 20, ("__kmpc_threadprivate: T#%d inserting data\n", global_tid ) );
            tn = kmp_threadprivate_insert( global_tid, data, data, size );
        }

        ret = tn->par_addr;
    }
    KC_TRACE( 10, ("__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                   global_tid, ret ) );

    return ret;
}
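
/*
 * Illustrative sketch (hypothetical variable and ident, not part of this
 * file): obtaining the calling thread's private copy of a global directly
 * through this entry point.
 *
 *     static int tp_counter;
 *
 *     int  gtid    = __kmp_entry_gtid();   // gtid helper macro from kmp.h
 *     int *my_copy = (int *) __kmpc_threadprivate( &loc, gtid, &tp_counter,
 *                                                  sizeof( tp_counter ) );
 *     ++*my_copy;                          // touches only this thread's copy
 */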

void *
__kmpc_threadprivate_cached(
    ident_t *  loc,
    kmp_int32  global_tid,   // gtid.
    void *     data,         // Pointer to original global variable.
    size_t     size,         // Size of original global variable.
    void ***   cache
) {
    void *ret, **my_cache;

    KC_TRACE( 10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, address: %p, size: %"
                   KMP_SIZE_T_SPEC "\n",
                   global_tid, *cache, data, size ) );

    if ( TCR_PTR(*cache) == 0) {
        __kmp_acquire_lock( & __kmp_global_lock, global_tid );

        if ( TCR_PTR(*cache) == 0) {
            int i;
            kmp_cached_addr_t *tp_cache_addr;

            __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
            if(__kmp_threads_capacity > __kmp_tp_capacity)
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( ManyThreadsForTPDirective ),
                    KMP_HNT( Set_ALL_THREADPRIVATE, __kmp_threads_capacity),
                    __kmp_msg_null
                );
            __kmp_tp_cached = 1;
            __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
            /* TODO: free all this memory in __kmp_common_destroy using __kmp_threadpriv_cache_list */
            my_cache = (void**)
                __kmp_allocate(
                    sizeof( void * ) * __kmp_tp_capacity + sizeof ( kmp_cached_addr_t )
                );

            KC_TRACE( 50, ("__kmpc_threadprivate_cached: T#%d allocated cache at address %p\n",
                           global_tid, my_cache ) );
/*
            // AC: commented out because __kmp_allocate zeroes the memory
            for (i = 0; i < __kmp_tp_capacity; ++i)
                TCW_PTR(my_cache[i], 0);
*/
            /* add address of my_cache for cleanup later to linked list */
            tp_cache_addr = (kmp_cached_addr_t *) & my_cache[__kmp_tp_capacity];
            tp_cache_addr -> addr = my_cache;
            tp_cache_addr -> next = __kmp_threadpriv_cache_list;
            __kmp_threadpriv_cache_list = tp_cache_addr;

            KMP_MB();

            TCW_PTR( *cache, my_cache);

            KMP_MB();
        }

        __kmp_release_lock( & __kmp_global_lock, global_tid );
    }


    if ((ret = TCR_PTR((*cache)[ global_tid ])) == 0) {
        ret = __kmpc_threadprivate( loc, global_tid, data, (size_t) size);

        TCW_PTR( (*cache)[ global_tid ], ret);
    }
    KC_TRACE( 10, ("__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
                   global_tid, ret ) );

    return ret;
}
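
/*
 * Illustrative sketch (hypothetical names, not part of this file): the lowering
 * a compiler targeting this entry point might produce for
 * "#pragma omp threadprivate(counter)", keeping one lazily filled cache per
 * threadprivate variable.
 *
 *     static int    counter;
 *     static void **counter_cache;   // zeroed at program start, filled on
 *                                    // first use by the runtime
 *
 *     int *p = (int *) __kmpc_threadprivate_cached( &loc, gtid, &counter,
 *                                                   sizeof( counter ),
 *                                                   &counter_cache );
 *     *p += 1;                       // per-thread access through p
 */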

void
__kmpc_threadprivate_register_vec( ident_t *loc, void *data, kmpc_ctor_vec ctor,
                                   kmpc_cctor_vec cctor, kmpc_dtor_vec dtor,
                                   size_t vector_length )
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ("__kmpc_threadprivate_register_vec: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
    KMP_ASSERT( cctor == 0);
#endif /* USE_CHECKS_COMMON */

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          -1, data );        /* Only the global data table exists. */

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;

        d_tn->ct.ctorv = ctor;
        d_tn->cct.cctorv = cctor;
        d_tn->dt.dtorv = dtor;
        d_tn->is_vec = TRUE;
        d_tn->vec_len = (size_t) vector_length;
/*
        d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->pod_init = 0;
*/
        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}
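
/*
 * Illustrative sketch (hypothetical types, names and helpers, not part of this
 * file): registering a threadprivate array of C++ objects so that each of its
 * vector_length elements is constructed and destroyed per thread.
 *
 *     static T tarr[ 8 ];
 *
 *     static void *tarr_ctorv( void *p, size_t n ) { return construct_n( p, n ); }
 *     static void  tarr_dtorv( void *p, size_t n ) { destroy_n( p, n ); }
 *
 *     __kmpc_threadprivate_register_vec( &loc, (void *) tarr,
 *                                        tarr_ctorv, NULL, tarr_dtorv, 8 );
 */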
