/*
 * GOMP (libgomp) compatibility entry points for the OpenMP runtime.
 *
 * These routines implement the GOMP_* ABI that GCC-compiled OpenMP code
 * calls, translating each call onto the runtime's __kmpc_* / __kmp_*
 * internals.
 */
#if defined(__x86_64)
# define KMP_I8
#endif
#include "kmp.h"
#include "kmp_atomic.h"
#include "ompt-specific.h"

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

//
// MKLOC builds a static source-location descriptor for a GOMP entry point.
// The GOMP ABI carries no source location, so an "unknown" ident_t is used.
//
#define MKLOC(loc,routine) \
    static ident_t (loc) = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;" };


void
GOMP_barrier(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_barrier");
    KA_TRACE(20, ("GOMP_barrier: T#%d\n", gtid));
    __kmpc_barrier(&loc, gtid);
}


//
// Mutual exclusion
//
// Unnamed critical sections all funnel through a single runtime-provided
// lock, __kmp_unnamed_critical_addr.  Named critical sections are keyed by
// the argument GCC passes in pptr, which is reinterpreted here as a
// kmp_critical_name.
//
extern kmp_critical_name *__kmp_unnamed_critical_addr;


void
GOMP_critical_start(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_critical_start");
    KA_TRACE(20, ("GOMP_critical_start: T#%d\n", gtid));
    __kmpc_critical(&loc, gtid, __kmp_unnamed_critical_addr);
}


void
GOMP_critical_end(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_critical_end");
    KA_TRACE(20, ("GOMP_critical_end: T#%d\n", gtid));
    __kmpc_end_critical(&loc, gtid, __kmp_unnamed_critical_addr);
}


void
GOMP_critical_name_start(void **pptr)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_critical_name_start");
    KA_TRACE(20, ("GOMP_critical_name_start: T#%d\n", gtid));
    __kmpc_critical(&loc, gtid, (kmp_critical_name *)pptr);
}


void
GOMP_critical_name_end(void **pptr)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_critical_name_end");
    KA_TRACE(20, ("GOMP_critical_name_end: T#%d\n", gtid));
    __kmpc_end_critical(&loc, gtid, (kmp_critical_name *)pptr);
}


//
// The omp atomic construct: GCC falls back to these entry points when it
// cannot inline the atomic operation; they simply serialize through the
// runtime's global atomic lock.
//
void
GOMP_atomic_start(void)
{
    int gtid = __kmp_entry_gtid();
    KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));
    __ompt_thread_assign_wait_id(0);
    __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
}


void
GOMP_atomic_end(void)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_atomic_end: T#%d\n", gtid));
    __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
}


int
GOMP_single_start(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_single_start");
    KA_TRACE(20, ("GOMP_single_start: T#%d\n", gtid));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    //
    // Pass FALSE as the 3rd argument so that __kmp_enter_single does not
    // push a workshare; there is no GOMP_single_end() call to pop it.
    //
    return __kmp_enter_single(gtid, &loc, FALSE);
}


void *
GOMP_single_copy_start(void)
{
    void *retval;
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_single_copy_start");
    KA_TRACE(20, ("GOMP_single_copy_start: T#%d\n", gtid));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    //
    // If this is the thread that executes the single region, return NULL;
    // the generated code will then call GOMP_single_copy_end() with the
    // copyprivate data pointer as its argument.
    //
    if (__kmp_enter_single(gtid, &loc, FALSE))
        return NULL;

    //
    // Wait for the executing thread to publish the copyprivate data
    // pointer, and for all other threads to reach this point.
    //
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

    //
    // Read the copyprivate data pointer, then wait for every thread to do
    // likewise before returning it.
    //
    retval = __kmp_team_from_gtid(gtid)->t.t_copypriv_data;
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
    return retval;
}


void
GOMP_single_copy_end(void *data)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_single_copy_end");
    KA_TRACE(20, ("GOMP_single_copy_end: T#%d\n", gtid));

    //
    // Publish the copyprivate data pointer for the team, then hit the
    // barrier so the waiting threads read it.  The second barrier ensures
    // every thread has fetched the pointer before t_copypriv_data can be
    // reused by a later construct.
    //
    __kmp_team_from_gtid(gtid)->t.t_copypriv_data = data;
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
}
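
/*
 * Illustrative sketch (not part of the runtime): roughly how compiler-
 * generated code is expected to drive the copyprivate entry points above.
 * The names my_data, data_t and init are hypothetical.
 *
 *     data_t my_data;
 *     void *p = GOMP_single_copy_start();
 *     if (p == NULL) {
 *         init(&my_data);                  // this thread ran the single region
 *         GOMP_single_copy_end(&my_data);  // broadcast the pointer to the team
 *     }
 *     else {
 *         my_data = *(data_t *)p;          // other threads copy the value in
 *     }
 */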


void
GOMP_ordered_start(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_ordered_start");
    KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
    __kmpc_ordered(&loc, gtid);
}


void
GOMP_ordered_end(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_ordered_end");
    KA_TRACE(20, ("GOMP_ordered_end: T#%d\n", gtid));
    __kmpc_end_ordered(&loc, gtid);
}


//
// Dispatch macro defs
//
// The signed dispatch routines are 32-bit on KMP_ARCH_X86 and 64-bit on
// all other targets; the unsigned variant is always 64-bit.
//
#if KMP_ARCH_X86
# define KMP_DISPATCH_INIT          __kmp_aux_dispatch_init_4
# define KMP_DISPATCH_FINI_CHUNK    __kmp_aux_dispatch_fini_chunk_4
# define KMP_DISPATCH_NEXT          __kmpc_dispatch_next_4
#else
# define KMP_DISPATCH_INIT          __kmp_aux_dispatch_init_8
# define KMP_DISPATCH_FINI_CHUNK    __kmp_aux_dispatch_fini_chunk_8
# define KMP_DISPATCH_NEXT          __kmpc_dispatch_next_8
#endif

# define KMP_DISPATCH_INIT_ULL          __kmp_aux_dispatch_init_8u
# define KMP_DISPATCH_FINI_CHUNK_ULL    __kmp_aux_dispatch_fini_chunk_8u
# define KMP_DISPATCH_NEXT_ULL          __kmpc_dispatch_next_8u


//
// The parallel construct
//
// These wrappers adapt the GOMP outlined-function signature,
// void (*)(void *), to the runtime's microtask_t calling convention.
//

#ifdef KMP_DEBUG
static
#endif
void
__kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *),
  void *data)
{
    task(data);
}


#ifdef KMP_DEBUG
static
#endif
void
__kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr,
  void (*task)(void *), void *data, unsigned num_threads, ident_t *loc,
  enum sched_type schedule, long start, long end, long incr, long chunk_size)
{
    //
    // Initialize the loop worksharing construct for this team member.
    //
    KMP_DISPATCH_INIT(loc, *gtid, schedule, start, end, incr, chunk_size,
      schedule != kmp_sch_static);

    //
    // Now invoke the outlined GOMP routine.
    //
    task(data);
}


#ifdef KMP_DEBUG
static
#endif
void
__kmp_GOMP_fork_call(ident_t *loc, int gtid, microtask_t wrapper, int argc,...)
{
    int rc;

    va_list ap;
    va_start(ap, argc);

    //
    // Hand the trailing GOMP arguments to the fork call; the va_list is
    // passed by address on x86_64 Linux and by value elsewhere.
    //
    rc = __kmp_fork_call(loc, gtid, FALSE, argc, wrapper, __kmp_invoke_task_func,
#if KMP_ARCH_X86_64 && KMP_OS_LINUX
      &ap
#else
      ap
#endif
      );

    va_end(ap);

    if (rc) {
        kmp_info_t *thr = __kmp_threads[gtid];
        __kmp_run_before_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr,
          thr->th.th_team);
    }
}


void
GOMP_parallel_start(void (*task)(void *), void *data, unsigned num_threads)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_parallel_start");
    KA_TRACE(20, ("GOMP_parallel_start: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        __kmp_GOMP_fork_call(&loc, gtid,
          (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, data);
    }
    else {
        __kmpc_serialized_parallel(&loc, gtid);
    }
}


void
GOMP_parallel_end(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_parallel_end");
    KA_TRACE(20, ("GOMP_parallel_end: T#%d\n", gtid));

    if (! __kmp_threads[gtid]->th.th_team->t.t_serialized) {
        kmp_info_t *thr = __kmp_threads[gtid];
        __kmp_run_after_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr,
          thr->th.th_team);
        __kmp_join_call(&loc, gtid);
    }
    else {
        __kmpc_end_serialized_parallel(&loc, gtid);
    }
}
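
/*
 * Illustrative sketch (not part of the runtime): roughly how GCC lowers
 * "#pragma omp parallel" onto the two entry points above.  The outlined
 * function "subfn" and struct "shared" are hypothetical names.
 *
 *     struct shared s = { ... };
 *     GOMP_parallel_start(subfn, &s, 0);   // 0 == use the default team size
 *     subfn(&s);                           // the master also runs the region
 *     GOMP_parallel_end();
 */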


//
// Loop worksharing constructs
//
// GOMP describes a loop's iteration space with a half-open interval
// [lb, ub), while the kmp dispatch routines use inclusive bounds.  Each
// *_start entry therefore converts the upper bound on the way in,
// "(str > 0) ? (ub - 1) : (ub + 1)", and each chunk's returned upper
// bound is converted back on the way out, "*p_ub += (str > 0) ? 1 : -1",
// whenever a chunk was assigned (status != 0).
//

#define LOOP_START(func,schedule) \
    int func (long lb, long ub, long str, long chunk_sz, long *p_lb, \
      long *p_ub) \
    { \
        int status; \
        long stride; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
              (schedule) != kmp_sch_static); \
            status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
              (kmp_int *)p_ub, (kmp_int *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


//
// The runtime-schedule variants take no chunk size; chunk_sz is fixed at 0
// and the schedule is taken from the runtime settings.
//
#define LOOP_RUNTIME_START(func,schedule) \
    int func (long lb, long ub, long str, long *p_lb, long *p_ub) \
    { \
        int status; \
        long stride; \
        long chunk_sz = 0; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %ld\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \
            status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
              (kmp_int *)p_ub, (kmp_int *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


//
// The *_next entries fetch the next chunk for the calling thread; the
// ordered variants first retire the previous chunk via
// KMP_DISPATCH_FINI_CHUNK (supplied through fini_code).
//
#define LOOP_NEXT(func,fini_code) \
    int func(long *p_lb, long *p_ub) \
    { \
        int status; \
        long stride; \
        int gtid = __kmp_get_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d\n", gtid)); \
        \
        fini_code \
        status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
          (kmp_int *)p_ub, (kmp_int *)&stride); \
        if (status) { \
            *p_ub += (stride > 0) ? 1 : -1; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, stride 0x%lx, " \
          "returning %d\n", gtid, *p_lb, *p_ub, stride, status)); \
        return status; \
    }


LOOP_START(GOMP_loop_static_start, kmp_sch_static)
LOOP_NEXT(GOMP_loop_static_next, {})
LOOP_START(GOMP_loop_dynamic_start, kmp_sch_dynamic_chunked)
LOOP_NEXT(GOMP_loop_dynamic_next, {})
LOOP_START(GOMP_loop_guided_start, kmp_sch_guided_chunked)
LOOP_NEXT(GOMP_loop_guided_next, {})
LOOP_RUNTIME_START(GOMP_loop_runtime_start, kmp_sch_runtime)
LOOP_NEXT(GOMP_loop_runtime_next, {})

LOOP_START(GOMP_loop_ordered_static_start, kmp_ord_static)
LOOP_NEXT(GOMP_loop_ordered_static_next, \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(GOMP_loop_ordered_dynamic_start, kmp_ord_dynamic_chunked)
LOOP_NEXT(GOMP_loop_ordered_dynamic_next, \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(GOMP_loop_ordered_guided_start, kmp_ord_guided_chunked)
LOOP_NEXT(GOMP_loop_ordered_guided_next, \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_RUNTIME_START(GOMP_loop_ordered_runtime_start, kmp_ord_runtime)
LOOP_NEXT(GOMP_loop_ordered_runtime_next, \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })


void
GOMP_loop_end(void)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_loop_end: T#%d\n", gtid))

    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

    KA_TRACE(20, ("GOMP_loop_end exit: T#%d\n", gtid))
}


void
GOMP_loop_end_nowait(void)
{
    KA_TRACE(20, ("GOMP_loop_end_nowait: T#%d\n", __kmp_get_gtid()))
}
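
/*
 * Illustrative sketch (not part of the runtime): the libgomp loop protocol
 * that the *_start / *_next / GOMP_loop_end entry points above implement,
 * as it would be driven for "#pragma omp for schedule(dynamic, chunk)".
 * The names n, chunk and body() are hypothetical.
 *
 *     long lb, ub;
 *     if (GOMP_loop_dynamic_start(0, n, 1, chunk, &lb, &ub)) {
 *         do {
 *             for (long i = lb; i < ub; i++)
 *                 body(i);
 *         } while (GOMP_loop_dynamic_next(&lb, &ub));
 *     }
 *     GOMP_loop_end();   // or GOMP_loop_end_nowait() for a nowait loop
 */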


//
// Unsigned long long loop worksharing constructs
//
// These take an explicit 'up' flag for the iteration direction; the
// unsigned stride is converted to a signed str2 accordingly before being
// handed to the dispatcher.
//

#define LOOP_START_ULL(func,schedule) \
    int func (int up, unsigned long long lb, unsigned long long ub, \
      unsigned long long str, unsigned long long chunk_sz, \
      unsigned long long *p_lb, unsigned long long *p_ub) \
    { \
        int status; \
        long long str2 = up ? ((long long)str) : -((long long)str); \
        long long stride; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        \
        KA_TRACE(20, ( #func ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
          gtid, up, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
              (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \
              (schedule) != kmp_sch_static); \
            status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, \
              (kmp_uint64 *)p_lb, (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str2); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


#define LOOP_RUNTIME_START_ULL(func,schedule) \
    int func (int up, unsigned long long lb, unsigned long long ub, \
      unsigned long long str, unsigned long long *p_lb, \
      unsigned long long *p_ub) \
    { \
        int status; \
        long long str2 = up ? ((long long)str) : -((long long)str); \
        unsigned long long stride; \
        unsigned long long chunk_sz = 0; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        \
        KA_TRACE(20, ( #func ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
          gtid, up, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
              (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, TRUE); \
            status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, \
              (kmp_uint64 *)p_lb, (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str2); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


#define LOOP_NEXT_ULL(func,fini_code) \
    int func(unsigned long long *p_lb, unsigned long long *p_ub) \
    { \
        int status; \
        long long stride; \
        int gtid = __kmp_get_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d\n", gtid)); \
        \
        fini_code \
        status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
          (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
        if (status) { \
            *p_ub += (stride > 0) ? 1 : -1; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, stride 0x%llx, " \
          "returning %d\n", gtid, *p_lb, *p_ub, stride, status)); \
        return status; \
    }


LOOP_START_ULL(GOMP_loop_ull_static_start, kmp_sch_static)
LOOP_NEXT_ULL(GOMP_loop_ull_static_next, {})
LOOP_START_ULL(GOMP_loop_ull_dynamic_start, kmp_sch_dynamic_chunked)
LOOP_NEXT_ULL(GOMP_loop_ull_dynamic_next, {})
LOOP_START_ULL(GOMP_loop_ull_guided_start, kmp_sch_guided_chunked)
LOOP_NEXT_ULL(GOMP_loop_ull_guided_next, {})
LOOP_RUNTIME_START_ULL(GOMP_loop_ull_runtime_start, kmp_sch_runtime)
LOOP_NEXT_ULL(GOMP_loop_ull_runtime_next, {})

LOOP_START_ULL(GOMP_loop_ull_ordered_static_start, kmp_ord_static)
LOOP_NEXT_ULL(GOMP_loop_ull_ordered_static_next, \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(GOMP_loop_ull_ordered_dynamic_start, kmp_ord_dynamic_chunked)
LOOP_NEXT_ULL(GOMP_loop_ull_ordered_dynamic_next, \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(GOMP_loop_ull_ordered_guided_start, kmp_ord_guided_chunked)
LOOP_NEXT_ULL(GOMP_loop_ull_ordered_guided_next, \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_RUNTIME_START_ULL(GOMP_loop_ull_ordered_runtime_start, kmp_ord_runtime)
LOOP_NEXT_ULL(GOMP_loop_ull_ordered_runtime_next, \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })


//
// Combined parallel / loop worksharing constructs
//
// The calling thread returns to the GCC-generated code and runs the loop
// itself, so its dispatch is initialized here; the worker threads are
// initialized inside __kmp_GOMP_parallel_microtask_wrapper.
//

#define PARALLEL_LOOP_START(func, schedule) \
    void func (void (*task) (void *), void *data, unsigned num_threads, \
      long lb, long ub, long str, long chunk_sz) \
    { \
        int gtid = __kmp_entry_gtid(); \
        int last = FALSE; \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \
            if (num_threads != 0) { \
                __kmp_push_num_threads(&loc, gtid, num_threads); \
            } \
            __kmp_GOMP_fork_call(&loc, gtid, \
              (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, \
              task, data, num_threads, &loc, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
        } \
        else { \
            __kmpc_serialized_parallel(&loc, gtid); \
        } \
        \
        KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
          (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
          (schedule) != kmp_sch_static); \
        \
        KA_TRACE(20, ( #func " exit: T#%d\n", gtid)); \
    }


PARALLEL_LOOP_START(GOMP_parallel_loop_static_start, kmp_sch_static)
PARALLEL_LOOP_START(GOMP_parallel_loop_dynamic_start, kmp_sch_dynamic_chunked)
PARALLEL_LOOP_START(GOMP_parallel_loop_guided_start, kmp_sch_guided_chunked)
PARALLEL_LOOP_START(GOMP_parallel_loop_runtime_start, kmp_sch_runtime)


#if OMP_30_ENABLED

//
// Tasking constructs
//

void
GOMP_task(void (*func)(void *), void *data, void (*copy_func)(void *, void *),
  long arg_size, long arg_align, int if_cond, unsigned gomp_flags)
{
    MKLOC(loc, "GOMP_task");
    int gtid = __kmp_entry_gtid();
    kmp_int32 flags = 0;
    kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *) & flags;

    KA_TRACE(20, ("GOMP_task: T#%d\n", gtid));

    //
    // Translate the low-order bit of gomp_flags into the kmp tiedness flag.
    //
    if (gomp_flags & 1) {
        input_flags->tiedness = 1;
    }
    input_flags->native = 1;

    //
    // If the if clause is false, the task is executed immediately below,
    // directly on the caller's data, so no argument block is needed.
    //
    if (! if_cond) {
        arg_size = 0;
    }

    kmp_task_t *task = __kmp_task_alloc(&loc, gtid, input_flags,
      sizeof(kmp_task_t), arg_size ? arg_size + arg_align - 1 : 0,
      (kmp_routine_entry_t)func);

    if (arg_size > 0) {
        //
        // Round the shareds pointer up to the requested alignment.
        //
        if (arg_align > 0) {
            task->shareds = (void *)((((size_t)task->shareds)
              + arg_align - 1) / arg_align * arg_align);
        }

        //
        // Use the copy constructor if one was supplied; otherwise copy the
        // captured data bitwise.
        //
        if (copy_func) {
            (*copy_func)(task->shareds, data);
        }
        else {
            memcpy(task->shareds, data, arg_size);
        }
    }

    if (if_cond) {
        __kmpc_omp_task(&loc, gtid, task);
    }
    else {
        __kmpc_omp_task_begin_if0(&loc, gtid, task);
        func(data);
        __kmpc_omp_task_complete_if0(&loc, gtid, task);
    }

    KA_TRACE(20, ("GOMP_task exit: T#%d\n", gtid));
}
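
/*
 * Illustrative sketch (not part of the runtime): roughly how a
 * "#pragma omp task firstprivate(x)" might arrive at GOMP_task above.
 * task_fn and the captured struct are hypothetical; a NULL copy_func
 * selects the plain memcpy path.
 *
 *     struct { int x; } cap = { x };
 *     // args: fn, data, copy_fn, arg_size, arg_align, if_cond, gomp_flags
 *     GOMP_task(task_fn, &cap, NULL, sizeof cap, __alignof__(cap), 1, 0);
 */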


void
GOMP_taskwait(void)
{
    MKLOC(loc, "GOMP_taskwait");
    int gtid = __kmp_entry_gtid();

    KA_TRACE(20, ("GOMP_taskwait: T#%d\n", gtid));

    __kmpc_omp_taskwait(&loc, gtid);

    KA_TRACE(20, ("GOMP_taskwait exit: T#%d\n", gtid));
}


#endif // OMP_30_ENABLED


//
// Sections worksharing constructs
//
// The sections construct is implemented as a dynamically scheduled loop
// with lower bound 1, stride 1, and chunk size 1; the iteration number the
// dispatcher hands back is used as the section number, and 0 is returned
// once no sections remain.
//

unsigned
GOMP_sections_start(unsigned count)
{
    int status;
    kmp_int lb, ub, stride;
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_sections_start");
    KA_TRACE(20, ("GOMP_sections_start: T#%d\n", gtid));

    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
    if (status) {
        KMP_DEBUG_ASSERT(stride == 1);
        KMP_DEBUG_ASSERT(lb > 0);
        KMP_ASSERT(lb == ub);
    }
    else {
        lb = 0;
    }

    KA_TRACE(20, ("GOMP_sections_start exit: T#%d returning %u\n", gtid,
      (unsigned)lb));
    return (unsigned)lb;
}


unsigned
GOMP_sections_next(void)
{
    int status;
    kmp_int lb, ub, stride;
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_sections_next");
    KA_TRACE(20, ("GOMP_sections_next: T#%d\n", gtid));

    status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
    if (status) {
        KMP_DEBUG_ASSERT(stride == 1);
        KMP_DEBUG_ASSERT(lb > 0);
        KMP_ASSERT(lb == ub);
    }
    else {
        lb = 0;
    }

    KA_TRACE(20, ("GOMP_sections_next exit: T#%d returning %u\n", gtid,
      (unsigned)lb));
    return (unsigned)lb;
}
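
/*
 * Illustrative sketch (not part of the runtime): how generated code is
 * expected to consume the section numbers handed out above.  Each nonzero
 * return value selects one section body; 0 means no sections remain.
 * section_one/two/three are hypothetical names.
 *
 *     for (unsigned s = GOMP_sections_start(3); s != 0;
 *          s = GOMP_sections_next()) {
 *         switch (s) {
 *             case 1: section_one();   break;
 *             case 2: section_two();   break;
 *             case 3: section_three(); break;
 *         }
 *     }
 *     GOMP_sections_end();   // or GOMP_sections_end_nowait()
 */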


void
GOMP_parallel_sections_start(void (*task) (void *), void *data,
  unsigned num_threads, unsigned count)
{
    int gtid = __kmp_entry_gtid();
    int last = FALSE;
    MKLOC(loc, "GOMP_parallel_sections_start");
    KA_TRACE(20, ("GOMP_parallel_sections_start: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        __kmp_GOMP_fork_call(&loc, gtid,
          (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, task, data,
          num_threads, &loc, kmp_nm_dynamic_chunked, (kmp_int)1,
          (kmp_int)count, (kmp_int)1, (kmp_int)1);
    }
    else {
        __kmpc_serialized_parallel(&loc, gtid);
    }

    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    KA_TRACE(20, ("GOMP_parallel_sections_start exit: T#%d\n", gtid));
}


void
GOMP_sections_end(void)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_sections_end: T#%d\n", gtid))

    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

    KA_TRACE(20, ("GOMP_sections_end exit: T#%d\n", gtid))
}


void
GOMP_sections_end_nowait(void)
{
    KA_TRACE(20, ("GOMP_sections_end_nowait: T#%d\n", __kmp_get_gtid()))
}

#ifdef __cplusplus
}
#endif // __cplusplus