HPCToolkit
lush-pthread.h
1 // -*-Mode: C++;-*- // technically C99
2 
3 // * BeginRiceCopyright *****************************************************
4 //
5 // $HeadURL$
6 // $Id$
7 //
8 // --------------------------------------------------------------------------
9 // Part of HPCToolkit (hpctoolkit.org)
10 //
11 // Information about sources of support for research and development of
12 // HPCToolkit is at 'hpctoolkit.org' and in 'README.Acknowledgments'.
13 // --------------------------------------------------------------------------
14 //
15 // Copyright ((c)) 2002-2019, Rice University
16 // All rights reserved.
17 //
18 // Redistribution and use in source and binary forms, with or without
19 // modification, are permitted provided that the following conditions are
20 // met:
21 //
22 // * Redistributions of source code must retain the above copyright
23 // notice, this list of conditions and the following disclaimer.
24 //
25 // * Redistributions in binary form must reproduce the above copyright
26 // notice, this list of conditions and the following disclaimer in the
27 // documentation and/or other materials provided with the distribution.
28 //
29 // * Neither the name of Rice University (RICE) nor the names of its
30 // contributors may be used to endorse or promote products derived from
31 // this software without specific prior written permission.
32 //
33 // This software is provided by RICE and contributors "as is" and any
34 // express or implied warranties, including, but not limited to, the
35 // implied warranties of merchantability and fitness for a particular
36 // purpose are disclaimed. In no event shall RICE or contributors be
37 // liable for any direct, indirect, incidental, special, exemplary, or
38 // consequential damages (including, but not limited to, procurement of
39 // substitute goods or services; loss of use, data, or profits; or
40 // business interruption) however caused and on any theory of liability,
41 // whether in contract, strict liability, or tort (including negligence
42 // or otherwise) arising in any way out of the use of this software, even
43 // if advised of the possibility of such damage.
44 //
45 // ******************************************************* EndRiceCopyright *
46 
47 //***************************************************************************
48 //
49 // File:
50 // $HeadURL$
51 //
52 // Purpose:
53 // LUSH: Logical Unwind Support for HPCToolkit
54 //
55 // Description:
56 // [The set of functions, macros, etc. defined in the file]
57 //
58 // Author:
59 // Nathan Tallent, Rice University.
60 //
61 //***************************************************************************
62 
63 #ifndef lush_pthreads_h
64 #define lush_pthreads_h
65 
66 //************************* System Include Files ****************************
67 
68 #include <stdlib.h>
69 #include <stdbool.h>
70 
71 #include <assert.h>
72 
73 //*************************** User Include Files ****************************
74 
75 #include <include/gcc-attr.h>
76 #include <include/min-max.h>
77 
78 #include "lush-pthread.i"
79 #include "lush-backtrace.h" // for 'lush_agents'
80 
81 #include <safe-sampling.h>
82 #include <sample_event.h>
83 #include <cct/cct.h>
84 
85 #include <metrics.h>
86 
88 
89 #include <lib/support-lean/timer.h>
90 
91 
92 //*************************** Forward Declarations **************************
93 
94 #define LUSH_PTHR_FN_TY 1
95 
96 #define LUSH_PTHR_DBG 0
97 
98 
99 //*************************** Forward Declarations **************************
100 
101 #if defined(__cplusplus)
102 extern "C" {
103 #endif
104 
105 //***************************************************************************
106 //
107 //***************************************************************************
108 
109 void
110 lushPthr_processInit();
111 
112 void
113 lushPthr_init(lushPthr_t* x);
114 
115 void
116 lushPthr_dump(lushPthr_t* x, const char* nm, void* lock);
117 
118 
119 #define LUSH_PTHR_FN_REAL0(FN, TY) FN ## _ty ## TY
120 #define LUSH_PTHR_FN_REAL1(FN, TY) LUSH_PTHR_FN_REAL0(FN, TY)
121 #define LUSH_PTHR_FN(FN) LUSH_PTHR_FN_REAL1(FN, LUSH_PTHR_FN_TY)
122 
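// Illustrative expansion (a sketch, assuming LUSH_PTHR_FN_TY == 1 as defined
// above): the two-level paste maps a generic name to its type-1 variant:
//   LUSH_PTHR_FN(lushPthr_mutexLock_pre)
//     => LUSH_PTHR_FN_REAL1(lushPthr_mutexLock_pre, 1)
//     => LUSH_PTHR_FN_REAL0(lushPthr_mutexLock_pre, 1)
//     => lushPthr_mutexLock_pre_ty1
// The extra REAL1 level forces LUSH_PTHR_FN_TY to be expanded before the
// token paste, so the numeric value (not the macro name) is appended.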
123 //***************************************************************************
124 // 1. Attribute a thread's idleness to victim (itself)
125 //***************************************************************************
126 
127 #define LUSH_PTHR_SYNC_SMPL_PERIOD 33 /* if !0, sample synchronously */
128 
129 static inline void
130 lushPthr_begSmplIdleness(lushPthr_t* x)
131 {
132 #if (LUSH_PTHR_SYNC_SMPL_PERIOD)
133  x->doIdlenessCnt++;
134  if (x->doIdlenessCnt == LUSH_PTHR_SYNC_SMPL_PERIOD) {
135  uint64_t time = UINT64_MAX;
136  time_getTimeReal(&time);
137  x->begIdleness = time;
138  }
139 #endif
140 }
141 
142 
143 static inline void
144 lushPthr_endSmplIdleness(lushPthr_t* x)
145 {
146 #if (LUSH_PTHR_SYNC_SMPL_PERIOD)
147  if (x->doIdlenessCnt == LUSH_PTHR_SYNC_SMPL_PERIOD) {
148  uint64_t time = 0;
149  time_getTimeReal(&time);
150  x->idleness += LUSH_PTHR_SYNC_SMPL_PERIOD * MAX(0, time - x->begIdleness);
151  x->doIdlenessCnt = 0;
152  }
153 #endif
154 }
155 
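// Note (a sketch of the accounting above): with LUSH_PTHR_SYNC_SMPL_PERIOD
// set to 33, only every 33rd wait is actually timed; the one measured wait
// is then scaled by the period to estimate the idleness of all 33 waits.
// For example, if the timed wait lasted 4 us, the increment is
// 33 * 4 us = 132 us of idleness.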
156 
157 static inline cct_node_t*
158 lushPthr_attribToCallPath(uint64_t idlenessIncr)
159 {
160  sample_val_t smpl;
161 
162  hpcrun_safe_enter();
163 
164  ucontext_t context;
165  getcontext(&context); // FIXME: check for errors
166  smpl = hpcrun_sample_callpath(&context, lush_agents->metric_time,
167  (hpcrun_metricVal_t) {.i=0},
168  1/*skipInner*/, 1/*isSync*/, NULL);
169  hpcrun_safe_exit();
170 
171  return smpl.sample_node;
172 }
173 
174 
175 //***************************************************************************
176 
177 static inline void
178 lushPthr_thread_init_ty1(lushPthr_t* x)
179 {
180  x->is_working = true;
181 }
182 
183 
184 static inline void
185 lushPthr_thread_fini_ty1(lushPthr_t* x)
186 {
187  x->is_working = false;
188 }
189 
190 
191 static inline void
192 lushPthr_mutexLock_pre_ty1(lushPthr_t* restrict x,
193  pthread_mutex_t* restrict lock)
194 {
195  // N.B.: There is a small window where we could be sampled. Rather
196  // than setting an 'ignore-sample' flag, we currently depend upon
197  // this happening infrequently.
198  x->is_working = false;
199  lushPthr_begSmplIdleness(x);
200 }
201 
202 
203 static inline void
204 lushPthr_mutexLock_post_ty1(lushPthr_t* restrict x,
205  pthread_mutex_t* restrict lock)
206 {
207  lushPthr_endSmplIdleness(x);
208  x->is_working = true;
209  if (x->idleness > 0) {
210  lushPthr_attribToCallPath(x->idleness);
211  }
212 }
213 
214 
215 static inline void
216 lushPthr_mutexTrylock_post_ty1(lushPthr_t* restrict x,
217  pthread_mutex_t* restrict lock)
218 {
219  x->is_working = true; // same
220 }
221 
222 
223 static inline void
224 lushPthr_mutexUnlock_post_ty1(lushPthr_t* restrict x,
225  pthread_mutex_t* restrict lock)
226 {
227  x->is_working = true; // same
228 }
229 
230 
231 static inline atomic_pthread_spinlock_t*
232 lushPthr_spinLock_pre_ty1(lushPthr_t* restrict x,
233  atomic_pthread_spinlock_t* restrict lock)
234 {
235  x->is_working = false;
236  return lock;
237 }
238 
239 
240 static inline void
241 lushPthr_spinLock_post_ty1(lushPthr_t* restrict x,
242  atomic_pthread_spinlock_t* restrict lock)
243 {
244  x->is_working = true;
245 }
246 
247 
248 static inline atomic_pthread_spinlock_t*
249 lushPthr_spinTrylock_pre_ty1(lushPthr_t* restrict x,
250  atomic_pthread_spinlock_t* restrict lock)
251 {
252  return lock;
253 }
254 
255 
256 static inline void
257 lushPthr_spinTrylock_post_ty1(lushPthr_t* restrict x,
258  atomic_pthread_spinlock_t* restrict lock)
259 {
260  x->is_working = true; // same
261 }
262 
263 
264 static inline atomic_pthread_spinlock_t*
265 lushPthr_spinUnlock_pre_ty1(lushPthr_t* restrict x,
266  atomic_pthread_spinlock_t* restrict lock)
267 {
268  return lock;
269 }
270 
271 
272 static inline void
273 lushPthr_spinUnlock_post_ty1(lushPthr_t* restrict x,
274  atomic_pthread_spinlock_t* restrict lock)
275 {
276  x->is_working = true; // same
277 }
278 
279 
280 static inline atomic_pthread_spinlock_t*
281 lushPthr_spinDestroy_pre_ty1(lushPthr_t* restrict x,
282  atomic_pthread_spinlock_t* restrict lock)
283 {
284  return lock;
285 }
286 
287 
288 static inline void
289 lushPthr_spinDestroy_post_ty1(lushPthr_t* restrict x,
290  atomic_pthread_spinlock_t* restrict lock)
291 {
292 }
293 
294 
295 static inline void
296 lushPthr_condwait_pre_ty1(lushPthr_t* x)
297 {
298  x->is_working = false;
299  lushPthr_begSmplIdleness(x);
300 }
301 
302 
303 static inline void
304 lushPthr_condwait_post_ty1(lushPthr_t* x)
305 {
306  lushPthr_endSmplIdleness(x);
307  x->is_working = true;
308  if (x->idleness > 0) {
309  lushPthr_attribToCallPath(x->idleness);
310  }
311 }
312 
313 
314 //***************************************************************************
315 // 2. Attribute idleness to suspects (threads holding locks)
316 //***************************************************************************
317 
318 static inline bool
319 lushPthr_isDirectlyInCond(lushPthr_t* x)
320 {
321  return (x->cond_lock != 0 && x->cond_lock == x->num_locks);
322 }
323 
324 
325 static inline bool
326 lushPthr_isWorking_lock(lushPthr_t* x)
327 {
328  return (x->is_working && x->num_locks > 0 && !lushPthr_isDirectlyInCond(x));
329 }
330 
331 
332 static inline bool
333 lushPthr_isWorking_cond(lushPthr_t* x)
334 {
335  return (x->is_working && lushPthr_isDirectlyInCond(x));
336 }
337 
338 //***************************************************************************
339 
340 static inline void
341 lushPthr_thread_init_ty2(lushPthr_t* x)
342 {
343  x->is_working = true;
344  x->num_locks = 0;
345  x->cond_lock = 0;
346 
347  atomic_fetch_add_explicit(x->ps_num_threads, 1, memory_order_relaxed);
348 
349  atomic_fetch_add_explicit(x->ps_num_working, 1, memory_order_relaxed);
350  // x->ps_num_working_lock: same
351 
352  // x->ps_num_idle_cond: same
353 }
354 
355 
356 static inline void
357 lushPthr_thread_fini_ty2(lushPthr_t* x)
358 {
359  x->is_working = false;
360  x->num_locks = 0;
361  x->cond_lock = 0;
362 
363  atomic_fetch_add_explicit(x->ps_num_threads, -1, memory_order_relaxed);
364 
365  atomic_fetch_add_explicit(x->ps_num_working, -1, memory_order_relaxed);
366  // x->ps_num_working_lock: same
367 
368  // x->ps_num_idle_cond: same
369 }
370 
371 
372 static inline void
373 lushPthr_lock_pre_ty2(lushPthr_t* x)
374 {
375  atomic_fetch_add_explicit(x->ps_num_working, -1, memory_order_relaxed);
376  // x->ps_num_working_lock: same
377 
378  // x->ps_num_idle_cond: same
379 
380  x->is_working = false;
381  // x->num_locks: same
382  // x->cond_lock: same
383 }
384 
385 
386 static inline void
387 lushPthr_lock_post_ty2(lushPthr_t* x)
388 {
389  // (1) moving to lock; (2) moving from cond to lock
390  bool do_addLock = (x->num_locks == 0 || lushPthr_isDirectlyInCond(x));
391 
392  x->is_working = true;
393  x->num_locks++;
394  // x->cond_lock: same
395 
396  atomic_fetch_add_explicit(x->ps_num_working, 1, memory_order_relaxed);
397  if (do_addLock) {
398  atomic_fetch_add_explicit(x->ps_num_working_lock, 1, memory_order_relaxed);
399  }
400 
401  // x->ps_num_idle_cond: same
402 }
403 
404 
405 static inline void
406 lushPthr_trylock_ty2(lushPthr_t* x)
407 {
408  // (1) moving to lock; (2) moving from cond to lock
409  bool do_addLock = (x->num_locks == 0 || lushPthr_isDirectlyInCond(x));
410 
411  x->is_working = true; // same
412  x->num_locks++;
413  // x->cond_lock: same
414 
415  // x->ps_num_working: // same
416  if (do_addLock) {
417  atomic_fetch_add_explicit(x->ps_num_working_lock, 1, memory_order_relaxed);
418  }
419 
420  // x->ps_num_idle_cond: same
421 }
422 
423 
424 static inline void
425 lushPthr_unlock_ty2(lushPthr_t* x)
426 {
427  bool wasDirectlyInCond = lushPthr_isDirectlyInCond(x);
428 
429  x->is_working = true; // same
430  x->num_locks--;
431  if (wasDirectlyInCond) {
432  x->cond_lock = 0;
433  }
434 
435  // x->ps_num_working: same
436  if ((x->num_locks == 0 && !wasDirectlyInCond)
437  || lushPthr_isDirectlyInCond(x)) {
438  atomic_fetch_add_explicit(x->ps_num_working_lock, -1, memory_order_relaxed);
439  }
440 
441  // x->ps_num_idle_cond: same
442 }
443 
444 
445 static inline void
446 lushPthr_mutexLock_pre_ty2(lushPthr_t* restrict x,
447  pthread_mutex_t* restrict lock)
448 {
449  lushPthr_lock_pre_ty2(x);
450 }
451 
452 
453 static inline void
454 lushPthr_mutexLock_post_ty2(lushPthr_t* restrict x,
455  pthread_mutex_t* restrict lock)
456 {
457  lushPthr_lock_post_ty2(x);
458 }
459 
460 
461 static inline void
462 lushPthr_mutexTrylock_post_ty2(lushPthr_t* restrict x,
463  pthread_mutex_t* restrict lock)
464 {
465  lushPthr_trylock_ty2(x);
466 }
467 
468 
469 static inline void
470 lushPthr_mutexUnlock_post_ty2(lushPthr_t* restrict x,
471  pthread_mutex_t* restrict lock)
472 {
473  lushPthr_unlock_ty2(x);
474 }
475 
476 
477 static inline atomic_pthread_spinlock_t*
478 lushPthr_spinLock_pre_ty2(lushPthr_t* restrict x,
479  atomic_pthread_spinlock_t* restrict lock)
480 {
481  lushPthr_lock_pre_ty2(x);
482  return lock;
483 }
484 
485 
486 static inline void
487 lushPthr_spinLock_post_ty2(lushPthr_t* restrict x,
488  atomic_pthread_spinlock_t* restrict lock)
489 {
490  lushPthr_lock_post_ty2(x);
491 }
492 
493 
494 static inline atomic_pthread_spinlock_t*
495 lushPthr_spinTrylock_pre_ty2(lushPthr_t* restrict x,
496  atomic_pthread_spinlock_t* restrict lock)
497 {
498  return lock;
499 }
500 
501 
502 static inline void
503 lushPthr_spinTrylock_post_ty2(lushPthr_t* restrict x,
504  atomic_pthread_spinlock_t* restrict lock)
505 {
506  lushPthr_trylock_ty2(x);
507 }
508 
509 
510 static inline atomic_pthread_spinlock_t*
511 lushPthr_spinUnlock_pre_ty2(lushPthr_t* restrict x,
512  atomic_pthread_spinlock_t* restrict lock)
513 {
514  return lock;
515 }
516 
517 
518 static inline void
519 lushPthr_spinUnlock_post_ty2(lushPthr_t* restrict x,
520  atomic_pthread_spinlock_t* restrict lock)
521 {
522  lushPthr_unlock_ty2(x);
523 }
524 
525 
526 static inline atomic_pthread_spinlock_t*
527 lushPthr_spinDestroy_pre_ty2(lushPthr_t* restrict x,
528  atomic_pthread_spinlock_t* restrict lock)
529 {
530  return lock;
531 }
532 
533 
534 static inline void
535 lushPthr_spinDestroy_post_ty2(lushPthr_t* restrict x,
536  atomic_pthread_spinlock_t* restrict lock)
537 {
538 }
539 
540 
541 static inline void
542 lushPthr_condwait_pre_ty2(lushPthr_t* x)
543 {
544  bool wasDirectlyInCond = lushPthr_isDirectlyInCond(x);
545  int new_num_locks = (x->num_locks - 1);
546 
547  // N.B. this order ensures that (num_working - num_working_lock) >= 0
548  if (new_num_locks == 0 && !wasDirectlyInCond) {
549  atomic_fetch_add_explicit(x->ps_num_working_lock, -1, memory_order_relaxed);
550  }
551  atomic_fetch_add_explicit(x->ps_num_working, -1, memory_order_relaxed);
552 
553  atomic_fetch_add_explicit(x->ps_num_idle_cond, 1, memory_order_relaxed);
554 
555  x->is_working = false;
556  x->num_locks = new_num_locks;
557  //x->cond_lock: same
558 }
559 
560 
561 static inline void
562 lushPthr_condwait_post_ty2(lushPthr_t* x)
563 {
564  x->is_working = true;
565  x->num_locks++;
566  x->cond_lock = x->num_locks;
567 
568  atomic_fetch_add_explicit(x->ps_num_working, 1, memory_order_relaxed);
569  // x->ps_num_working_lock: same, b/c thread is part of 'num_working_cond'
570 
571  atomic_fetch_add_explicit(x->ps_num_idle_cond, -1, memory_order_relaxed);
572 }
573 
574 
575 //***************************************************************************
576 // 3. Attribute lock-wait time to the perpetrator.
577 //***************************************************************************
578 
579 // N.B. consistent with the pthreads implementation
580 #define lushPthr_LockValMax (0x0) // locked values: [INT_MIN, 0]
581 #define lushPthr_UnlckVal (0x1)
582 #define lushPthr_DestroyVal (-1)
583 
584 #define lushPthr_maxValueOfLock (1)
585 
586 //***************************************************************************
587 
588 static inline bool
589 lushPthr_isSyncDataPointer(pthread_spinlock_t lockval)
590 {
591  return (lockval > lushPthr_maxValueOfLock);
592 }
593 
594 
595 static inline lushPtr_SyncObjData_t*
596 lushPthr_getSyncDataPointer(atomic_pthread_spinlock_t lockval)
597 {
598  return (lushPtr_SyncObjData_t*)(lushPthr_mem_beg + atomic_load_explicit(&lockval, memory_order_relaxed));
599 }
600 
601 
602 static inline int32_t
603 lushPthr_makeSyncDataPointer(lushPtr_SyncObjData_t* data)
604 {
605  return (intptr_t)((void*)data - lushPthr_mem_beg);
606 }
607 
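// Sketch of the encoding above: a sync object's data is referenced by a small
// offset relative to lushPthr_mem_beg, so the reference fits in the int-sized
// lock word and stays above lushPthr_maxValueOfLock. For example (hypothetical
// addresses), if lushPthr_mem_beg == 0x10000 and data == 0x10040, then
//   lushPthr_makeSyncDataPointer(data) == 0x40
//   lushPthr_getSyncDataPointer(0x40)  == (lushPtr_SyncObjData_t*)0x10040
// and lushPthr_isSyncDataPointer() treats any lock value greater than
// lushPthr_maxValueOfLock as such an offset.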
608 
609 static inline void
610 lushPthr_destroySyncDataPointer(atomic_pthread_spinlock_t* lock)
611 {
612  atomic_store_explicit(lock, lushPthr_DestroyVal, memory_order_relaxed);
613 }
614 
615 
616 // lushPthr_freelstEnq: push x on the tail
617 static inline void
618 lushPthr_freelstEnq(lushPthr_t* restrict pthr,
619  lushPtr_SyncObjData_t* restrict x)
620 {
621  x->next = NULL;
622  if (!pthr->freelstTail) {
623  // Case 1: empty
624  pthr->freelstHead = x;
625  pthr->freelstTail = x;
626  }
627  else {
628  // Case 2: non-empty
629  pthr->freelstTail->next = x;
630  pthr->freelstTail = x;
631  }
632 #if (LUSH_DBG_STATS)
633  atomic_fetch_add_explicit(&DBG_numLockFreelistCur, 1, memory_order_relaxed);
634 #endif
635 }
636 
637 
638 // lushPthr_freelstDeq: pop from the head, if possible
639 static inline lushPtr_SyncObjData_t*
640 lushPthr_freelstDeq(lushPthr_t* pthr)
641 {
642  if (!pthr->freelstHead) {
643  // Case 1: empty
644  return NULL;
645  }
646  else {
647  // Case 2: non-empty
648  lushPtr_SyncObjData_t* x = pthr->freelstHead;
649  pthr->freelstHead = x->next;
650  x->next = NULL;
651 
652  if (!pthr->freelstHead) {
653  // Special case: one element
654  pthr->freelstTail = NULL;
655  }
656 #if (LUSH_DBG_STATS)
657  atomic_fetch_add_explicit(&DBG_numLockFreelistCur, -1, memory_order_relaxed);
658 #endif
659 
660  return x;
661  }
662 }
663 
664 
665 static inline lushPtr_SyncObjData_t*
666 lushPthr_makeSyncObjData_spin(lushPthr_t* restrict pthr,
667  atomic_pthread_spinlock_t* restrict lock)
668 {
669  lushPtr_SyncObjData_t* x = lushPthr_freelstDeq(pthr);
670  if (!x) {
671  x = lushPthr_malloc(sizeof(lushPtr_SyncObjData_t));
672  }
673  if (!x) {
674  assert(0 && "LUSH/Pthreads: exhausted lock memory");
675  }
676  lushPtr_SyncObjData_init(x);
677 #if (LUSH_DBG_STATS)
678  atomic_fetch_add_explicit(&DBG_numLockAlloc, 1, memory_order_relaxed);
680  / sizeof(lushPtr_SyncObjData_t))
681  - 1 - DBG_numLockFreelistCur);
682  atomic_store_explicit(&DBG_maxLockAllocCur, MAX(DBG_maxLockAllocCur, lockAllocCur), memory_order_relaxed);
683 #endif
684  return x;
685 }
686 
687 
688 static inline lushPtr_SyncObjData_t*
689 lushPthr_demandSyncObjData_spin(lushPthr_t* restrict pthr,
690  atomic_pthread_spinlock_t* restrict lock)
691 {
692  // test-and-test-and-set
693  if (!lushPthr_isSyncDataPointer(atomic_load_explicit(lock, memory_order_relaxed))) {
694  lushPtr_SyncObjData_t* data = lushPthr_makeSyncObjData_spin(pthr, lock);
695  int32_t newval = lushPthr_makeSyncDataPointer(data);
696 
697  bool isWinner = false;
698  while (true) {
699  // CAS returns *old* value iff successful
700  int32_t oldval = atomic_load_explicit(lock, memory_order_relaxed);
701  if (lushPthr_isSyncDataPointer(oldval)) {
702  break;
703  }
704 
705  atomic_store_explicit(&data->lock.spin, oldval, memory_order_relaxed);
706  isWinner = (atomic_exchange_explicit(lock, newval, memory_order_relaxed) == oldval);
707  if (isWinner) {
708  break;
709  }
710  }
711 
712  if (!isWinner) {
713  lushPthr_freelstEnq(pthr, data); // enqueue onto free list
714  }
715  }
716  // INVARIANT: lushPthr_isSyncDataPointer(*lock) is true
717 
718  return lushPthr_getSyncDataPointer(*lock);
719 }
720 
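// Note (a sketch of the protocol above): several threads may race to attach a
// lushPtr_SyncObjData_t to the same spinlock. Each contender obtains a node
// (from its free list or by allocation), copies the lock's current value into
// data->lock.spin, and tries to swap the node's offset into the lock word;
// exactly one exchange observes the old non-pointer value and wins. Losers
// recycle their node onto the per-thread free list, and every caller then
// dereferences whichever pointer was installed, e.g.:
//   lushPtr_SyncObjData_t* d = lushPthr_demandSyncObjData_spin(pthr, lock);
//   // d is the shared data for 'lock', regardless of which thread won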
721 
722 static inline lushPtr_SyncObjData_t*
723 lushPthr_demandCachedSyncObjData_spin(lushPthr_t* restrict pthr,
724  atomic_pthread_spinlock_t* restrict lock)
725 {
726  if ((void*)lock != pthr->cache_syncObj) {
727  pthr->cache_syncObj = (void*)lock;
728  pthr->cache_syncObjData = lushPthr_demandSyncObjData_spin(pthr, lock);
729  }
730  return pthr->cache_syncObjData;
731 }
732 
733 
734 static inline lushPtr_SyncObjData_t*
735 lushPthr_demandSyncObjData_ps(lushPthr_t* restrict x, void* restrict syncObj)
736 {
737  //hpcrun_safe_enter(); // inherited
738 
739  BalancedTreeNode_t* fnd =
740  BalancedTree_find(x->ps_syncObjToData, syncObj);
741  if (!fnd) {
742  fnd = BalancedTree_insert(x->ps_syncObjToData, syncObj);
743  lushPtr_SyncObjData_init(fnd->data);
744 #if (LUSH_DBG_STATS)
745  atomic_fetch_add_explicit(&DBG_numLockAlloc, 1, memory_order_relaxed);
746 #endif
747  }
748 
749  //hpcrun_safe_exit(); // inherited
750 
751  return fnd->data;
752 }
753 
754 
755 static inline lushPtr_SyncObjData_t*
756 lushPthr_demandSyncObjData(lushPthr_t* restrict x, void* restrict syncObj)
757 {
758  hpcrun_safe_enter();
759 
760  BalancedTreeNode_t* fnd =
761  BalancedTree_find(&x->syncObjToData, syncObj);
762  if (!fnd) {
763  fnd = BalancedTree_insert(&x->syncObjToData, syncObj);
764  fnd->data = lushPthr_demandSyncObjData_ps(x, syncObj);
765  }
766 
767  hpcrun_safe_exit();
768 
769  return fnd->data;
770 }
771 
772 
773 static inline lushPtr_SyncObjData_t*
774 lushPthr_demandCachedSyncObjData(lushPthr_t* restrict pthr,
775  void* restrict syncObj)
776 {
777  if (syncObj != pthr->cache_syncObj) {
778  pthr->cache_syncObj = syncObj;
779  pthr->cache_syncObjData = lushPthr_demandSyncObjData(pthr, (void*)syncObj);
780  }
781  return pthr->cache_syncObjData;
782 }
783 
784 
785 //***************************************************************************
786 
787 static inline int
788 lushPthr_spin_lock(atomic_pthread_spinlock_t* lock)
789 {
790  while (true) {
791  if (lushPthr_isSyncDataPointer(atomic_load_explicit(lock, memory_order_relaxed))) {
792  // ------------------------------------------------------------
793  // acquire an indirect lock
794  // ------------------------------------------------------------
795  lushPtr_SyncObjData_t* data = lushPthr_getSyncDataPointer(*lock);
796  lock = &data->lock.spin;
797  while (true) {
798  while (atomic_load_explicit(lock, memory_order_relaxed) != lushPthr_UnlckVal) {;} // spin
799  if (atomic_exchange_explicit(lock, lushPthr_LockValMax, memory_order_relaxed)
800  == lushPthr_UnlckVal) {
801  return 0; // success
802  }
803  }
804  }
805  // ------------------------------------------------------------
806  // acquire a direct lock
807  // ------------------------------------------------------------
808  if (atomic_exchange_explicit(lock, lushPthr_LockValMax,
809  memory_order_relaxed)
810  == lushPthr_UnlckVal) {
811  return 0; // success
812  }
813  }
814  return 1;
815 }
816 
817 
818 static inline int
819 lushPthr_spin_trylock(atomic_pthread_spinlock_t* lock)
820 {
821  while (true) {
822  if (lushPthr_isSyncDataPointer(atomic_load_explicit(lock, memory_order_relaxed))) {
823  // ------------------------------------------------------------
824  // acquire an indirect lock
825  // ------------------------------------------------------------
826  lushPtr_SyncObjData_t* data = lushPthr_getSyncDataPointer(*lock);
827  lock = &data->lock.spin;
828  int prev = atomic_exchange_explicit(lock, lushPthr_LockValMax, memory_order_relaxed);
829  return ((prev == lushPthr_UnlckVal) ? 0 /*success*/ : 1);
830  }
831  // ------------------------------------------------------------
832  // acquire a direct lock
833  // ------------------------------------------------------------
834  int prev = atomic_exchange_explicit(lock, lushPthr_LockValMax, memory_order_relaxed);
835  if (prev == lushPthr_UnlckVal) {
836  return 0; // success
837  }
838  else if (prev <= lushPthr_LockValMax) {
839  return 1;
840  }
841  }
842 
843 }
844 
845 
846 static inline int
847 lushPthr_spin_unlock(atomic_pthread_spinlock_t* lock)
848 {
849  while (true) {
850  int lockval = atomic_load_explicit(lock, memory_order_relaxed);
851  if (lushPthr_isSyncDataPointer(lockval)) {
852  lushPtr_SyncObjData_t* data = lushPthr_getSyncDataPointer(*lock);
853  atomic_store_explicit(&data->lock.spin, lushPthr_UnlckVal, memory_order_relaxed);
854  return 0; // success
855  }
856 
857  if (atomic_compare_exchange_strong_explicit(lock, &lockval, lushPthr_UnlckVal, memory_order_relaxed, memory_order_relaxed)) {
858  return 0; // success
859  }
860  }
861  return 1;
862 }
863 
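// Usage sketch (hedged; the real wrappers live in the pthread override layer,
// not in this header): an interposed pthread_spin_lock would combine the
// type-specific pre/post hooks with the replacement lock operation above,
// e.g. for the type-3 scheme:
//   atomic_pthread_spinlock_t* real = lushPthr_spinLock_pre_ty3(&self, lock);
//   int ret = lushPthr_spin_lock(real);  // spins on the direct or indirect lock
//   lushPthr_spinLock_post_ty3(&self, lock);
// where 'self' is the calling thread's lushPthr_t (a hypothetical local name).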
864 
865 //***************************************************************************
866 
867 static inline void
868 lushPthr_thread_init_ty3(lushPthr_t* x)
869 {
870  x->is_working = true;
871 }
872 
873 
874 static inline void
875 lushPthr_thread_fini_ty3(lushPthr_t* x)
876 {
877  x->is_working = false;
878 }
879 
880 
881 static inline void
882 lushPthr_mutexLock_pre_ty3(lushPthr_t* restrict x,
883  pthread_mutex_t* restrict lock)
884 {
885  lushPtr_SyncObjData_t* syncData =
886  lushPthr_demandCachedSyncObjData(x, (void*)lock);
887  syncData->isBlockingWork = (syncData->isLocked);
888 
889 #if (LUSH_DBG_STATS)
890  atomic_fetch_add_explicit(&DBG_numLockAcq, 1, memory_order_relaxed);
891 #endif
892 
893  x->idleness = 0;
894  x->syncObjData = NULL; // drop samples
895  x->is_working = false;
896 }
897 
898 
899 static inline void
900 lushPthr_mutexLock_post_ty3(lushPthr_t* restrict x,
901  pthread_mutex_t* restrict lock)
902 {
903  x->is_working = true;
905 
906  lushPtr_SyncObjData_t* syncData =
907  lushPthr_demandCachedSyncObjData(x, (void*)lock);
908  syncData->isLocked = true;
909 
910  if (x->idleness > 0 && syncData->cct_node) {
911  cct_node_t* node = (cct_node_t*)syncData->cct_node;
912  int mid = lush_agents->metric_idleness;
913  double idleness = x->idleness;
914  cct_metric_data_increment(mid,
915  node,
916  (cct_metric_data_t){.r = idleness});
917  }
918 }
919 
920 
921 static inline void
922 lushPthr_mutexTrylock_post_ty3(lushPthr_t* restrict x,
923  pthread_mutex_t* restrict lock)
924 {
925  x->is_working = true; // same
926 }
927 
928 
929 static inline void
930 lushPthr_mutexUnlock_post_ty3(lushPthr_t* restrict x,
931  pthread_mutex_t* restrict lock)
932 {
933  x->is_working = true; // same
934 
935  lushPtr_SyncObjData_t* syncData =
936  lushPthr_demandCachedSyncObjData(x, (void*)lock);
937  syncData->isLocked = false;
938 
939  if (syncData->isBlockingWork) {
940  // FIXME3: may not be blocking and may do more work than necessary
941  syncData->cct_node = lushPthr_attribToCallPath(0);
942  }
943 }
944 
945 
946 static inline atomic_pthread_spinlock_t*
947 lushPthr_spinLock_pre_ty3(lushPthr_t* restrict x,
948  atomic_pthread_spinlock_t* restrict lock)
949 {
950  lushPtr_SyncObjData_t* syncData =
951  lushPthr_demandCachedSyncObjData_spin(x, lock);
952 
953 #if (LUSH_DBG_STATS)
954  atomic_fetch_add_explicit(&DBG_numLockAcq, 1, memory_order_relaxed);
955 #endif
956 
957  x->syncObjData = syncData;
958  x->is_working = false;
959  return &syncData->lock.spin;
960 }
961 
962 
963 static inline void
964 lushPthr_spinLock_post_ty3(lushPthr_t* restrict x,
965  atomic_pthread_spinlock_t* restrict lock)
966 {
967  x->is_working = true;
968  x->syncObjData = NULL;
969 }
970 
971 
972 static inline atomic_pthread_spinlock_t*
973 lushPthr_spinTrylock_pre_ty3(lushPthr_t* restrict x,
974  atomic_pthread_spinlock_t* restrict lock)
975 {
976  lushPtr_SyncObjData_t* syncData =
977  lushPthr_demandCachedSyncObjData_spin(x, lock);
978  return &syncData->lock.spin;
979 }
980 
981 
982 static inline void
983 lushPthr_spinTrylock_post_ty3(lushPthr_t* restrict x,
984  atomic_pthread_spinlock_t* restrict lock)
985 {
986  x->is_working = true; // same
987 }
988 
989 
990 static inline atomic_pthread_spinlock_t*
991 lushPthr_spinUnlock_pre_ty3(lushPthr_t* restrict x,
992  atomic_pthread_spinlock_t* restrict lock)
993 {
994  lushPtr_SyncObjData_t* syncData =
995  lushPthr_demandCachedSyncObjData_spin(x, lock);
996  return &syncData->lock.spin;
997 }
998 
999 
1000 static inline void
1001 lushPthr_spinUnlock_post_ty3(lushPthr_t* restrict x,
1002  atomic_pthread_spinlock_t* restrict lock)
1003 {
1004  x->is_working = true; // same
1005 
1006  lushPtr_SyncObjData_t* syncData =
1007  lushPthr_demandCachedSyncObjData_spin(x, lock);
1008  if (syncData && atomic_load_explicit(&syncData->idleness, memory_order_relaxed) > 0) {
1009  x->idleness = atomic_exchange_explicit(&syncData->idleness, 0, memory_order_relaxed);
1010  lushPthr_attribToCallPath(x->idleness);
1011  }
1012 }
1013 
1014 
1015 static inline atomic_pthread_spinlock_t*
1016 lushPthr_spinDestroy_pre_ty3(lushPthr_t* restrict x,
1017  atomic_pthread_spinlock_t* restrict lock)
1018 {
1019  atomic_pthread_spinlock_t* real_lock = lock;
1020  int lockval = atomic_load_explicit(real_lock, memory_order_relaxed);
1021  if (lushPthr_isSyncDataPointer(lockval)) {
1022  lushPtr_SyncObjData_t* syncData = lushPthr_getSyncDataPointer(*lock);
1023  real_lock = &syncData->lock.spin;
1024  }
1025  return real_lock;
1026 }
1027 
1028 
1029 static inline void
1030 lushPthr_spinDestroy_post_ty3(lushPthr_t* restrict x,
1031  atomic_pthread_spinlock_t* restrict lock)
1032 {
1033  if (lushPthr_isSyncDataPointer(atomic_load_explicit(lock, memory_order_relaxed))) {
1034  lushPtr_SyncObjData_t* syncData = lushPthr_getSyncDataPointer(*lock);
1035  lushPthr_freelstEnq(x, syncData); // enqueue onto free list
1036  lushPthr_destroySyncDataPointer(lock);
1037  }
1038 }
1039 
1040 
1041 static inline void
1042 lushPthr_condwait_pre_ty3(lushPthr_t* x)
1043 {
1044  x->is_working = false;
1045  //FIXME3: lushPthr_begSmplIdleness(x);
1046 }
1047 
1048 
1049 static inline void
1050 lushPthr_condwait_post_ty3(lushPthr_t* x)
1051 {
1052  //FIXME3: lushPthr_endSmplIdleness(x);
1053  x->is_working = true;
1054 }
1055 
1056 
1057 //***************************************************************************
1058 //
1059 //***************************************************************************
1060 
1061 // create (thread):
1062 static inline void
1063 lushPthr_thread_init(lushPthr_t* x)
1064 {
1065  if (LUSH_PTHR_DBG) { lushPthr_dump(x, "thrInit", NULL); }
1066 
1067  LUSH_PTHR_FN(lushPthr_thread_init)(x);
1068 }
1069 
1070 
1071 // destroy (thread):
1072 static inline void
1073 lushPthr_thread_fini(lushPthr_t* x)
1074 {
1075  if (LUSH_PTHR_DBG) { lushPthr_dump(x, "thrFini", NULL); }
1076 
1077  LUSH_PTHR_FN(lushPthr_thread_fini)(x);
1078 }
1079 
1080 
1081 // ---------------------------------------------------------
1082 // mutex_lock
1083 // ---------------------------------------------------------
1084 
1085 // lock_pre: thread blocks/sleeps
1086 static inline void
1087 lushPthr_mutexLock_pre(lushPthr_t* restrict x, pthread_mutex_t* restrict lock)
1088 {
1089  if (LUSH_PTHR_DBG) { lushPthr_dump(x, "mLock[", NULL); }
1090 
1091  LUSH_PTHR_FN(lushPthr_mutexLock_pre)(x, lock);
1092 }
1093 
1094 
1095 // lock_post: thread acquires lock and continues
1096 static inline void
1097 lushPthr_mutexLock_post(lushPthr_t* restrict x, pthread_mutex_t* restrict lock)
1098 {
1099  if (LUSH_PTHR_DBG) { lushPthr_dump(x, "mLock]", NULL); }
1100 
1101  LUSH_PTHR_FN(lushPthr_mutexLock_post)(x, lock);
1102 }
1103 
1104 
1105 // trylock: thread may acquire lock, but always continues (never blocks)
1106 static inline void
1107 lushPthr_mutexTrylock_post(lushPthr_t* restrict x,
1108  pthread_mutex_t* restrict lock,
1109  int result)
1110 {
1111  if (result != 0) {
1112  return; // lock was not acquired -- epoch remains the same
1113  }
1114 
1115  if (LUSH_PTHR_DBG) { lushPthr_dump(x, "mTrylock", NULL); }
1116 
1117  LUSH_PTHR_FN(lushPthr_mutexTrylock_post)(x, lock);
1118 }
1119 
1120 
1121 // unlock: thread releases lock and continues
1122 static inline void
1123 lushPthr_mutexUnlock_post(lushPthr_t* restrict x,
1124  pthread_mutex_t* restrict lock)
1125 {
1126  if (LUSH_PTHR_DBG) { lushPthr_dump(x, "mUnlock", NULL); }
1127 
1128  LUSH_PTHR_FN(lushPthr_mutexUnlock_post)(x, lock);
1129 }
1130 
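// Usage sketch (hedged; the actual interposition is outside this header): a
// wrapped pthread_mutex_lock would bracket the real call with the generic
// hooks above, which dispatch to the _ty1/_ty2/_ty3 variants via LUSH_PTHR_FN:
//   lushPthr_mutexLock_pre(&self, mutex);      // thread may block
//   int ret = real_pthread_mutex_lock(mutex);  // hypothetical handle to the real call
//   lushPthr_mutexLock_post(&self, mutex);     // thread holds the lock again
// Trylock uses only the post hook, passing the call's result, so failed
// attempts leave the accounting unchanged; 'self' is a hypothetical name for
// the calling thread's lushPthr_t.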
1131 
1132 // ---------------------------------------------------------
1133 // spin_wait
1134 // ---------------------------------------------------------
1135 
1136 // lock_pre: thread blocks/sleeps
1137 static inline atomic_pthread_spinlock_t*
1138 lushPthr_spinLock_pre(lushPthr_t* restrict x,
1139  atomic_pthread_spinlock_t* lock)
1140 {
1141  if (LUSH_PTHR_DBG) { lushPthr_dump(x, "sLock[", (void*)lock); }
1142 
1143  return LUSH_PTHR_FN(lushPthr_spinLock_pre)(x, lock);
1144 }
1145 
1146 
1147 // lock_post: thread acquires lock and continues
1148 static inline void
1149 lushPthr_spinLock_post(lushPthr_t* restrict x,
1150  atomic_pthread_spinlock_t* restrict lock)
1151 {
1152  if (LUSH_PTHR_DBG) { lushPthr_dump(x, "sLock]", (void*)lock); }
1153 
1154  LUSH_PTHR_FN(lushPthr_spinLock_post)(x, lock);
1155 }
1156 
1157 
1158 // trylock_pre:
1159 static inline atomic_pthread_spinlock_t*
1160 lushPthr_spinTrylock_pre(lushPthr_t* restrict x,
1161  atomic_pthread_spinlock_t* restrict lock)
1162 {
1163  if (LUSH_PTHR_DBG) { lushPthr_dump(x, "sTrylock[", (void*)lock); }
1164 
1165  return LUSH_PTHR_FN(lushPthr_spinTrylock_pre)(x, lock);
1166 }
1167 
1168 
1169 // trylock_post: thread may acquire lock, but always continues (never blocks)
1170 static inline void
1171 lushPthr_spinTrylock_post(lushPthr_t* restrict x,
1172  atomic_pthread_spinlock_t* restrict lock,
1173  int result)
1174 {
1175  if (result != 0) {
1176  return; // lock was not acquired -- epoch remains the same
1177  }
1178 
1179  if (LUSH_PTHR_DBG) { lushPthr_dump(x, "sTrylock]", (void*)lock); }
1180 
1181  LUSH_PTHR_FN(lushPthr_spinTrylock_post)(x, lock);
1182 }
1183 
1184 
1185 // unlock_pre:
1186 static inline atomic_pthread_spinlock_t*
1187 lushPthr_spinUnlock_pre(lushPthr_t* restrict x,
1188  atomic_pthread_spinlock_t* restrict lock)
1189 {
1190  if (LUSH_PTHR_DBG) { lushPthr_dump(x, "sUnlock[", (void*)lock); }
1191 
1192  return LUSH_PTHR_FN(lushPthr_spinUnlock_pre)(x, lock);
1193 }
1194 
1195 
1196 // unlock_post: thread releases lock and continues
1197 static inline void
1198 lushPthr_spinUnlock_post(lushPthr_t* restrict x,
1199  atomic_pthread_spinlock_t* restrict lock)
1200 {
1201  if (LUSH_PTHR_DBG) { lushPthr_dump(x, "sUnlock]", (void*)lock); }
1202 
1203  LUSH_PTHR_FN(lushPthr_spinUnlock_post)(x, lock);
1204 }
1205 
1206 
1207 static inline atomic_pthread_spinlock_t*
1208 lushPthr_spinDestroy_pre(lushPthr_t* restrict x,
1209  atomic_pthread_spinlock_t* restrict lock)
1210 {
1211  if (LUSH_PTHR_DBG) { lushPthr_dump(x, "sDstroy[", (void*)lock); }
1212 
1213  return LUSH_PTHR_FN(lushPthr_spinDestroy_pre)(x, lock);
1214 }
1215 
1216 
1217 static inline void
1218 lushPthr_spinDestroy_post(lushPthr_t* restrict x,
1219  atomic_pthread_spinlock_t* restrict lock)
1220 {
1221  if (LUSH_PTHR_DBG) { lushPthr_dump(x, "sDstroy]", (void*)lock); }
1222 
1223  LUSH_PTHR_FN(lushPthr_spinDestroy_post)(x, lock);
1224 }
1225 
1226 
1227 // ---------------------------------------------------------
1228 // cond_wait
1229 // ---------------------------------------------------------
1230 
1231 // condwait_pre: associated lock is released and thread blocks
1232 static inline void
1233 lushPthr_condwait_pre(lushPthr_t* x)
1234 {
1235  if (LUSH_PTHR_DBG) { lushPthr_dump(x, "cwait[", NULL); }
1236 
1237  LUSH_PTHR_FN(lushPthr_condwait_pre)(x);
1238 }
1239 
1240 
1241 // condwait_post: associated lock is acquired and thread continues
1242 static inline void
1243 lushPthr_condwait_post(lushPthr_t* x)
1244 {
1245  if (LUSH_PTHR_DBG) { lushPthr_dump(x, "cwait]", NULL); }
1246 
1247  LUSH_PTHR_FN(lushPthr_condwait_post)(x);
1248 }
1249 
1250 
1251 // **************************************************************************
1252 
1253 #if defined(__cplusplus)
1254 } /* extern "C" */
1255 #endif
1256 
1257 
1258 #endif /* lush_pthreads_h */