#define INDEX_MASK ((N)-1)

#define LOSSLESS_BLAME

#ifdef BLAME_MAP_LOCKING

static uint64_t volatile thelock;

#define do_lock() \
{ \
  for (;;) { \
    if (fetch_and_store_i64(&thelock, 1) == 0) break; \
    while (thelock == 1); \
  } \
}

#define do_unlock() thelock = 0

#else

#define do_lock()
#define do_unlock()

#endif

typedef union {
  struct {
    uint32_t obj_id;  /* id of the object this slot tracks */
    uint32_t blame;   /* blame accumulated for that object */
  } parts;
  atomic_uint_least64_t all;  /* both halves, for single 64-bit atomic ops */
} blame_entry_t;
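/*
 * The union lets a 32-bit object id and its accumulated 32-bit blame be
 * read and updated together with one 64-bit atomic operation, so a table
 * slot can be claimed, incremented, or harvested without a lock when
 * LOSSLESS_BLAME is defined.
 */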
uint32_t blame_map_obj_id(uint64_t obj)
{
  /* drop the low alignment bits to form a 32-bit object id */
  return ((uint32_t) ((uint64_t)obj) >> 2);
}
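/*
 * The bodies of blame_map_hash() and blame_map_entry() do not survive in
 * this fragment; a minimal sketch consistent with INDEX_MASK and the
 * blame_entry_t layout above would be:
 */
uint32_t blame_map_hash(uint64_t obj)
{
  return (uint32_t)(blame_map_obj_id(obj) & INDEX_MASK);
}

uint64_t blame_map_entry(uint64_t obj, uint32_t metric_value)
{
  blame_entry_t be;
  be.parts.obj_id = blame_map_obj_id(obj);
  be.parts.blame = metric_value;
  return be.all;
}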
void blame_map_init(blame_entry_t table[])
{
  int i;
  for (i = 0; i < N; i++) {
    table[i].all = 0;
  }
}
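/*
 * blame_map_new() appears in the symbol list below but its body is missing;
 * a plausible reconstruction using hpcrun_malloc() is:
 */
blame_entry_t* blame_map_new(void)
{
  blame_entry_t* table = hpcrun_malloc(N * sizeof(blame_entry_t));
  blame_map_init(table);
  return table;
}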
void blame_map_add_blame(blame_entry_t table[],
                         uint64_t obj, uint32_t metric_value)
{
  uint32_t obj_id = blame_map_obj_id(obj);
  uint32_t index = blame_map_hash(obj);

  assert(index >= 0 && index < N);

  do_lock();
  for (;;) {
    uint64_t oldall = atomic_load_explicit(&table[index].all, memory_order_relaxed);
    blame_entry_t oldval;
    oldval.all = oldall;

    if (oldval.parts.obj_id == obj_id) {
#ifdef LOSSLESS_BLAME
      blame_entry_t newval = oldval;
      newval.parts.blame += metric_value;
      uint64_t testoldall = oldall;
      /* retry the load and CAS until no concurrent update is lost */
      if (atomic_compare_exchange_strong_explicit(&table[index].all, &testoldall,
          newval.all, memory_order_relaxed, memory_order_relaxed)) break;
#else
      oldval.parts.blame += metric_value;
      table[index].all = oldval.all;
      break;
#endif
    } else if (oldval.parts.obj_id == 0) {
      /* the slot is free: try to claim it for this object */
      uint64_t newval = blame_map_entry(obj, metric_value);
#ifdef LOSSLESS_BLAME
      uint64_t testoldall = oldall;
      if (atomic_compare_exchange_strong_explicit(&table[index].all, &testoldall,
          newval, memory_order_relaxed, memory_order_relaxed)) break;
      /* otherwise, try again */
#else
      table[index].all = newval;
      break;
#endif
    } else {
      EMSG("leaked blame %d\n", metric_value);
      break;
    }
  }
  do_unlock();
}
uint64_t blame_map_get_blame(blame_entry_t table[], uint64_t obj)
{
  static uint64_t zero = 0;
  uint64_t val = 0;
  uint32_t obj_id = blame_map_obj_id(obj);
  uint32_t index = blame_map_hash(obj);

  assert(index >= 0 && index < N);

  do_lock();
  for (;;) {
    uint64_t oldall = atomic_load_explicit(&table[index].all, memory_order_relaxed);
    blame_entry_t oldval;
    oldval.all = oldall;
    if (oldval.parts.obj_id == obj_id) {
#ifdef LOSSLESS_BLAME
      uint64_t testoldall = oldall;
      /* zero the slot atomically so the harvested blame is charged once */
      if (!atomic_compare_exchange_strong_explicit(&table[index].all, &testoldall,
          zero, memory_order_relaxed, memory_order_relaxed)) continue;
#else
      table[index].all = 0;
#endif
      val = (uint64_t)oldval.parts.blame;
    }
    break;
  }
  do_unlock();

  return val;
}
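/*
 * A hedged usage sketch (this driver is illustrative, not part of the
 * original file): a lock's address serves as the object key, waiters add
 * blame while a thread holds the lock, and the owner harvests it on release.
 */
#include <stdio.h>

void example(void)
{
  uint64_t lock_addr = 0x1000;  /* hypothetical lock address */
  blame_entry_t* lock_blame_map = blame_map_new();

  blame_map_add_blame(lock_blame_map, lock_addr, 7);  /* two waiting samples */
  blame_map_add_blame(lock_blame_map, lock_addr, 3);

  /* harvesting zeroes the entry, so blame is charged exactly once */
  printf("blame = %llu\n",
         (unsigned long long) blame_map_get_blame(lock_blame_map, lock_addr));
  /* prints: blame = 10 */
}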
/* also referenced in this file (signatures as listed; macro bodies not shown) */
#define atomic_load_explicit(object, order) /* ... */
#define atomic_compare_exchange_strong_explicit(object, expected, desired, success, failure) /* ... */

void* hpcrun_malloc(size_t size);

uint32_t blame_map_obj_id(uint64_t obj);
uint32_t blame_map_hash(uint64_t obj);
uint64_t blame_map_entry(uint64_t obj, uint32_t metric_value);

blame_entry_t* blame_map_new(void);
void blame_map_init(blame_entry_t table[]);
void blame_map_add_blame(blame_entry_t table[], uint64_t obj, uint32_t metric_value);
uint64_t blame_map_get_blame(blame_entry_t table[], uint64_t obj);
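/*
 * Note: atomic_load_explicit and atomic_compare_exchange_strong_explicit are
 * the C11 <stdatomic.h> operations; the #define forms above are presumably
 * fallback shims for compilers without <stdatomic.h>. Under C11 semantics,
 * atomic_compare_exchange_strong_explicit returns true on success and, on
 * failure, stores the observed value into *expected (here, testoldall),
 * which is why the retry loops above reload nothing by hand.
 */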