#include <linux/version.h>

#define HPCRUN_OPTION_PERF_COUNT "HPCRUN_PERF_COUNT"
#define DEFAULT_THRESHOLD        300
#define MAX_BUFFER_LINUX_KERNEL  128

#if KERNEL_SAMPLING_ENABLED

static int privilege = -1;
/* ... */
fscanf(fp, "%d", &privilege);
if (ksyms != NULL && pe_paranoid != NULL) {
  fscanf(pe_paranoid, "%d", &level);
}
if (ksyms)       fclose(ksyms);
if (pe_paranoid) fclose(pe_paranoid);
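The excerpt above follows a common pattern: open both /proc files, read the paranoid level only when both opens succeed, and always close whatever was opened. Below is a minimal self-contained sketch of that pattern, assuming the standard Linux paths (/proc/kallsyms for LINUX_KERNEL_SYMBOL_FILE and /proc/sys/kernel/perf_event_paranoid) and a hypothetical helper name rather than the original perf_util_kernel_syms_avail().

#include <stdio.h>

/* Sketch only: returns 1 if the kernel symbol table is readable and the
 * paranoid level could be read, 0 otherwise. Paths and the helper name
 * are assumptions, not taken verbatim from the original source. */
static int
kernel_syms_readable(int *paranoid_level)
{
  FILE *ksyms       = fopen("/proc/kallsyms", "r");
  FILE *pe_paranoid = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
  int   ok = 0;

  if (ksyms != NULL && pe_paranoid != NULL) {
    ok = (fscanf(pe_paranoid, "%d", paranoid_level) == 1);
  }
  if (ksyms)       fclose(ksyms);
  if (pe_paranoid) fclose(pe_paranoid);
  return ok;
}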
static int initialized = 0;
/* ... */
if (perf_rate_file != NULL) {
  fscanf(perf_rate_file, "%d", &max_sample_rate);
  fclose(perf_rate_file);
}
/* ... */
return max_sample_rate;
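The fragment above reads the kernel's sampling-rate ceiling once and caches it behind a static initialized flag. A sketch of the same idea, assuming the standard sysctl path /proc/sys/kernel/perf_event_max_sample_rate and an illustrative fallback value (the original falls back to HPCRUN_DEFAULT_SAMPLE_RATE, whose value is not shown here):

#include <stdio.h>

/* hypothetical name; cf. perf_util_get_max_sample_rate() in the symbol list */
static int
get_max_sample_rate(void)
{
  static int initialized     = 0;
  static int max_sample_rate = 4000;   /* illustrative fallback only */

  if (!initialized) {
    FILE *perf_rate_file = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
    if (perf_rate_file != NULL) {
      fscanf(perf_rate_file, "%d", &max_sample_rate);
      fclose(perf_rate_file);
    }
    initialized = 1;
  }
  return max_sample_rate;
}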
static int initialized = 0;
/* ... */
const char *val_str = getenv("HPCRUN_PERF_COUNT");
if (val_str != NULL) {
  TMSG(LINUX_PERF, "HPCRUN_PERF_COUNT = %s", val_str);
  /* ... */
}
#if KERNEL_SAMPLING_ENABLED
/* ... */
if (krestrict == 0 && (level == 0 || level == 1)) {
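The condition above gates kernel-space sampling on two sysctl settings: kptr_restrict must be 0 and perf_event_paranoid must be 0 or 1. A sketch of that check, assuming the standard Linux paths (the original reads them via LINUX_KERNEL_KPTR_RESTICT and the paranoid file) and a hypothetical function name:

#include <stdio.h>

static int
kernel_sampling_allowed(void)
{
  int krestrict = -1, level = -1;

  FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
  if (fp) { fscanf(fp, "%d", &krestrict); fclose(fp); }

  fp = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
  if (fp) { fscanf(fp, "%d", &level); fclose(fp); }

  return krestrict == 0 && (level == 0 || level == 1);
}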
#if KERNEL_SAMPLING_ENABLED
/* ... */
if (data_aux == NULL) {
  /* ... */
}
/* ... */
for (int i = data->nr - 1; i > 0; i--) {
int perf_util_attr_init(
  const char *event_name,
  struct perf_event_attr *attr,
  bool usePeriod, u64 threshold, u64 sampletype)
{
  /* ... */
  unsigned int sample_type = sampletype | PERF_SAMPLE_PERIOD | PERF_SAMPLE_TIME;
  /* ... */
  attr->size = sizeof(struct perf_event_attr);
  attr->freq = (usePeriod ? 0 : 1);
  /* ... */
  if (attr->freq == 1 && threshold >= max_sample_rate) {
    int our_rate = max_sample_rate - 1;
    EMSG("WARNING: Lowered specified sample rate %d to %d, below max sample rate of %d.",
         threshold, our_rate, max_sample_rate);
    attr->sample_period = our_rate;
  }
  /* ... */
  attr->sample_type = sample_type;
  attr->exclude_kernel = EXCLUDE;
  /* ... */
  attr->exclude_kernel = EXCLUDE;

#if KERNEL_SAMPLING_ENABLED
  /* ... */
#if KERNEL_SAMPLING_ENABLED
  attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
  /* ... */
  attr->exclude_kernel = INCLUDE;
  /* ... */

  switch (precise_ip_type) {
    /* ... */
    precise_ip = precise_ip_type;
    /* ... */
  }
  attr->precise_ip = precise_ip;
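Putting the fragments of perf_util_attr_init() together: every sample carries its period and a timestamp, frequency mode is clamped just below the kernel ceiling, kernel samples are excluded by default, and only when kernel sampling is permitted are callchains requested and kernel frames included. A condensed sketch against the standard linux/perf_event.h fields (the skid / precise_ip handling, done by perf_skid_set_max_precise_ip() in the original, is omitted; the function name and parameter list here are illustrative):

#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <linux/perf_event.h>

static void
attr_init_sketch(struct perf_event_attr *attr, bool usePeriod, uint64_t threshold,
                 uint64_t sampletype, int max_sample_rate, int kernel_sampling_ok)
{
  unsigned int sample_type = sampletype | PERF_SAMPLE_PERIOD | PERF_SAMPLE_TIME;

  memset(attr, 0, sizeof(*attr));
  attr->size          = sizeof(struct perf_event_attr);
  attr->freq          = (usePeriod ? 0 : 1);
  attr->sample_period = threshold;          /* sample_period / sample_freq share a union */

  /* in frequency mode, never request more samples per second than the kernel allows */
  if (attr->freq == 1 && threshold >= (uint64_t) max_sample_rate) {
    attr->sample_period = max_sample_rate - 1;
  }

  attr->sample_type    = sample_type;
  attr->exclude_kernel = 1;                 /* EXCLUDE by default */

  if (kernel_sampling_ok) {
    attr->sample_type   |= PERF_SAMPLE_CALLCHAIN;
    attr->exclude_kernel = 0;               /* INCLUDE kernel frames */
  }
}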
int OSUtil_setCustomKernelNameWrap(char *buffer, size_t max_chars)
#define LINUX_KERNEL_NAME
static cct_node_t * perf_split_retained_node(cct_node_t *node)
static uint16_t perf_kernel_lm_id
void MONITOR_EXT_WRAP_NAME(free)(void *ptr)
int perf_skid_set_max_precise_ip(struct perf_event_attr *attr)
#define LINUX_PERF_EVENTS_MAX_RATE
static int perf_util_kernel_syms_avail()
#define HPCRUN_DEFAULT_SAMPLE_RATE
#define PERF_EVENT_SKID_ERROR
static void spinlock_unlock(spinlock_t *l)
static int perf_util_get_kptr_restrict()
static struct event_threshold_s default_threshold
#define PERF_EVENT_AUTODETECT_SKID
#define INCLUDE_CALLCHAIN
#define LINUX_KERNEL_KPTR_RESTICT
uint16_t hpcrun_loadModule_add(const char *name)
bool perf_util_is_ksym_available()
cct_node_t * hpcrun_cct_parent(cct_node_t *x)
cct_node_t * perf_util_add_kernel_callchain(cct_node_t *leaf, void *data_aux)
#define EXCLUDE_CALLCHAIN
cct_node_t * hpcrun_cct_insert_addr(cct_node_t *node, cct_addr_t *frm)
static uint16_t perf_get_kernel_lm_id()
#define PERF_EVENT_SKID_ARBITRARY
void perf_util_get_default_threshold(struct event_threshold_s *threshold)
enum threshold_e threshold_type
int perf_skid_parse_event(const char *event_string, char **event_string_without_skidmarks)
static void spinlock_lock(spinlock_t *l)
int perf_util_attr_init(const char *event_name, struct perf_event_attr *attr, bool usePeriod, u64 threshold, u64 sampletype)
#define LINUX_KERNEL_SYMBOL_FILE
int hpcrun_cct_retained(cct_node_t *x)
static enum perf_ksym_e ksym_status
u64 ips[MAX_CALLCHAIN_FRAMES]
static cct_node_t * perf_insert_cct(uint16_t lm_id, cct_node_t *parent, u64 ip)
static int perf_util_get_max_sample_rate()
#define LINUX_PERF_EVENTS_FILE
#define DEFAULT_THRESHOLD
#define MAX_BUFFER_LINUX_KERNEL
static spinlock_t perf_lock
static int const threshold
int hpcrun_extract_threshold(const char *input_string, long *threshold, long default_value)
u64 perf_skid_get_precise_ip(struct perf_event_attr *attr)
#define SPINLOCK_UNLOCKED
static void set_default_threshold()
cct_addr_t * hpcrun_cct_addr(cct_node_t *node)
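For orientation, the attribute block initialized by perf_util_attr_init() above is what ultimately gets handed to the perf_event_open(2) syscall, which has no glibc wrapper. A hypothetical usage sketch with a placeholder event, threshold, and no error handling; only standard Linux APIs are used, and the original hpcrun call sequence is paraphrased in the comment:

#include <unistd.h>
#include <string.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int
open_sampled_event_sketch(void)
{
  struct perf_event_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.size   = sizeof(attr);
  attr.type   = PERF_TYPE_HARDWARE;
  attr.config = PERF_COUNT_HW_CPU_CYCLES;

  /* the original would instead initialize attr via something like:
   *   perf_util_attr_init("cycles", &attr, false, 300, PERF_SAMPLE_IP);
   * values here are placeholders */
  attr.freq        = 1;
  attr.sample_freq = 300;
  attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_PERIOD | PERF_SAMPLE_TIME;

  /* monitor the calling thread on any CPU */
  return (int) syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
}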