#include <linux/bitops.h>
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/types.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

/* in __perf_evsel__sample_size(): each bit set in sample_type adds one u64 */
    for (i = 0; i < 64; i++) {
        if (mask & (1ULL << i))
            size++;
    }

static int __perf_evsel__calc_id_pos(u64 sample_type)
{
    int idx = 0;

    if (sample_type & PERF_SAMPLE_IDENTIFIER)
        return 0;

    if (!(sample_type & PERF_SAMPLE_ID))
        return -1;

    if (sample_type & PERF_SAMPLE_IP)
        idx += 1;

    if (sample_type & PERF_SAMPLE_TID)
        idx += 1;

    if (sample_type & PERF_SAMPLE_TIME)
        idx += 1;

    if (sample_type & PERF_SAMPLE_ADDR)
        idx += 1;

    return idx;
}

static int __perf_evsel__calc_is_pos(u64 sample_type)
{
    int idx = 1;

    if (sample_type & PERF_SAMPLE_IDENTIFIER)
        return 1;

    if (!(sample_type & PERF_SAMPLE_ID))
        return -1;

    if (sample_type & PERF_SAMPLE_CPU)
        idx -= 1;

    if (sample_type & PERF_SAMPLE_STREAM_ID)
        idx -= 1;

    return idx;
}
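/*
 * Illustrative note (not part of the original file): for
 * sample_type = IP | TID | TIME | ID, __perf_evsel__calc_id_pos()
 * returns 3, i.e. the sample ID is the fourth u64 of a
 * PERF_RECORD_SAMPLE body:
 *
 *     const u64 *array = event->sample.array;
 *     u64 id = array[3];    // after IP, TID and TIME
 *
 * is_pos is the mirror image for non-sample records, counted backwards
 * from the end of the record, where sample_id_all appends the same fields.
 */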
void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
                  enum perf_event_sample_format bit)
{
    if (!(evsel->attr.sample_type & bit)) {
        evsel->attr.sample_type |= bit;
        evsel->sample_size += sizeof(u64);
        perf_evsel__calc_id_pos(evsel);
    }
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
                    enum perf_event_sample_format bit)
{
    if (evsel->attr.sample_type & bit) {
        evsel->attr.sample_type &= ~bit;
        evsel->sample_size -= sizeof(u64);
        perf_evsel__calc_id_pos(evsel);
    }
}

void perf_evsel__set_sample_id(struct perf_evsel *evsel,
                   bool can_sample_identifier)
{
    if (can_sample_identifier) {
        perf_evsel__reset_sample_bit(evsel, ID);
        perf_evsel__set_sample_bit(evsel, IDENTIFIER);
    } else {
        perf_evsel__set_sample_bit(evsel, ID);
    }

    evsel->attr.read_format |= PERF_FORMAT_ID;
}
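/*
 * Illustrative usage (not part of the original file): callers go through
 * the convenience macros from evsel.h, which paste the PERF_SAMPLE_ prefix:
 *
 *     #define perf_evsel__set_sample_bit(evsel, bit) \
 *             __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)
 *
 *     perf_evsel__set_sample_bit(evsel, TIME);   // sample_type |= PERF_SAMPLE_TIME
 *     perf_evsel__reset_sample_bit(evsel, ADDR); // sample_type &= ~PERF_SAMPLE_ADDR
 *
 * so evsel->sample_size and the id positions stay in sync with sample_type.
 */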
#define FUNCTION_EVENT  "ftrace:function"

bool perf_evsel__is_function_event(struct perf_evsel *evsel)
{
    return evsel->name &&
           !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));
}

#undef FUNCTION_EVENT

void perf_evsel__init(struct perf_evsel *evsel,
              struct perf_event_attr *attr, int idx)
{
    /* ... */
    INIT_LIST_HEAD(&evsel->node);
    /* ... */
}

/* in perf_evsel__new_idx(): BPF output events always need these fields */
    evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
                    PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
    evsel->attr.sample_period = 1;
struct perf_evsel *perf_evsel__new_cycles(bool precise)
{
    struct perf_event_attr attr = {
        .type   = PERF_TYPE_HARDWARE,
        .config = PERF_COUNT_HW_CPU_CYCLES,
        .exclude_kernel = !perf_event_can_profile_kernel(),
    };
    struct perf_evsel *evsel;

    event_attr_init(&attr);

    if (!precise)
        goto new_event;

    /* probe for the deepest :ppp precision the PMU supports */
    attr.sample_period = 1;
    perf_event_attr__set_max_precise_ip(&attr);
    /*
     * Now let the usual perf_evsel__config() defaults kick in when we
     * return and before perf_evsel__open() is called.
     */
    attr.sample_period = 0;
new_event:
    evsel = perf_evsel__new(&attr);
    if (evsel == NULL)
        goto out;

    /* use asprintf() for evsel->name construction */
    if (asprintf(&evsel->name, "cycles%s%s%.*s",
             (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
             attr.exclude_kernel ? "u" : "",
             attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0)
        goto error_free;
out:
    return evsel;
error_free:
    perf_evsel__delete(evsel);
    evsel = NULL;
    goto out;
}
struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
    struct perf_event_attr attr = {
        .type        = PERF_TYPE_TRACEPOINT,
        .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
                PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
    };
    /* ... */
    if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
        goto out_free;

    /* ... look up evsel->tp_format and set attr.config from it ... */
    attr.sample_period = 1;
    perf_evsel__init(evsel, &attr, idx);
    return evsel;
    /* ... */
}
366 "stalled-cycles-frontend",
367 "stalled-cycles-backend",
373 if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
374 return perf_evsel__hw_names[
config];
376 return "unknown-hardware";
static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
    int colon = 0, r = 0;
    struct perf_event_attr *attr = &evsel->attr;
    bool exclude_guest_default = false;

#define MOD_PRINT(context, mod) do {                    \
        if (!attr->exclude_##context) {             \
            if (!colon) colon = ++r;            \
            r += scnprintf(bf + r, size - r, "%c", mod); \
        } } while(0)

    if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
        MOD_PRINT(kernel, 'k');
        MOD_PRINT(user, 'u');
        MOD_PRINT(hv, 'h');
        exclude_guest_default = true;
    }

    if (attr->precise_ip) {
        if (!colon)
            colon = ++r;
        r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
        exclude_guest_default = true;
    }

    if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
        MOD_PRINT(host, 'H');
        MOD_PRINT(guest, 'G');
    }
#undef MOD_PRINT
    if (colon)
        bf[colon - 1] = ':';
    return r;
}
static const char *__perf_evsel__sw_name(u64 config)
{
    if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
        return perf_evsel__sw_names[config];
    return "unknown-software";
}
static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
    int r;

    r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

    if (type & HW_BREAKPOINT_R)
        r += scnprintf(bf + r, size - r, "r");

    if (type & HW_BREAKPOINT_W)
        r += scnprintf(bf + r, size - r, "w");

    if (type & HW_BREAKPOINT_X)
        r += scnprintf(bf + r, size - r, "x");

    return r;
}
/* in perf_evsel__bp_name(): prepend the breakpoint spec, then modifiers */
    struct perf_event_attr *attr = &evsel->attr;
    int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);

    return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
                [PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache", "l1-d",     "l1d",      "L1-data",          },
 { "L1-icache", "l1-i",     "l1i",      "L1-instruction",   },
 { "LLC",       "L2",                                       },
 { "dTLB",      "d-tlb",    "Data-TLB",                     },
 { "iTLB",      "i-tlb",    "Instruction-TLB",              },
 { "branch",    "branches", "bpu",      "btb",      "bpc",  },
 { "node",                                                  },
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
                   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",     "loads",      "read",                                 },
 { "store",    "stores",     "write",                                },
 { "prefetch", "prefetches", "speculative-read", "speculative-load", },
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
                       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",   "Reference", "ops", "access", },
 { "misses", "miss",                       },
};

#define C(x)            PERF_COUNT_HW_CACHE_##x
#define CACHE_READ      (1 << C(OP_READ))
#define CACHE_WRITE     (1 << C(OP_WRITE))
#define CACHE_PREFETCH  (1 << C(OP_PREFETCH))
#define COP(x)          (1 << x)

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
    if (perf_evsel__hw_cache_stat[type] & COP(op))
        return true;    /* valid */
    else
        return false;   /* invalid */
}
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
                        char *bf, size_t size)
{
    /* ... */
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
    u8 op, result, type = (config >> 0) & 0xff;
    const char *err = "unknown-ext-hardware-cache-type";

    if (type >= PERF_COUNT_HW_CACHE_MAX)
        goto out_err;

    op = (config >> 8) & 0xff;
    err = "unknown-ext-hardware-cache-op";
    if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
        goto out_err;

    result = (config >> 16) & 0xff;
    err = "unknown-ext-hardware-cache-result";
    if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
        goto out_err;

    err = "invalid-cache";
    if (!perf_evsel__is_cache_op_valid(type, op))
        goto out_err;

    return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
    return scnprintf(bf, size, "%s", err);
}
static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
    int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);

    return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
    char bf[128];

    if (evsel->name)
        return evsel->name;

    switch (evsel->attr.type) {
    case PERF_TYPE_RAW:
        perf_evsel__raw_name(evsel, bf, sizeof(bf));
        break;

    case PERF_TYPE_HARDWARE:
        perf_evsel__hw_name(evsel, bf, sizeof(bf));
        break;

    case PERF_TYPE_HW_CACHE:
        perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
        break;

    case PERF_TYPE_SOFTWARE:
        perf_evsel__sw_name(evsel, bf, sizeof(bf));
        break;

    case PERF_TYPE_TRACEPOINT:
        scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
        break;

    case PERF_TYPE_BREAKPOINT:
        perf_evsel__bp_name(evsel, bf, sizeof(bf));
        break;

    default:
        scnprintf(bf, sizeof(bf), "unknown attr type: %d",
              evsel->attr.type);
        break;
    }

    evsel->name = strdup(bf);

    return evsel->name ?: "unknown";
}
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
    int ret;
    struct perf_evsel *pos;
    const char *group_name = perf_evsel__group_name(evsel);

    ret = scnprintf(buf, size, "%s { ", group_name);

    ret += scnprintf(buf + ret, size - ret, "%s",
             perf_evsel__name(evsel));

    for_each_group_member(pos, evsel)
        ret += scnprintf(buf + ret, size - ret, ", %s",
                 perf_evsel__name(pos));

    ret += scnprintf(buf + ret, size - ret, " }");

    return ret;
}
static void __perf_evsel__config_callchain(struct perf_evsel *evsel,
                       struct record_opts *opts,
                       struct callchain_param *param)
{
    bool function = perf_evsel__is_function_event(evsel);
    struct perf_event_attr *attr = &evsel->attr;

    perf_evsel__set_sample_bit(evsel, CALLCHAIN);

    attr->sample_max_stack = param->max_stack;

    if (param->record_mode == CALLCHAIN_LBR) {
        if (!opts->branch_stack) {
            if (attr->exclude_user) {
                pr_warning("LBR callstack option is only available "
                       "to get user callchain information. "
                       "Falling back to framepointers.\n");
            } else {
                perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
                attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
                            PERF_SAMPLE_BRANCH_CALL_STACK |
                            PERF_SAMPLE_BRANCH_NO_CYCLES |
                            PERF_SAMPLE_BRANCH_NO_FLAGS;
            }
        } else
            pr_warning("Cannot use LBR callstack with branch stack. "
                   "Falling back to framepointers.\n");
    }

    if (param->record_mode == CALLCHAIN_DWARF) {
        if (!function) {
            perf_evsel__set_sample_bit(evsel, REGS_USER);
            perf_evsel__set_sample_bit(evsel, STACK_USER);
            attr->sample_stack_user = param->dump_size;
            attr->exclude_callchain_user = 1;
        } else {
            pr_info("Cannot use DWARF unwind for function trace event,"
                " falling back to framepointers.\n");
        }
    }

    if (function) {
        pr_info("Disabling user space callchains for function trace event.\n");
        attr->exclude_callchain_user = 1;
    }
}
static void
perf_evsel__reset_callgraph(struct perf_evsel *evsel,
                struct callchain_param *param)
{
    struct perf_event_attr *attr = &evsel->attr;

    perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
    if (param->record_mode == CALLCHAIN_LBR) {
        perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
        attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
                          PERF_SAMPLE_BRANCH_CALL_STACK);
    }
    if (param->record_mode == CALLCHAIN_DWARF) {
        perf_evsel__reset_sample_bit(evsel, REGS_USER);
        perf_evsel__reset_sample_bit(evsel, STACK_USER);
    }
}
static void apply_config_terms(struct perf_evsel *evsel,
                   struct record_opts *opts, bool track)
{
    struct perf_evsel_config_term *term;
    struct list_head *config_terms = &evsel->config_terms;
    struct perf_event_attr *attr = &evsel->attr;
    struct callchain_param param;
    u32 dump_size = 0;
    int max_stack = 0;
    const char *callgraph_buf = NULL;

    list_for_each_entry(term, config_terms, list) {
        switch (term->type) {
        /* ... */
        case PERF_EVSEL__CONFIG_TERM_FREQ:
            attr->freq = 1;
            attr->sample_freq = term->val.freq;
            break;
        /* ... */
        case PERF_EVSEL__CONFIG_TERM_BRANCH:
            /* ... */
            parse_branch_str(term->val.branch,
                     &attr->branch_sample_type);
            break;
        /* ... */
        }
    }

    /* User explicitly set per-event callgraph, clear the old global setting. */
    if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
        bool sample_address = false;

        /* If a global max_stack was given, default to frame pointers. */
        if (max_stack) {
            param.max_stack = max_stack;
            if (callgraph_buf == NULL)
                callgraph_buf = "fp";
        }

        /* parse callgraph parameters */
        if (callgraph_buf != NULL) {
            if (!strcmp(callgraph_buf, "no")) {
                param.enabled = false;
                param.record_mode = CALLCHAIN_NONE;
            } else {
                param.enabled = true;
                if (parse_callchain_record(callgraph_buf, &param)) {
                    pr_err("per-event callgraph setting for %s failed. "
                           "Apply callgraph global setting for it\n",
                           evsel->name);
                    return;
                }
                if (param.record_mode == CALLCHAIN_DWARF)
                    sample_address = true;
            }
        }
        if (dump_size > 0) {
            dump_size = round_up(dump_size, sizeof(u64));
            param.dump_size = dump_size;
        }
        /* ... */
        if (sample_address) {
            perf_evsel__set_sample_bit(evsel, ADDR);
            perf_evsel__set_sample_bit(evsel, DATA_SRC);
            evsel->attr.mmap_data = track;
        }
        /* ... */
    }
}
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
            struct callchain_param *callchain)
{
    struct perf_evsel *leader = evsel->leader;
    struct perf_event_attr *attr = &evsel->attr;
    int track = evsel->tracking;
    /* ... */
    attr->write_backward = opts->overwrite ? 1 : 0;
    /* ... */
    if (evsel->sample_read) {
        perf_evsel__set_sample_bit(evsel, READ);
        /* ... apply group format only for groups with several members */
        if (leader->nr_members > 1) {
            attr->read_format |= PERF_FORMAT_GROUP;
            attr->inherit = 0;
        }
    }

    /*
     * We default some events to have a default interval. But keep
     * it a weak assumption overridable by the user.
     */
    if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
                     opts->user_interval != ULLONG_MAX)) {
        if (opts->freq) {
            perf_evsel__set_sample_bit(evsel, PERIOD);
            attr->freq        = 1;
            attr->sample_freq = opts->freq;
        } else {
            attr->sample_period = opts->default_interval;
        }
    }

    /*
     * Disable sampling for all group members other than the leader in
     * case the leader 'leads' the sampling.
     */
    if ((leader != evsel) && leader->sample_read) {
        attr->sample_freq    = 0;
        attr->sample_period  = 0;
        attr->write_backward = 0;
        attr->sample_id_all  = 0;
    }

    if (opts->no_samples)
        attr->sample_freq = 0;

    if (opts->inherit_stat) {
        evsel->attr.read_format |=
            PERF_FORMAT_TOTAL_TIME_ENABLED |
            PERF_FORMAT_TOTAL_TIME_RUNNING |
            PERF_FORMAT_ID;
        attr->inherit_stat = 1;
    }

    if (opts->sample_address) {
        perf_evsel__set_sample_bit(evsel, ADDR);
        attr->mmap_data = track;
    }

    /*
     * No user space callchains for function trace events: unwinding user
     * stacks while tracing the page fault handler is too tricky.
     */
    if (perf_evsel__is_function_event(evsel))
        evsel->attr.exclude_callchain_user = 1;

    /* ... */

    if (opts->no_buffering) {
        attr->watermark = 0;
        attr->wakeup_events = 1;
    }

    /* ... */

    if (opts->record_namespaces)
        attr->namespaces = track;

    if (opts->record_switch_events)
        attr->context_switch = track;

    /* ... */

    if (opts->running_time) {
        evsel->attr.read_format |=
            PERF_FORMAT_TOTAL_TIME_ENABLED |
            PERF_FORMAT_TOTAL_TIME_RUNNING;
    }

    /* ... */

    /*
     * Set enable_on_exec for independent events and group leaders of
     * workloads started by perf itself; immediate events start enabled.
     */
    if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
        !opts->initial_delay)
        attr->enable_on_exec = 1;

    if (evsel->immediate) {
        attr->disabled = 0;
        attr->enable_on_exec = 0;
    }

    if (opts->use_clockid) {
        attr->use_clockid = 1;
        attr->clockid = opts->clockid;
    }

    /* ... */

    if (opts->all_user) {
        attr->exclude_kernel = 1;
        attr->exclude_user   = 0;
    }

    if (opts->all_kernel) {
        attr->exclude_kernel = 0;
        attr->exclude_user   = 1;
    }

    /* ... */
}
static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
    evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

    if (evsel->fd) {
        int cpu, thread;

        for (cpu = 0; cpu < ncpus; cpu++) {
            for (thread = 0; thread < nthreads; thread++) {
                FD(evsel, cpu, thread) = -1;
            }
        }
    }

    return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ioc, void *arg)
{
    int cpu, thread;

    for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
        for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
            int fd = FD(evsel, cpu, thread),
                err = ioctl(fd, ioc, arg);

            if (err)
                return err;
        }
    }

    return 0;
}
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
    return perf_evsel__run_ioctl(evsel,
                     PERF_EVENT_IOC_SET_FILTER,
                     (void *)filter);
}

int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)
{
    char *new_filter = strdup(filter);

    if (new_filter != NULL) {
        free(evsel->filter);
        evsel->filter = new_filter;
        return 0;
    }

    return -1;
}

static int perf_evsel__append_filter(struct perf_evsel *evsel,
                     const char *fmt, const char *filter)
{
    char *new_filter;

    if (evsel->filter == NULL)
        return perf_evsel__set_filter(evsel, filter);

    if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
        free(evsel->filter);
        evsel->filter = new_filter;
        return 0;
    }

    return -1;
}
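/*
 * Illustrative usage (not part of the original file): the append helpers
 * combine the existing filter and the new one into a single expression,
 * e.g. perf_evsel__append_tp_filter() uses the "(%s) && (%s)" format:
 *
 *     perf_evsel__set_filter(evsel, "prev_pid == 0");
 *     perf_evsel__append_tp_filter(evsel, "next_pid == 0");
 *     // evsel->filter is now "(prev_pid == 0) && (next_pid == 0)"
 *
 * perf_evsel__apply_filter() then pushes the string to every open fd via
 * PERF_EVENT_IOC_SET_FILTER.
 */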
int perf_evsel__enable(struct perf_evsel *evsel)
{
    return perf_evsel__run_ioctl(evsel,
                     PERF_EVENT_IOC_ENABLE,
                     0);
}

int perf_evsel__disable(struct perf_evsel *evsel)
{
    return perf_evsel__run_ioctl(evsel,
                     PERF_EVENT_IOC_DISABLE,
                     0);
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
    if (ncpus == 0 || nthreads == 0)
        return 0;

    /* ... allocate evsel->sample_id xyarray ... */

    evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
    if (evsel->id == NULL) {
        xyarray__delete(evsel->sample_id);
        evsel->sample_id = NULL;
        return -ENOMEM;
    }

    return 0;
}
/* in perf_evsel__free_config_terms(): drop every term from the list */
    list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
        list_del(&term->list);
        free(term);
    }

void perf_evsel__close_fd(struct perf_evsel *evsel)
{
    int cpu, thread;

    for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
        for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
            close(FD(evsel, cpu, thread));
            FD(evsel, cpu, thread) = -1;
        }
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
    assert(list_empty(&evsel->node));
    assert(evsel->evlist == NULL);
    /* ... */
}
void perf_counts_values__scale(struct perf_counts_values *count,
                   bool scale, s8 *pscaled)
{
    s8 scaled = 0;

    if (scale) {
        if (count->run == 0) {
            scaled = -1;
            count->val = 0;
        } else if (count->run < count->ena) {
            scaled = 1;
            count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
        }
    } else
        count->ena = count->run = 0;

    if (pscaled)
        *pscaled = scaled;
}
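/*
 * Worked example (not part of the original file): with counter
 * multiplexing, an event enabled for ena = 1000000 ns but actually
 * scheduled on the PMU for only run = 250000 ns that counted val = 1000
 * events is scaled up to estimate the full window:
 *
 *     val = 1000 * 1000000 / 250000 + 0.5 = 4000
 *
 * and *pscaled is set to 1 so callers can flag the value as estimated;
 * run == 0 yields scaled = -1, meaning the counter never ran at all.
 */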
static int perf_evsel__read_size(struct perf_evsel *evsel)
{
    u64 read_format = evsel->attr.read_format;
    int entry = sizeof(u64); /* value */
    int size = 0;
    int nr = 1;

    if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
        size += sizeof(u64);

    if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
        size += sizeof(u64);

    if (read_format & PERF_FORMAT_ID)
        entry += sizeof(u64);

    if (read_format & PERF_FORMAT_GROUP) {
        nr = evsel->nr_members;
        size += sizeof(u64);
    }

    size += entry * nr;
    return size;
}

int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
             struct perf_counts_values *count)
{
    size_t size = perf_evsel__read_size(evsel);

    memset(count, 0, sizeof(*count));

    if (FD(evsel, cpu, thread) < 0)
        return -EINVAL;

    if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
        return -errno;

    return 0;
}
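/*
 * Layout sketch (not part of the original file): what read() returns for
 * read_format = TOTAL_TIME_ENABLED | TOTAL_TIME_RUNNING | ID on a single
 * event, matching the size computed by perf_evsel__read_size():
 *
 *     struct {
 *         u64 value;
 *         u64 time_enabled;
 *         u64 time_running;
 *         u64 id;
 *     };
 *
 * With PERF_FORMAT_GROUP, a leading nr field is followed by one
 * { value, id } pair per group member, which is what
 * perf_evsel__process_group_data() below walks through.
 */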
static int perf_evsel__process_group_data(struct perf_evsel *leader,
                      int cpu, int thread, u64 *data)
{
    u64 read_format = leader->attr.read_format;
    struct sample_read_value *v;
    u64 nr, ena = 0, run = 0, i;

    nr = *data++;

    if (nr != (u64) leader->nr_members)
        return -EINVAL;

    if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
        ena = *data++;

    if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
        run = *data++;

    v = (struct sample_read_value *) data;

    perf_evsel__set_count(leader, cpu, thread,
                  v[0].value, ena, run);

    for (i = 1; i < nr; i++) {
        struct perf_evsel *counter;

        counter = perf_evlist__id2evsel(leader->evlist, v[i].id);
        if (!counter)
            return -EINVAL;

        perf_evsel__set_count(counter, cpu, thread,
                      v[i].value, ena, run);
    }

    return 0;
}

static int perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread)
{
    u64 read_format = leader->attr.read_format;
    int size = perf_evsel__read_size(leader);
    u64 *data;

    if (!(read_format & PERF_FORMAT_ID))
        return -EINVAL;
    /* ... */
    if (FD(leader, cpu, thread) < 0)
        return -EINVAL;

    if (readn(FD(leader, cpu, thread), data, size) <= 0)
        return -errno;

    return perf_evsel__process_group_data(leader, cpu, thread, data);
}

int perf_evsel__read_counter(struct perf_evsel *evsel, int cpu, int thread)
{
    u64 read_format = evsel->attr.read_format;

    if (read_format & PERF_FORMAT_GROUP)
        return perf_evsel__read_group(evsel, cpu, thread);
    else
        return perf_evsel__read_one(evsel, cpu, thread);
}
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                  int cpu, int thread, bool scale)
{
    struct perf_counts_values count;
    size_t nv = scale ? 3 : 1;

    if (FD(evsel, cpu, thread) < 0)
        return -EINVAL;

    /* ... */

    if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0)
        return -errno;

    perf_evsel__compute_deltas(evsel, cpu, thread, &count);
    perf_counts_values__scale(&count, scale, NULL);
    *perf_counts(evsel->counts, cpu, thread) = count;
    return 0;
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
    struct perf_evsel *leader = evsel->leader;
    int fd;

    if (perf_evsel__is_group_leader(evsel))
        return -1;

    /* the leader must already be open, otherwise it is a bug */
    BUG_ON(!leader->fd);

    fd = FD(leader, cpu, thread);
    BUG_ON(fd == -1);

    return fd;
}
static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
{
    bool first_bit = true;
    int i = 0;

    do {
        if (value & bits[i].bit) {
            buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
            first_bit = false;
        }
    } while (bits[++i].name != NULL);
}

#define bit_name(n) { PERF_SAMPLE_##n, #n }
/* ... bit_names table used by __p_sample_type() ... */
#undef bit_name

#define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
/* ... bit_names table used by __p_branch_sample_type() ... */
#undef bit_name

#define bit_name(n) { PERF_FORMAT_##n, #n }
/* ... bit_names table used by __p_read_format() ... */
#undef bit_name

#define BUF_SIZE        1024

#define p_hex(val)      snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
#define p_unsigned(val)     snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
#define p_signed(val)       snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
#define p_sample_type(val)  __p_sample_type(buf, BUF_SIZE, val)
#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val)  __p_read_format(buf, BUF_SIZE, val)

#define PRINT_ATTRn(_n, _f, _p)             \
do {                            \
    if (attr->_f) {                 \
        _p(attr->_f);               \
        ret += attr__fprintf(fp, _n, buf, priv);\
    }                       \
} while (0)

#define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p)

static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
                void *priv __maybe_unused)
{
    return fprintf(fp, "  %-32s %s\n", name, val);
}
static void perf_evsel__remove_fd(struct perf_evsel *pos,
                  int nr_cpus, int nr_threads,
                  int thread_idx)
{
    for (int cpu = 0; cpu < nr_cpus; cpu++)
        for (int thread = thread_idx; thread < nr_threads - 1; thread++)
            FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
}

static int update_fds(struct perf_evsel *evsel,
              int nr_cpus, int cpu_idx,
              int nr_threads, int thread_idx)
{
    struct perf_evsel *pos;

    if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)
        return -EINVAL;

    evlist__for_each_entry(evsel->evlist, pos) {
        nr_cpus = pos != evsel ? nr_cpus : cpu_idx;

        perf_evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);

        /*
         * Since fds for the next evsel have not been created yet,
         * there is no need to iterate the whole event list.
         */
        if (pos == evsel)
            break;
    }
    return 0;
}

static bool ignore_missing_thread(struct perf_evsel *evsel,
                  int nr_cpus, int cpu,
                  struct thread_map *threads,
                  int thread, int err)
{
    pid_t ignore_pid = thread_map__pid(threads, thread);

    if (!evsel->ignore_missing_thread)
        return false;

    /* The system wide setup does not work with threads. */
    if (evsel->system_wide)
        return false;

    /* The -ESRCH is perf event syscall errno for pid's not found. */
    if (err != -ESRCH)
        return false;

    /* If there's only one thread, let it fail. */
    if (threads->nr == 1)
        return false;

    /*
     * Remove the fd for the missing thread first, because
     * thread_map__remove() will decrease threads->nr.
     */
    if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
        return false;

    if (thread_map__remove(threads, thread))
        return false;

    pr_warning("WARNING: Ignored open failure for pid %d\n",
           ignore_pid);
    return true;
}
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
             struct thread_map *threads)
{
    int cpu, thread, nthreads;
    unsigned long flags = PERF_FLAG_FD_CLOEXEC;
    int pid = -1, err;
    enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

    /* ... */

    if (cpus == NULL) {
        static struct cpu_map *empty_cpu_map;

        if (empty_cpu_map == NULL) {
            empty_cpu_map = cpu_map__dummy_new();
            if (empty_cpu_map == NULL)
                return -ENOMEM;
        }

        cpus = empty_cpu_map;
    }

    if (threads == NULL) {
        static struct thread_map *empty_thread_map;

        if (empty_thread_map == NULL) {
            empty_thread_map = thread_map__new_by_tid(-1);
            if (empty_thread_map == NULL)
                return -ENOMEM;
        }

        threads = empty_thread_map;
    }

    if (evsel->system_wide)
        nthreads = 1;
    else
        nthreads = threads->nr;

    if (evsel->fd == NULL &&
        perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
        return -ENOMEM;

    /* ... */

fallback_missing_features:
    if (perf_missing_features.clockid_wrong)
        evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
    if (perf_missing_features.clockid) {
        evsel->attr.use_clockid = 0;
        evsel->attr.clockid = 0;
    }
    if (perf_missing_features.cloexec)
        flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
    if (perf_missing_features.mmap2)
        evsel->attr.mmap2 = 0;
    if (perf_missing_features.exclude_guest)
        evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
    if (perf_missing_features.lbr_flags)
        evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
                     PERF_SAMPLE_BRANCH_NO_CYCLES);
    if (perf_missing_features.group_read && evsel->attr.inherit)
        evsel->attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
retry_sample_id:
    if (perf_missing_features.sample_id_all)
        evsel->attr.sample_id_all = 0;

    if (verbose >= 2) {
        fprintf(stderr, "perf_event_attr:\n");
        perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
    }

    for (cpu = 0; cpu < cpus->nr; cpu++) {

        for (thread = 0; thread < nthreads; thread++) {
            int fd, group_fd;

            if (!evsel->cgrp && !evsel->system_wide)
                pid = thread_map__pid(threads, thread);

            group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
            pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx",
                  pid, cpus->map[cpu], group_fd, flags);

            fd = sys_perf_event_open(&evsel->attr, pid, cpus->map[cpu],
                         group_fd, flags);

            FD(evsel, cpu, thread) = fd;

            if (fd < 0) {
                err = -errno;

                if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
                    /* one thread was removed; step back and retry */
                    nthreads--;
                    thread--;
                    continue;
                }

                pr_debug2("\nsys_perf_event_open failed, error %d\n",
                      err);
                goto try_fallback;
            }

            if (evsel->bpf_fd >= 0) {
                int evt_fd = fd;
                int bpf_fd = evsel->bpf_fd;

                err = ioctl(evt_fd,
                        PERF_EVENT_IOC_SET_BPF,
                        bpf_fd);
                if (err && errno != EEXIST) {
                    pr_err("failed to attach bpf fd %d: %s\n",
                           bpf_fd, strerror(errno));
                    err = -EINVAL;
                    goto out_close;
                }
            }

            set_rlimit = NO_CHANGE;

            /* ... */
        }
    }

    return 0;

try_fallback:
    /*
     * perf stat needs between 5 and 22 fds per CPU. When we run out
     * of them try to increase the limits.
     */
    if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
        struct rlimit l;
        int old_errno = errno;

        if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
            if (set_rlimit == NO_CHANGE)
                l.rlim_cur = l.rlim_max;
            else {
                l.rlim_cur = l.rlim_max + 1000;
                l.rlim_max = l.rlim_cur;
            }
            if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
                set_rlimit++;
                errno = old_errno;
                goto retry_open;
            }
        }
        errno = old_errno;
    }

    if (err != -EINVAL || cpu > 0 || thread > 0)
        goto out_close;

    /*
     * Must probe features in the order they were added to the
     * perf_event_attr interface.
     */
    if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
        perf_missing_features.write_backward = true;
        pr_debug2("switching off write_backward\n");
        goto out_close;
    } else if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
        perf_missing_features.clockid_wrong = true;
        goto fallback_missing_features;
    } else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
        perf_missing_features.clockid = true;
        pr_debug2("switching off use_clockid\n");
        goto fallback_missing_features;
    } else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
        perf_missing_features.cloexec = true;
        pr_debug2("switching off cloexec flag\n");
        goto fallback_missing_features;
    } else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
        perf_missing_features.mmap2 = true;
        goto fallback_missing_features;
    } else if (!perf_missing_features.exclude_guest &&
           (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
        perf_missing_features.exclude_guest = true;
        pr_debug2("switching off exclude_guest, exclude_host\n");
        goto fallback_missing_features;
    } else if (!perf_missing_features.sample_id_all) {
        perf_missing_features.sample_id_all = true;
        pr_debug2("switching off sample_id_all\n");
        goto retry_sample_id;
    } else if (!perf_missing_features.lbr_flags &&
           (evsel->attr.branch_sample_type &
            (PERF_SAMPLE_BRANCH_NO_CYCLES |
             PERF_SAMPLE_BRANCH_NO_FLAGS))) {
        perf_missing_features.lbr_flags = true;
        pr_debug2("switching off branch sample type no (cycles/flags)\n");
        goto fallback_missing_features;
    } else if (!perf_missing_features.group_read &&
           evsel->attr.inherit &&
           (evsel->attr.read_format & PERF_FORMAT_GROUP) &&
           perf_evsel__is_group_leader(evsel)) {
        perf_missing_features.group_read = true;
        pr_debug2("switching off group read\n");
        goto fallback_missing_features;
    }
out_close:
    do {
        while (--thread >= 0) {
            close(FD(evsel, cpu, thread));
            FD(evsel, cpu, thread) = -1;
        }
        thread = nthreads;
    } while (--cpu >= 0);
    return err;
}
void perf_evsel__close(struct perf_evsel *evsel)
{
    if (evsel->fd == NULL)
        return;

    perf_evsel__close_fd(evsel);
    perf_evsel__free_fd(evsel);
}
static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
                       const union perf_event *event,
                       struct perf_sample *sample)
{
    u64 type = evsel->attr.sample_type;
    const u64 *array = event->sample.array;
    bool swapped = evsel->needs_swap;
    union u64_swap u;

    array += ((event->header.size -
           sizeof(event->header)) / sizeof(u64)) - 1;

    if (type & PERF_SAMPLE_IDENTIFIER) {
        sample->id = *array;
        array--;
    }

    if (type & PERF_SAMPLE_CPU) {
        u.val64 = *array;
        if (swapped) {
            /* undo swap of u64, then swap on individual u32s */
            u.val64 = bswap_64(u.val64);
            u.val32[0] = bswap_32(u.val32[0]);
        }

        sample->cpu = u.val32[0];
        array--;
    }

    if (type & PERF_SAMPLE_STREAM_ID) {
        sample->stream_id = *array;
        array--;
    }

    if (type & PERF_SAMPLE_ID) {
        sample->id = *array;
        array--;
    }

    if (type & PERF_SAMPLE_TIME) {
        sample->time = *array;
        array--;
    }

    if (type & PERF_SAMPLE_TID) {
        u.val64 = *array;
        if (swapped) {
            /* undo swap of u64, then swap on individual u32s */
            u.val64 = bswap_64(u.val64);
            u.val32[0] = bswap_32(u.val32[0]);
            u.val32[1] = bswap_32(u.val32[1]);
        }

        sample->pid = u.val32[0];
        sample->tid = u.val32[1];
        array--;
    }

    return 0;
}
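/*
 * Layout sketch (not part of the original file): with sample_id_all, a
 * non-sample record such as PERF_RECORD_MMAP carries the id fields as a
 * trailer in reverse parse order, e.g. for
 * sample_type = TID | TIME | ID | CPU:
 *
 *     [ event body ... ][ pid/tid ][ time ][ id ][ cpu ]
 *                                                  ^ array starts here
 *
 * which is why this function walks the array backwards from the end of
 * the record.
 */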
static inline bool overflow(const void *endp, u16 max_size,
                const void *offset, u64 size)
{
    return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)          \
    do {                            \
        if (overflow(endp, (max_size), (offset), (size))) \
            return -EFAULT;             \
    } while (0)

#define OVERFLOW_CHECK_u64(offset) \
    OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))

static int
perf_event__check_size(union perf_event *event, unsigned int sample_size)
{
    /*
     * The evsel's sample_size is based on PERF_SAMPLE_MASK;
     * check it against the header's size.
     */
    if (sample_size + sizeof(event->header) > event->header.size)
        return -EFAULT;

    return 0;
}
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
                 struct perf_sample *data)
{
    u64 type = evsel->attr.sample_type;
    const u64 *array;
    u16 max_size = event->header.size;
    const void *endp = (void *)event + max_size;
    union u64_swap u;
    u64 sz;

    /*
     * Set defaults first, then fill in whatever sample_type says is
     * actually present in this record.
     */
    memset(data, 0, sizeof(*data));
    data->cpu = data->pid = data->tid = -1;
    data->period = evsel->attr.sample_period;
    data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
    data->misc    = event->header.misc;
    data->data_src = PERF_MEM_DATA_SRC_NONE;
    /* ... */

    if (event->header.type != PERF_RECORD_SAMPLE) {
        if (!evsel->attr.sample_id_all)
            return 0;
        return perf_evsel__parse_id_sample(evsel, event, data);
    }

    array = event->sample.array;

    if (perf_event__check_size(event, evsel->sample_size))
        return -EFAULT;

    if (type & PERF_SAMPLE_IDENTIFIER) {
        data->id = *array;
        array++;
    }

    if (type & PERF_SAMPLE_IP) {
        data->ip = *array;
        array++;
    }

    if (type & PERF_SAMPLE_TID) {
        u.val64 = *array;       /* byte-swap handling elided */
        data->pid = u.val32[0];
        data->tid = u.val32[1];
        array++;
    }

    if (type & PERF_SAMPLE_TIME) {
        data->time = *array;
        array++;
    }

    if (type & PERF_SAMPLE_ADDR) {
        data->addr = *array;
        array++;
    }

    if (type & PERF_SAMPLE_ID) {
        data->id = *array;
        array++;
    }

    if (type & PERF_SAMPLE_STREAM_ID) {
        data->stream_id = *array;
        array++;
    }

    if (type & PERF_SAMPLE_CPU) {
        u.val64 = *array;       /* byte-swap handling elided */
        data->cpu = u.val32[0];
        array++;
    }

    if (type & PERF_SAMPLE_PERIOD) {
        data->period = *array;
        array++;
    }

    if (type & PERF_SAMPLE_READ) {
        u64 read_format = evsel->attr.read_format;

        OVERFLOW_CHECK_u64(array);
        if (read_format & PERF_FORMAT_GROUP)
            data->read.group.nr = *array;
        else
            data->read.one.value = *array;
        array++;

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
            OVERFLOW_CHECK_u64(array);
            data->read.time_enabled = *array;
            array++;
        }

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
            OVERFLOW_CHECK_u64(array);
            data->read.time_running = *array;
            array++;
        }

        /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
        if (read_format & PERF_FORMAT_GROUP) {
            const u64 max_group_nr = UINT64_MAX /
                    sizeof(struct sample_read_value);

            if (data->read.group.nr > max_group_nr)
                return -EFAULT;
            sz = data->read.group.nr *
                 sizeof(struct sample_read_value);
            OVERFLOW_CHECK(array, sz, max_size);
            data->read.group.values =
                    (struct sample_read_value *)array;
            array = (void *)array + sz;
        } else {
            OVERFLOW_CHECK_u64(array);
            data->read.one.id = *array;
            array++;
        }
    }

    if (type & PERF_SAMPLE_CALLCHAIN) {
        const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

        OVERFLOW_CHECK_u64(array);
        data->callchain = (struct ip_callchain *)array++;
        if (data->callchain->nr > max_callchain_nr)
            return -EFAULT;
        sz = data->callchain->nr * sizeof(u64);
        OVERFLOW_CHECK(array, sz, max_size);
        array = (void *)array + sz;
    }

    if (type & PERF_SAMPLE_RAW) {
        OVERFLOW_CHECK_u64(array);
        /* a 32-bit size prefix, then the payload */
        data->raw_size = *(u32 *)array;     /* byte-swap handling elided */
        array = (void *)array + sizeof(u32);

        OVERFLOW_CHECK(array, data->raw_size, max_size);
        data->raw_data = (void *)array;
        array = (void *)array + data->raw_size;
    }

    if (type & PERF_SAMPLE_BRANCH_STACK) {
        const u64 max_branch_nr = UINT64_MAX /
                    sizeof(struct branch_entry);

        OVERFLOW_CHECK_u64(array);
        data->branch_stack = (struct branch_stack *)array++;

        if (data->branch_stack->nr > max_branch_nr)
            return -EFAULT;
        sz = data->branch_stack->nr * sizeof(struct branch_entry);
        OVERFLOW_CHECK(array, sz, max_size);
        array = (void *)array + sz;
    }

    if (type & PERF_SAMPLE_REGS_USER) {
        OVERFLOW_CHECK_u64(array);
        data->user_regs.abi = *array;
        array++;

        if (data->user_regs.abi) {
            u64 mask = evsel->attr.sample_regs_user;

            sz = hweight_long(mask) * sizeof(u64);
            OVERFLOW_CHECK(array, sz, max_size);
            data->user_regs.mask = mask;
            data->user_regs.regs = (u64 *)array;
            array = (void *)array + sz;
        }
    }

    if (type & PERF_SAMPLE_STACK_USER) {
        OVERFLOW_CHECK_u64(array);
        sz = *array++;

        if (!sz) {
            data->user_stack.size = 0;
        } else {
            OVERFLOW_CHECK(array, sz, max_size);
            data->user_stack.data = (char *)array;
            array = (void *)array + sz;
            OVERFLOW_CHECK_u64(array);
            data->user_stack.size = *array++;
            if (WARN_ONCE(data->user_stack.size > sz,
                      "user stack dump failure\n"))
                return -EFAULT;
        }
    }

    if (type & PERF_SAMPLE_WEIGHT) {
        OVERFLOW_CHECK_u64(array);
        data->weight = *array;
        array++;
    }

    if (type & PERF_SAMPLE_DATA_SRC) {
        OVERFLOW_CHECK_u64(array);
        data->data_src = *array;
        array++;
    }

    if (type & PERF_SAMPLE_TRANSACTION) {
        OVERFLOW_CHECK_u64(array);
        data->transaction = *array;
        array++;
    }

    data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
    if (type & PERF_SAMPLE_REGS_INTR) {
        OVERFLOW_CHECK_u64(array);
        data->intr_regs.abi = *array;
        array++;

        if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
            u64 mask = evsel->attr.sample_regs_intr;

            sz = hweight_long(mask) * sizeof(u64);
            OVERFLOW_CHECK(array, sz, max_size);
            data->intr_regs.mask = mask;
            data->intr_regs.regs = (u64 *)array;
            array = (void *)array + sz;
        }
    }

    data->phys_addr = 0;
    if (type & PERF_SAMPLE_PHYS_ADDR) {
        data->phys_addr = *array;
        array++;
    }

    return 0;
}
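/*
 * Illustrative usage (not part of the original file): ring buffer
 * consumers hand each record to the parser before touching any field:
 *
 *     struct perf_sample sample;
 *
 *     if (perf_evsel__parse_sample(evsel, event, &sample) == 0)
 *         printf("pid %d tid %d ip %#" PRIx64 "\n",
 *                sample.pid, sample.tid, sample.ip);
 *
 * Note that only event->header.size is trusted: every variable-length
 * field is bounds-checked with OVERFLOW_CHECK before being dereferenced.
 */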
int perf_evsel__parse_sample_timestamp(struct perf_evsel *evsel,
                       union perf_event *event,
                       u64 *timestamp)
{
    u64 type = evsel->attr.sample_type;
    const u64 *array;

    if (!(type & PERF_SAMPLE_TIME))
        return -1;

    if (event->header.type != PERF_RECORD_SAMPLE) {
        struct perf_sample data = {
            .time = -1ULL,
        };

        if (!evsel->attr.sample_id_all)
            return -1;
        if (perf_evsel__parse_id_sample(evsel, event, &data))
            return -1;

        *timestamp = data.time;
        return 0;
    }

    array = event->sample.array;

    if (perf_event__check_size(event, evsel->sample_size))
        return -EFAULT;

    if (type & PERF_SAMPLE_IDENTIFIER)
        array++;

    if (type & PERF_SAMPLE_IP)
        array++;

    if (type & PERF_SAMPLE_TID)
        array++;

    if (type & PERF_SAMPLE_TIME)
        *timestamp = *array;

    return 0;
}
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
                     u64 read_format)
{
    size_t sz, result = sizeof(struct sample_event);

    if (type & PERF_SAMPLE_IDENTIFIER)
        result += sizeof(u64);

    if (type & PERF_SAMPLE_IP)
        result += sizeof(u64);

    if (type & PERF_SAMPLE_TID)
        result += sizeof(u64);

    if (type & PERF_SAMPLE_TIME)
        result += sizeof(u64);

    if (type & PERF_SAMPLE_ADDR)
        result += sizeof(u64);

    if (type & PERF_SAMPLE_ID)
        result += sizeof(u64);

    if (type & PERF_SAMPLE_STREAM_ID)
        result += sizeof(u64);

    if (type & PERF_SAMPLE_CPU)
        result += sizeof(u64);

    if (type & PERF_SAMPLE_PERIOD)
        result += sizeof(u64);

    if (type & PERF_SAMPLE_READ) {
        result += sizeof(u64);
        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
            result += sizeof(u64);
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
            result += sizeof(u64);
        /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
        if (read_format & PERF_FORMAT_GROUP) {
            sz = sample->read.group.nr *
                 sizeof(struct sample_read_value);
            result += sz;
        } else {
            result += sizeof(u64);
        }
    }

    if (type & PERF_SAMPLE_CALLCHAIN) {
        sz = (sample->callchain->nr + 1) * sizeof(u64);
        result += sz;
    }

    if (type & PERF_SAMPLE_RAW) {
        result += sizeof(u32);
        result += sample->raw_size;
    }

    if (type & PERF_SAMPLE_BRANCH_STACK) {
        sz = sample->branch_stack->nr * sizeof(struct branch_entry);
        sz += sizeof(u64);
        result += sz;
    }

    if (type & PERF_SAMPLE_REGS_USER) {
        if (sample->user_regs.abi) {
            result += sizeof(u64);
            sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
            result += sz;
        } else {
            result += sizeof(u64);
        }
    }

    if (type & PERF_SAMPLE_STACK_USER) {
        sz = sample->user_stack.size;
        result += sizeof(u64);
        if (sz) {
            result += sz;
            result += sizeof(u64);
        }
    }

    if (type & PERF_SAMPLE_WEIGHT)
        result += sizeof(u64);

    if (type & PERF_SAMPLE_DATA_SRC)
        result += sizeof(u64);

    if (type & PERF_SAMPLE_TRANSACTION)
        result += sizeof(u64);

    if (type & PERF_SAMPLE_REGS_INTR) {
        if (sample->intr_regs.abi) {
            result += sizeof(u64);
            sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
            result += sz;
        } else {
            result += sizeof(u64);
        }
    }

    if (type & PERF_SAMPLE_PHYS_ADDR)
        result += sizeof(u64);

    return result;
}
int perf_event__synthesize_sample(union perf_event *event, u64 type,
                  u64 read_format,
                  const struct perf_sample *sample)
{
    u64 *array;
    size_t sz;
    union u64_swap u;

    /* written in the same order perf_evsel__parse_sample() reads it */
    array = event->sample.array;

    if (type & PERF_SAMPLE_IDENTIFIER) {
        *array = sample->id;
        array++;
    }

    if (type & PERF_SAMPLE_IP) {
        *array = sample->ip;
        array++;
    }

    if (type & PERF_SAMPLE_TID) {
        u.val32[0] = sample->pid;
        u.val32[1] = sample->tid;
        *array = u.val64;
        array++;
    }

    if (type & PERF_SAMPLE_TIME) {
        *array = sample->time;
        array++;
    }

    if (type & PERF_SAMPLE_ADDR) {
        *array = sample->addr;
        array++;
    }

    if (type & PERF_SAMPLE_ID) {
        *array = sample->id;
        array++;
    }

    if (type & PERF_SAMPLE_STREAM_ID) {
        *array = sample->stream_id;
        array++;
    }

    if (type & PERF_SAMPLE_CPU) {
        u.val32[0] = sample->cpu;
        u.val32[1] = 0;
        *array = u.val64;
        array++;
    }

    if (type & PERF_SAMPLE_PERIOD) {
        *array = sample->period;
        array++;
    }

    if (type & PERF_SAMPLE_READ) {
        if (read_format & PERF_FORMAT_GROUP)
            *array = sample->read.group.nr;
        else
            *array = sample->read.one.value;
        array++;

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
            *array = sample->read.time_enabled;
            array++;
        }

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
            *array = sample->read.time_running;
            array++;
        }

        /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
        if (read_format & PERF_FORMAT_GROUP) {
            sz = sample->read.group.nr *
                 sizeof(struct sample_read_value);
            memcpy(array, sample->read.group.values, sz);
            array = (void *)array + sz;
        } else {
            *array = sample->read.one.id;
            array++;
        }
    }

    if (type & PERF_SAMPLE_CALLCHAIN) {
        sz = (sample->callchain->nr + 1) * sizeof(u64);
        memcpy(array, sample->callchain, sz);
        array = (void *)array + sz;
    }

    if (type & PERF_SAMPLE_RAW) {
        u.val32[0] = sample->raw_size;
        *array = u.val64;
        array = (void *)array + sizeof(u32);

        memcpy(array, sample->raw_data, sample->raw_size);
        array = (void *)array + sample->raw_size;
    }

    if (type & PERF_SAMPLE_BRANCH_STACK) {
        sz = sample->branch_stack->nr * sizeof(struct branch_entry);
        sz += sizeof(u64);
        memcpy(array, sample->branch_stack, sz);
        array = (void *)array + sz;
    }

    if (type & PERF_SAMPLE_REGS_USER) {
        if (sample->user_regs.abi) {
            *array++ = sample->user_regs.abi;
            sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
            memcpy(array, sample->user_regs.regs, sz);
            array = (void *)array + sz;
        } else {
            *array++ = 0;
        }
    }

    if (type & PERF_SAMPLE_STACK_USER) {
        sz = sample->user_stack.size;
        *array++ = sz;
        if (sz) {
            memcpy(array, sample->user_stack.data, sz);
            array = (void *)array + sz;
            *array++ = sz;
        }
    }

    if (type & PERF_SAMPLE_WEIGHT) {
        *array = sample->weight;
        array++;
    }

    if (type & PERF_SAMPLE_DATA_SRC) {
        *array = sample->data_src;
        array++;
    }

    if (type & PERF_SAMPLE_TRANSACTION) {
        *array = sample->transaction;
        array++;
    }

    if (type & PERF_SAMPLE_REGS_INTR) {
        if (sample->intr_regs.abi) {
            *array++ = sample->intr_regs.abi;
            sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
            memcpy(array, sample->intr_regs.regs, sz);
            array = (void *)array + sz;
        } else {
            *array++ = 0;
        }
    }

    if (type & PERF_SAMPLE_PHYS_ADDR) {
        *array = sample->phys_addr;
        array++;
    }

    return 0;
}
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
    return pevent_find_field(evsel->tp_format, name);
}

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
             const char *name)
{
    struct format_field *field = perf_evsel__field(evsel, name);
    int offset;

    if (!field)
        return NULL;

    offset = field->offset;

    if (field->flags & FIELD_IS_DYNAMIC) {
        /* dynamic fields store "offset | (len << 16)" in the payload */
        offset = *(int *)(sample->raw_data + field->offset);
        offset &= 0xffff;
    }

    return sample->raw_data + offset;
}

u64 format_field__intval(struct format_field *field, struct perf_sample *sample,
             bool needs_swap)
{
    u64 value;
    void *ptr = sample->raw_data + field->offset;

    switch (field->size) {
    case 1:
        return *(u8 *)ptr;
    case 2:
        value = *(u16 *)ptr;
        break;
    case 4:
        value = *(u32 *)ptr;
        break;
    case 8:
        memcpy(&value, ptr, sizeof(u64));
        break;
    default:
        return 0;
    }

    if (!needs_swap)
        return value;

    switch (field->size) {
    case 2:
        return bswap_16(value);
    case 4:
        return bswap_32(value);
    case 8:
        return bswap_64(value);
    default:
        return 0;
    }

    return 0;
}
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
              char *msg, size_t msgsize)
{
    int paranoid;

    if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
        evsel->attr.type   == PERF_TYPE_HARDWARE &&
        evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
        /*
         * If it's cycles, fall back to the hrtimer based cpu-clock sw
         * counter, which is always available even without PMU support.
         */
        scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

        evsel->attr.type   = PERF_TYPE_SOFTWARE;
        evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

        zfree(&evsel->name);
        return true;
    } else if (err == EACCES && !evsel->attr.exclude_kernel &&
           (paranoid = perf_event_paranoid()) > 1) {
        const char *name = perf_evsel__name(evsel);
        char *new_name;
        const char *sep = ":";

        /* Is there already a separator in the name? */
        if (strchr(name, '/') ||
            strchr(name, ':'))
            sep = "";

        if (asprintf(&new_name, "%s%su", name, sep) < 0)
            return false;

        if (evsel->name)
            free(evsel->name);
        evsel->name = new_name;
        scnprintf(msg, msgsize,
"kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
        evsel->attr.exclude_kernel = 1;

        return true;
    }

    return false;
}
static bool find_process(const char *name)
{
    size_t len = strlen(name);
    DIR *dir;
    struct dirent *d;
    int ret = -1;

    dir = opendir(procfs__mountpoint());
    if (!dir)
        return false;

    /* Walk through the /proc directory, comparing comm entries. */
    while (ret && (d = readdir(dir)) != NULL) {
        char path[PATH_MAX];
        char *data;
        size_t size;

        if ((d->d_type != DT_DIR) ||
             !strcmp(".", d->d_name) ||
             !strcmp("..", d->d_name))
            continue;

        scnprintf(path, sizeof(path), "%s/%s/comm",
              procfs__mountpoint(), d->d_name);

        if (filename__read_str(path, &data, &size))
            continue;

        ret = strncmp(name, data, len);
        free(data);
    }

    closedir(dir);
    return ret ? false : true;
}
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
                  int err, char *msg, size_t size)
{
    char sbuf[STRERR_BUFSIZE];
    int printed = 0;

    switch (err) {
    case EPERM:
    case EACCES:
        if (err == EPERM)
            printed = scnprintf(msg, size,
                "No permission to enable %s event.\n\n",
                perf_evsel__name(evsel));

        return scnprintf(msg + printed, size - printed,
         "You may not have permission to collect %sstats.\n\n"
         "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
         "which controls use of the performance events system by\n"
         "unprivileged users (without CAP_SYS_ADMIN).\n\n"
         "The current value is %d:\n\n"
         "  -1: Allow use of (almost) all events by all users\n"
         "      Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
         ">= 0: Disallow ftrace function tracepoint by users without CAP_SYS_ADMIN\n"
         "      Disallow raw tracepoint access by users without CAP_SYS_ADMIN\n"
         ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
         ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN\n\n"
         "To make this setting permanent, edit /etc/sysctl.conf too, e.g.:\n\n"
         "  kernel.perf_event_paranoid = -1\n",
                 target->system_wide ? "system-wide " : "",
                 perf_event_paranoid());
    case ENOENT:
        return scnprintf(msg, size, "The %s event is not supported.",
                 perf_evsel__name(evsel));
    case EMFILE:
        return scnprintf(msg, size, "%s",
             "Too many events are opened.\n"
             "Probably the maximum number of open file descriptors has been reached.\n"
             "Hint: Try again after reducing the number of events.\n"
             "Hint: Try increasing the limit with 'ulimit -n <limit>'");
    case ENOMEM:
        if (evsel__has_callchain(evsel) &&
            access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
            return scnprintf(msg, size,
                     "Not enough memory to setup event with callchain.\n"
                     "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
                     "Hint: Current value: %d", sysctl__max_stack());
        break;
    case ENODEV:
        if (target->cpu_list)
            return scnprintf(msg, size, "%s",
     "No such device - did you specify an out-of-range profile CPU?");
        break;
    case EOPNOTSUPP:
        if (evsel->attr.sample_period != 0)
            return scnprintf(msg, size,
    "%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
                     perf_evsel__name(evsel));
        if (evsel->attr.precise_ip)
            return scnprintf(msg, size, "%s",
    "\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
        if (evsel->attr.type == PERF_TYPE_HARDWARE)
            return scnprintf(msg, size, "%s",
    "No hardware sampling interrupt available.\n");
#endif
        break;
    case EBUSY:
        if (find_process("oprofiled"))
            return scnprintf(msg, size,
    "The PMU counters are busy/taken by another profiler.\n"
    "We found oprofile daemon running, please stop it and try again.");
        break;
    case EINVAL:
        if (evsel->attr.write_backward && perf_missing_features.write_backward)
            return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
        if (perf_missing_features.clockid)
            return scnprintf(msg, size, "clockid feature not supported.");
        if (perf_missing_features.clockid_wrong)
            return scnprintf(msg, size, "wrong clockid (%d).", clockid);
        break;
    default:
        break;
    }

    return scnprintf(msg, size,
    "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
    "/bin/dmesg | grep -i perf may provide additional information.\n",
             err, str_error_r(err, sbuf, sizeof(sbuf)),
             perf_evsel__name(evsel));
}

struct perf_env *perf_evsel__env(struct perf_evsel *evsel)
{
    if (evsel && evsel->evlist)
        return evsel->evlist->env;
    return NULL;
}
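/*
 * Illustrative usage (not part of the original file): a simplified
 * version of how perf tools combine these helpers on the open path:
 *
 *     char msg[512];
 *
 * again:
 *     if (perf_evsel__open(evsel, cpus, threads) < 0) {
 *         if (perf_evsel__fallback(evsel, errno, msg, sizeof(msg)))
 *             goto again;   // e.g. cycles -> cpu-clock, or append ":u"
 *         perf_evsel__open_strerror(evsel, &opts->target, errno,
 *                                   msg, sizeof(msg));
 *         pr_err("%s\n", msg);
 *     }
 */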