Linux Perf
intel-bts.c
/*
 * intel-bts.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>

#include "../../util/cpumap.h"
#include "../../util/evsel.h"
#include "../../util/evlist.h"
#include "../../util/session.h"
#include "../../util/util.h"
#include "../../util/pmu.h"
#include "../../util/debug.h"
#include "../../util/tsc.h"
#include "../../util/auxtrace.h"
#include "../../util/intel-bts.h"

#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
#define KiB_MASK(x) (KiB(x) - 1)
#define MiB_MASK(x) (MiB(x) - 1)

struct intel_bts_snapshot_ref {
        void    *ref_buf;
        size_t  ref_offset;
        bool    wrapped;
};

struct intel_bts_recording {
        struct auxtrace_record          itr;
        struct perf_pmu                 *intel_bts_pmu;
        struct perf_evlist              *evlist;
        bool                            snapshot_mode;
        size_t                          snapshot_size;
        int                             snapshot_ref_cnt;
        struct intel_bts_snapshot_ref   *snapshot_refs;
};

struct branch {
        u64 from;
        u64 to;
        u64 misc;
};

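/*
 * Note: struct branch mirrors the 24-byte records that the hardware Branch
 * Trace Store writes in 64-bit mode (branch-from address, branch-to address,
 * and a flags word).  intel_bts_recording_init() below sets itr.alignment to
 * sizeof(struct branch), which lets perf keep captured AUX data aligned to
 * whole records.
 */
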
static size_t
intel_bts_info_priv_size(struct auxtrace_record *itr __maybe_unused,
                         struct perf_evlist *evlist __maybe_unused)
{
        return INTEL_BTS_AUXTRACE_PRIV_SIZE;
}

static int intel_bts_info_fill(struct auxtrace_record *itr,
                               struct perf_session *session,
                               struct auxtrace_info_event *auxtrace_info,
                               size_t priv_size)
{
        struct intel_bts_recording *btsr =
                        container_of(itr, struct intel_bts_recording, itr);
        struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
        struct perf_event_mmap_page *pc;
        struct perf_tsc_conversion tc = { .time_mult = 0, };
        bool cap_user_time_zero = false;
        int err;

        if (priv_size != INTEL_BTS_AUXTRACE_PRIV_SIZE)
                return -EINVAL;

        if (!session->evlist->nr_mmaps)
                return -EINVAL;

        pc = session->evlist->mmap[0].base;
        if (pc) {
                err = perf_read_tsc_conversion(pc, &tc);
                if (err) {
                        if (err != -EOPNOTSUPP)
                                return err;
                } else {
                        cap_user_time_zero = tc.time_mult != 0;
                }
                if (!cap_user_time_zero)
                        ui__warning("Intel BTS: TSC not available\n");
        }

        auxtrace_info->type = PERF_AUXTRACE_INTEL_BTS;
        auxtrace_info->priv[INTEL_BTS_PMU_TYPE] = intel_bts_pmu->type;
        auxtrace_info->priv[INTEL_BTS_TIME_SHIFT] = tc.time_shift;
        auxtrace_info->priv[INTEL_BTS_TIME_MULT] = tc.time_mult;
        auxtrace_info->priv[INTEL_BTS_TIME_ZERO] = tc.time_zero;
        auxtrace_info->priv[INTEL_BTS_CAP_USER_TIME_ZERO] = cap_user_time_zero;
        auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE] = btsr->snapshot_mode;

        return 0;
}

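/*
 * Illustrative sketch (not part of the upstream file): how a decoder can turn
 * a raw TSC value back into perf time using the time_shift/time_mult/time_zero
 * values that intel_bts_info_fill() stores in the auxtrace_info priv[] array.
 * The helper name is hypothetical; perf has an equivalent conversion helper of
 * its own.
 */
static u64 example_tsc_to_perf_time(u64 cyc, const struct perf_tsc_conversion *tc)
{
        u64 quot = cyc >> tc->time_shift;                       /* whole multiplier steps */
        u64 rem  = cyc & (((u64)1 << tc->time_shift) - 1);      /* remaining cycles */

        return tc->time_zero + quot * tc->time_mult +
               ((rem * tc->time_mult) >> tc->time_shift);
}
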
static int intel_bts_recording_options(struct auxtrace_record *itr,
                                       struct perf_evlist *evlist,
                                       struct record_opts *opts)
{
        struct intel_bts_recording *btsr =
                        container_of(itr, struct intel_bts_recording, itr);
        struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
        struct perf_evsel *evsel, *intel_bts_evsel = NULL;
        const struct cpu_map *cpus = evlist->cpus;
        bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;

        btsr->evlist = evlist;
        btsr->snapshot_mode = opts->auxtrace_snapshot_mode;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->attr.type == intel_bts_pmu->type) {
                        if (intel_bts_evsel) {
                                pr_err("There may be only one " INTEL_BTS_PMU_NAME " event\n");
                                return -EINVAL;
                        }
                        evsel->attr.freq = 0;
                        evsel->attr.sample_period = 1;
                        intel_bts_evsel = evsel;
                        opts->full_auxtrace = true;
                }
        }

        if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
                pr_err("Snapshot mode (-S option) requires " INTEL_BTS_PMU_NAME " PMU event (-e " INTEL_BTS_PMU_NAME ")\n");
                return -EINVAL;
        }

        if (!opts->full_auxtrace)
                return 0;

        if (opts->full_auxtrace && !cpu_map__empty(cpus)) {
                pr_err(INTEL_BTS_PMU_NAME " does not support per-cpu recording\n");
                return -EINVAL;
        }

        /* Set default sizes for snapshot mode */
        if (opts->auxtrace_snapshot_mode) {
                if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
                        if (privileged) {
                                opts->auxtrace_mmap_pages = MiB(4) / page_size;
                        } else {
                                opts->auxtrace_mmap_pages = KiB(128) / page_size;
                                if (opts->mmap_pages == UINT_MAX)
                                        opts->mmap_pages = KiB(256) / page_size;
                        }
                } else if (!opts->auxtrace_mmap_pages && !privileged &&
                           opts->mmap_pages == UINT_MAX) {
                        opts->mmap_pages = KiB(256) / page_size;
                }
                if (!opts->auxtrace_snapshot_size)
                        opts->auxtrace_snapshot_size =
                                opts->auxtrace_mmap_pages * (size_t)page_size;
                if (!opts->auxtrace_mmap_pages) {
                        size_t sz = opts->auxtrace_snapshot_size;

                        sz = round_up(sz, page_size) / page_size;
                        opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
                }
                if (opts->auxtrace_snapshot_size >
                                opts->auxtrace_mmap_pages * (size_t)page_size) {
                        pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
                               opts->auxtrace_snapshot_size,
                               opts->auxtrace_mmap_pages * (size_t)page_size);
                        return -EINVAL;
                }
                if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
                        pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
                        return -EINVAL;
                }
                pr_debug2("Intel BTS snapshot size: %zu\n",
                          opts->auxtrace_snapshot_size);
        }
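
        /*
         * Worked example (assuming 4 KiB pages): with no sizes given, an
         * unprivileged user gets auxtrace_mmap_pages = KiB(128) / 4096 = 32
         * pages and auxtrace_snapshot_size defaults to 32 * 4096 = 128 KiB;
         * a privileged user gets MiB(4) / 4096 = 1024 pages (4 MiB).  If only
         * a snapshot size is given, the AUX mmap is rounded up to the next
         * power-of-two number of pages that covers it.
         */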

        /* Set default sizes for full trace mode */
        if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
                if (privileged) {
                        opts->auxtrace_mmap_pages = MiB(4) / page_size;
                } else {
                        opts->auxtrace_mmap_pages = KiB(128) / page_size;
                        if (opts->mmap_pages == UINT_MAX)
                                opts->mmap_pages = KiB(256) / page_size;
                }
        }

        /* Validate auxtrace_mmap_pages */
        if (opts->auxtrace_mmap_pages) {
                size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
                size_t min_sz;

                if (opts->auxtrace_snapshot_mode)
                        min_sz = KiB(4);
                else
                        min_sz = KiB(8);

                if (sz < min_sz || !is_power_of_2(sz)) {
                        pr_err("Invalid mmap size for Intel BTS: must be at least %zuKiB and a power of 2\n",
                               min_sz / 1024);
                        return -EINVAL;
                }
        }

        if (intel_bts_evsel) {
                /*
                 * To obtain the auxtrace buffer file descriptor, the auxtrace event
                 * must come first.
                 */
                perf_evlist__to_front(evlist, intel_bts_evsel);
                /*
                 * In the case of per-cpu mmaps, we need the CPU on the
                 * AUX event.
                 */
                if (!cpu_map__empty(cpus))
                        perf_evsel__set_sample_bit(intel_bts_evsel, CPU);
        }

        /* Add dummy event to keep tracking */
        if (opts->full_auxtrace) {
                struct perf_evsel *tracking_evsel;
                int err;

                err = parse_events(evlist, "dummy:u", NULL);
                if (err)
                        return err;

                tracking_evsel = perf_evlist__last(evlist);

                perf_evlist__set_tracking_event(evlist, tracking_evsel);

                tracking_evsel->attr.freq = 0;
                tracking_evsel->attr.sample_period = 1;
        }

        return 0;
}

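/*
 * Illustrative usage (not from the upstream file): command lines that end up
 * in intel_bts_recording_options().  The option spellings are meant as a
 * reminder; see perf-record(1) for the authoritative syntax.
 *
 *   perf record -e intel_bts// -- ls           full trace
 *   perf record -e intel_bts// -S -- ls        snapshot mode (-S)
 *   perf record -e intel_bts// -m 16,32 -- ls  16 data pages, 32 AUX pages
 *
 * The AUX area size must be a power-of-two number of bytes, at least 8 KiB in
 * full-trace mode and 4 KiB in snapshot mode, as validated above.
 */
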
static int intel_bts_parse_snapshot_options(struct auxtrace_record *itr,
                                            struct record_opts *opts,
                                            const char *str)
{
        struct intel_bts_recording *btsr =
                        container_of(itr, struct intel_bts_recording, itr);
        unsigned long long snapshot_size = 0;
        char *endptr;

        if (str) {
                snapshot_size = strtoull(str, &endptr, 0);
                if (*endptr || snapshot_size > SIZE_MAX)
                        return -1;
        }

        opts->auxtrace_snapshot_mode = true;
        opts->auxtrace_snapshot_size = snapshot_size;

        btsr->snapshot_size = snapshot_size;

        return 0;
}

static u64 intel_bts_reference(struct auxtrace_record *itr __maybe_unused)
{
        return rdtsc();
}

static int intel_bts_alloc_snapshot_refs(struct intel_bts_recording *btsr,
                                         int idx)
{
        const size_t sz = sizeof(struct intel_bts_snapshot_ref);
        int cnt = btsr->snapshot_ref_cnt, new_cnt = cnt * 2;
        struct intel_bts_snapshot_ref *refs;

        if (!new_cnt)
                new_cnt = 16;

        while (new_cnt <= idx)
                new_cnt *= 2;

        refs = calloc(new_cnt, sz);
        if (!refs)
                return -ENOMEM;

        memcpy(refs, btsr->snapshot_refs, cnt * sz);

        btsr->snapshot_refs = refs;
        btsr->snapshot_ref_cnt = new_cnt;

        return 0;
}

static void intel_bts_free_snapshot_refs(struct intel_bts_recording *btsr)
{
        int i;

        for (i = 0; i < btsr->snapshot_ref_cnt; i++)
                zfree(&btsr->snapshot_refs[i].ref_buf);
        zfree(&btsr->snapshot_refs);
}

static void intel_bts_recording_free(struct auxtrace_record *itr)
{
        struct intel_bts_recording *btsr =
                        container_of(itr, struct intel_bts_recording, itr);

        intel_bts_free_snapshot_refs(btsr);
        free(btsr);
}

static int intel_bts_snapshot_start(struct auxtrace_record *itr)
{
        struct intel_bts_recording *btsr =
                        container_of(itr, struct intel_bts_recording, itr);
        struct perf_evsel *evsel;

        evlist__for_each_entry(btsr->evlist, evsel) {
                if (evsel->attr.type == btsr->intel_bts_pmu->type)
                        return perf_evsel__disable(evsel);
        }
        return -EINVAL;
}

static int intel_bts_snapshot_finish(struct auxtrace_record *itr)
{
        struct intel_bts_recording *btsr =
                        container_of(itr, struct intel_bts_recording, itr);
        struct perf_evsel *evsel;

        evlist__for_each_entry(btsr->evlist, evsel) {
                if (evsel->attr.type == btsr->intel_bts_pmu->type)
                        return perf_evsel__enable(evsel);
        }
        return -EINVAL;
}

static bool intel_bts_first_wrap(u64 *data, size_t buf_size)
{
        int i, a, b;

        b = buf_size >> 3;
        a = b - 512;
        if (a < 0)
                a = 0;

        for (i = a; i < b; i++) {
                if (data[i])
                        return true;
        }

        return false;
}

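/*
 * Worked example: intel_bts_first_wrap() scans the last 512 u64 words, i.e.
 * the final 4 KiB, of the AUX buffer.  With a 64 KiB buffer, b = 65536 >> 3 =
 * 8192 and a = 8192 - 512 = 7680, so data[7680..8191] is checked.  The buffer
 * starts out zero-filled, so any non-zero word near the end means the writer
 * has reached the end of the buffer at least once and has therefore wrapped.
 */
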
static int intel_bts_find_snapshot(struct auxtrace_record *itr, int idx,
                                   struct auxtrace_mmap *mm, unsigned char *data,
                                   u64 *head, u64 *old)
{
        struct intel_bts_recording *btsr =
                        container_of(itr, struct intel_bts_recording, itr);
        bool wrapped;
        int err;

        pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
                  __func__, idx, (size_t)*old, (size_t)*head);

        if (idx >= btsr->snapshot_ref_cnt) {
                err = intel_bts_alloc_snapshot_refs(btsr, idx);
                if (err)
                        goto out_err;
        }

        wrapped = btsr->snapshot_refs[idx].wrapped;
        if (!wrapped && intel_bts_first_wrap((u64 *)data, mm->len)) {
                btsr->snapshot_refs[idx].wrapped = true;
                wrapped = true;
        }

        /*
         * In full trace mode 'head' continually increases. However in snapshot
         * mode 'head' is an offset within the buffer. Here 'old' and 'head'
         * are adjusted to match the full trace case which expects that 'old' is
         * always less than 'head'.
         */
        if (wrapped) {
                *old = *head;
                *head += mm->len;
        } else {
                if (mm->mask)
                        *old &= mm->mask;
                else
                        *old %= mm->len;
                if (*old > *head)
                        *head += mm->len;
        }

        pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
                  __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);

        return 0;

out_err:
        pr_err("%s: failed, error %d\n", __func__, err);
        return err;
}

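/*
 * Worked example: take a 64 KiB AUX buffer (mm->len == 0x10000) and a snapshot
 * with head == 0x3000.  If the buffer has not wrapped, old is masked into the
 * buffer (say 0x2000) and remains below head, so the 0x1000 bytes between
 * 0x2000 and 0x3000 are consumed.  If it has wrapped, old becomes 0x3000 and
 * head becomes 0x13000, so one full buffer's worth (0x10000 bytes) ending at
 * the current write position is consumed, preserving the full-trace invariant
 * that old < head.
 */
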
static int intel_bts_read_finish(struct auxtrace_record *itr, int idx)
{
        struct intel_bts_recording *btsr =
                        container_of(itr, struct intel_bts_recording, itr);
        struct perf_evsel *evsel;

        evlist__for_each_entry(btsr->evlist, evsel) {
                if (evsel->attr.type == btsr->intel_bts_pmu->type)
                        return perf_evlist__enable_event_idx(btsr->evlist,
                                                             evsel, idx);
        }
        return -EINVAL;
}

struct auxtrace_record *intel_bts_recording_init(int *err)
{
        struct perf_pmu *intel_bts_pmu = perf_pmu__find(INTEL_BTS_PMU_NAME);
        struct intel_bts_recording *btsr;

        if (!intel_bts_pmu)
                return NULL;

        if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) {
                *err = -errno;
                return NULL;
        }

        btsr = zalloc(sizeof(struct intel_bts_recording));
        if (!btsr) {
                *err = -ENOMEM;
                return NULL;
        }

        btsr->intel_bts_pmu = intel_bts_pmu;
        btsr->itr.recording_options = intel_bts_recording_options;
        btsr->itr.info_priv_size = intel_bts_info_priv_size;
        btsr->itr.info_fill = intel_bts_info_fill;
        btsr->itr.free = intel_bts_recording_free;
        btsr->itr.snapshot_start = intel_bts_snapshot_start;
        btsr->itr.snapshot_finish = intel_bts_snapshot_finish;
        btsr->itr.find_snapshot = intel_bts_find_snapshot;
        btsr->itr.parse_snapshot_options = intel_bts_parse_snapshot_options;
        btsr->itr.reference = intel_bts_reference;
        btsr->itr.read_finish = intel_bts_read_finish;
        btsr->itr.alignment = sizeof(struct branch);
        return &btsr->itr;
}
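
/*
 * Illustrative caller-side sketch (not part of the upstream file): how the
 * auxtrace_record returned above can be driven.  perf's own record code goes
 * through helpers such as auxtrace_record__options(); the direct callback
 * calls and the function name below are hypothetical.
 */
static int example_use_intel_bts(struct perf_evlist *evlist,
                                 struct record_opts *opts)
{
        int err = 0;
        struct auxtrace_record *itr = intel_bts_recording_init(&err);

        if (!itr)
                return err;     /* no intel_bts PMU, or allocation failure */

        /* Validate and adjust the record options for BTS tracing */
        err = itr->recording_options(itr, evlist, opts);
        if (err) {
                itr->free(itr);
                return err;
        }

        /* ... mmap the events, record, take snapshots, etc. ... */

        itr->free(itr);
        return 0;
}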