/*
 * NOTE(review): this chunk is an incomplete extraction of perf's BPF test
 * (many original lines are missing and original line numbers "10", "11",
 * ... are fused into the text). Code is kept byte-identical; only
 * comments are added.
 *
 * epoll_pwait_loop(): workload for the basic BPF filtering sub-test.
 * It issues epoll_pwait() calls with deliberately invalid (negative)
 * fds -(i + 1) so each call fails fast while still triggering the
 * attached kprobe on the syscall. The loop header, iteration count and
 * return statement are not visible in this chunk — TODO confirm against
 * the full file.
 */
10 #include <linux/bpf.h> 11 #include <linux/filter.h> 12 #include <linux/kernel.h> 13 #include <api/fs/fs.h> 19 #define PERF_TEST_BPF_PATH "/sys/fs/bpf/perf_test" 21 #ifdef HAVE_LIBBPF_SUPPORT 23 static int epoll_pwait_loop(
void)
29 epoll_pwait(-(i + 1), NULL, 0, 0, NULL);
/*
 * llseek_loop(): workload for the BPF prologue-generation sub-test
 * (compiled only under HAVE_BPF_PROLOGUE). Opens /dev/null twice and
 * alternates lseek() calls between the two fds with a varying offset
 * and whence (SEEK_CUR / SEEK_SET), giving the prologue code varied
 * syscall arguments to sample. The fds[] declaration, loop header,
 * error-return and close() calls are not visible in this extracted
 * chunk — TODO confirm against the full file.
 */
33 #ifdef HAVE_BPF_PROLOGUE 35 static int llseek_loop(
void)
39 fds[0] = open(
"/dev/null", O_RDONLY);
40 fds[1] = open(
"/dev/null", O_RDWR);
/* Bail out if either open() failed; the return value line is missing here. */
42 if (fds[0] < 0 || fds[1] < 0)
/* Alternate which fd is seeked first so both fds see both whence modes. */
46 lseek(fds[i % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
47 lseek(fds[(i + 1) % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
/*
 * bpf_testcase_table[]: table describing each BPF sub-test. Visible
 * fields: human-readable desc, bracketed name (used to select the
 * embedded BPF program), failure-hint messages for the compile and load
 * stages, and the userspace workload (target_func) run while the BPF
 * program is attached. The struct's opening, its other members (e.g.
 * prog_id, and flags like .pin/.expect_result referenced later), and
 * the entry delimiters are missing from this extracted chunk — TODO
 * confirm against the full file.
 */
60 const char *msg_compile_fail;
61 const char *msg_load_fail;
/* Workload executed while recording; drives the probed syscalls. */
62 int (*target_func)(void);
65 } bpf_testcase_table[] = {
/* Sub-test 0: basic BPF filtering via epoll_pwait kprobe. */
68 .desc =
"Basic BPF filtering",
69 .name =
"[basic_bpf_test]",
70 .msg_compile_fail =
"fix 'perf test LLVM' first",
71 .msg_load_fail =
"load bpf object failed",
72 .target_func = &epoll_pwait_loop,
/* Sub-test 1: BPF object pinning under /sys/fs/bpf. */
77 .desc =
"BPF pinning",
78 .name =
"[bpf_pinning]",
79 .msg_compile_fail =
"fix kbuild first",
80 .msg_load_fail =
"check your vmlinux setting?",
81 .target_func = &epoll_pwait_loop,
/* Sub-test (conditional): BPF prologue generation, uses llseek workload. */
85 #ifdef HAVE_BPF_PROLOGUE
88 .desc =
"BPF prologue generation",
89 .name =
"[bpf_prologue_test]",
90 .msg_compile_fail =
"fix kbuild first",
91 .msg_load_fail =
"check your vmlinux setting?",
92 .target_func = &llseek_loop,
/* Sub-test: relocation checker — expected to FAIL to load (no
 * target_func visible for this entry in the chunk). */
98 .desc =
"BPF relocation checker",
99 .name =
"[bpf_relocation_test]",
100 .msg_compile_fail =
"fix 'perf test LLVM' first",
101 .msg_load_fail =
"libbpf error when dealing with relocation",
/*
 * do_test(): run one BPF sub-test end to end. Visible behavior: builds
 * a parse_events_state over the loaded bpf_object, adds the resulting
 * events to an evlist, formats the current pid for the target, mmaps
 * and records while invoking func() (the workload), then walks all
 * mmap buffers counting PERF_RECORD_SAMPLE events and compares the
 * count with the expected value. Many lines (declarations of evlist/
 * opts/err/count, perf_evlist__new/create_maps/config/open/mmap calls,
 * enable/disable, cleanup labels) are missing from this extracted
 * chunk — TODO confirm against the full file.
 */
105 static int do_test(
struct bpf_object *obj,
int (*
func)(
void),
/* Record options fragment: sample every event (interval of 1). */
115 .default_interval = 1,
/* Zero the parse-events error/state before use. */
126 bzero(&parse_error,
sizeof(parse_error));
127 bzero(&parse_state,
sizeof(parse_state));
128 parse_state.
error = &parse_error;
129 INIT_LIST_HEAD(&parse_state.
list);
/* Parsing must both succeed and actually produce events. */
132 if (err || list_empty(&parse_state.
list)) {
133 pr_debug(
"Failed to add events selected by BPF\n");
/* Target the current process: format our own pid as a string. */
137 snprintf(pid,
sizeof(pid),
"%d", getpid());
/* Defensive NUL-termination of the pid buffer. */
138 pid[
sizeof(
pid) - 1] =
'\0';
144 pr_debug(
"Not enough memory to create evlist\n");
150 pr_debug(
"Not enough memory to create thread/cpu maps\n");
151 goto out_delete_evlist;
/* str_error_r() renders errno from the failed open/mmap calls. */
162 str_error_r(errno, sbuf,
sizeof(sbuf)));
163 goto out_delete_evlist;
169 str_error_r(errno, sbuf,
sizeof(sbuf)));
170 goto out_delete_evlist;
/* Walk every mmap ring buffer and count sample records. */
177 for (i = 0; i < evlist->
nr_mmaps; i++) {
181 md = &evlist->
mmap[i];
186 const u32
type =
event->header.type;
188 if (type == PERF_RECORD_SAMPLE)
/* The BPF filter must have passed exactly `expect` samples through. */
194 if (count != expect) {
195 pr_debug(
"BPF filter result incorrect, expected %d, got %d samples\n", expect, count);
196 goto out_delete_evlist;
/*
 * prepare_bpf(): wrap bpf__prepare_load_buffer() over an in-memory
 * compiled BPF object (obj_buf/obj_buf_sz), logging on compile/prepare
 * failure. The actual call to bpf__prepare_load_buffer(), the error
 * check, and the return statements are missing from this extracted
 * chunk — TODO confirm against the full file.
 */
206 static struct bpf_object *
207 prepare_bpf(
void *obj_buf,
size_t obj_buf_sz,
const char *
name)
209 struct bpf_object *obj;
213 pr_debug(
"Compile BPF program failed.\n");
/*
 * __test__bpf(): run sub-test `idx` from bpf_testcase_table. Visible
 * flow: fetch the compiled BPF object buffer (test_llvm__fetch_bpf_obj,
 * per the signature index at the end of this chunk), load it via
 * prepare_bpf(), verify load success/failure matches whether the entry
 * has a workload (target_func), run do_test() with the expected sample
 * count, and — for .pin entries — mount the BPF filesystem and create
 * PERF_TEST_BPF_PATH to exercise pinning. Declarations, the fetch
 * call, cleanup and return paths are missing from this extracted
 * chunk — TODO confirm against the full file.
 */
219 static int __test__bpf(
int idx)
224 struct bpf_object *obj;
227 bpf_testcase_table[idx].prog_id,
229 if (ret !=
TEST_OK || !obj_buf || !obj_buf_sz) {
230 pr_debug(
"Unable to get BPF object, %s\n",
231 bpf_testcase_table[idx].msg_compile_fail);
238 obj = prepare_bpf(obj_buf, obj_buf_sz,
239 bpf_testcase_table[idx].
name);
/* Load outcome must match expectation: entries with a workload must
 * load; the relocation-checker entry (no target_func) must fail. */
240 if ((!!bpf_testcase_table[idx].target_func) != (!!obj)) {
242 pr_debug(
"Fail to load BPF object: %s\n",
243 bpf_testcase_table[idx].msg_load_fail);
245 pr_debug(
"Success unexpectedly: %s\n",
246 bpf_testcase_table[idx].msg_load_fail);
253 bpf_testcase_table[idx].target_func,
254 bpf_testcase_table[idx].expect_result);
/* Pinning sub-test: needs bpffs mounted and a scratch directory. */
257 if (bpf_testcase_table[idx].pin) {
260 if (!bpf_fs__mount()) {
261 pr_debug(
"BPF filesystem not mounted\n");
/* EEXIST is fine — the directory may survive a previous run. */
266 if (err && errno != EEXIST) {
267 pr_debug(
"Failed to make perf_test dir: %s\n",
/* test__bpf_subtest_get_nr(): number of entries in bpf_testcase_table. */
286 return (
int)ARRAY_SIZE(bpf_testcase_table);
/* test__bpf_subtest_get_desc(): bounds-checked lookup of a sub-test's
 * description (the out-of-range return value is missing from this chunk). */
291 if (i < 0 || i >= (
int)ARRAY_SIZE(bpf_testcase_table))
293 return bpf_testcase_table[i].desc;
/*
 * check_env(): probe whether the running kernel supports the minimum
 * BPF features this test needs. Fetches the kernel version, then tries
 * to load a trivial kprobe program ("mov r0, 1"; the BPF_EXIT_INSN and
 * license/err declarations are missing from this extracted chunk) via
 * bpf_load_program(); failure means basic BPF support is absent and
 * the test should be skipped — TODO confirm against the full file.
 */
296 static int check_env(
void)
299 unsigned int kver_int;
302 struct bpf_insn insns[] = {
303 BPF_MOV64_IMM(BPF_REG_0, 1),
309 pr_debug(
"Unable to get kernel version\n");
313 err = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
314 sizeof(insns) /
sizeof(insns[0]),
315 license, kver_int, NULL, 0);
317 pr_err(
"Missing basic BPF support, skip this test: %s\n",
/*
 * test__bpf(): entry point for `perf test BPF`. Visible flow: bounds-
 * check the sub-test index, require root (BPF program loading needs
 * privileges), then run __test__bpf(i). The check_env() call, return
 * values, and the #else branch structure are missing from this
 * extracted chunk. The final pr_debug is the !HAVE_LIBBPF_SUPPORT
 * stub's skip message — TODO confirm against the full file.
 */
330 if (i < 0 || i >= (
int)ARRAY_SIZE(bpf_testcase_table))
333 if (geteuid() != 0) {
334 pr_debug(
"Only root can run BPF test\n");
341 err = __test__bpf(i);
358 pr_debug(
"Skip BPF test because BPF support is not compiled\n");
int fetch_kernel_version(unsigned int *puint, char *str, size_t str_size)
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
static int do_test(struct perf_evlist *evlist, int mmap_pages, int *sample_count, int *comm_count)
void perf_evlist__enable(struct perf_evlist *evlist)
#define PERF_TEST_BPF_PATH
int test__bpf(struct test *test __maybe_unused, int i __maybe_unused)
void perf_evlist__delete(struct perf_evlist *evlist)
void perf_evlist__splice_list_tail(struct perf_evlist *evlist, struct list_head *list)
int parse_events_load_bpf_obj(struct parse_events_state *parse_state, struct list_head *list, struct bpf_object *obj, struct list_head *head_config)
int rm_rf(const char *path)
int perf_mmap__read_init(struct perf_mmap *map)
#define pr_debug(fmt,...)
int perf_evlist__open(struct perf_evlist *evlist)
struct parse_events_error * error
const char * test__bpf_subtest_get_desc(int i __maybe_unused)
struct bpf_object * bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts, struct callchain_param *callchain)
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
void perf_mmap__read_done(struct perf_mmap *map)
int test_llvm__fetch_bpf_obj(void **p_obj_buf, size_t *p_obj_buf_sz, enum test_llvm__testcase idx, bool force, bool *should_load_fail)
union perf_event * perf_mmap__read_event(struct perf_mmap *map)
void perf_evlist__disable(struct perf_evlist *evlist)
int test__bpf_subtest_get_nr(void)
struct perf_evlist * perf_evlist__new(void)