/* tools/perf/tests/bpf.c */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <util/bpf-loader.h>
#include <util/evlist.h>
#include "tests.h"
#include "llvm.h"
#include "debug.h"
#define NR_ITERS       111

#ifdef HAVE_LIBBPF_SUPPORT

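/*
 * Trigger NR_ITERS epoll_pwait() calls; each call fails (negative fd).
 * The BPF program attached by the basic test is expected to sample
 * (NR_ITERS + 1) / 2 of them.
 */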
static int epoll_pwait_loop(void)
{
        int i;

        /* Should fail NR_ITERS times */
        for (i = 0; i < NR_ITERS; i++)
                epoll_pwait(-(i + 1), NULL, 0, 0, NULL);
        return 0;
}

#ifdef HAVE_BPF_PROLOGUE

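/*
 * Trigger lseek() calls with varying arguments on two /dev/null fds to
 * exercise BPF prologue generation (fetching syscall arguments); the test
 * expects (NR_ITERS + 1) / 4 samples.
 */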
static int llseek_loop(void)
{
        int fds[2], i;

        fds[0] = open("/dev/null", O_RDONLY);
        fds[1] = open("/dev/null", O_RDWR);

        if (fds[0] < 0 || fds[1] < 0)
                return -1;

        for (i = 0; i < NR_ITERS; i++) {
                lseek(fds[i % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
                lseek(fds[(i + 1) % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
        }
        close(fds[0]);
        close(fds[1]);
        return 0;
}

#endif

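/*
 * One entry per subtest: which LLVM test object to use, the workload that
 * triggers the probed syscalls, and how many samples the BPF program is
 * expected to pass through.
 */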
static struct {
        enum test_llvm__testcase prog_id;
        const char *desc;
        const char *name;
        const char *msg_compile_fail;
        const char *msg_load_fail;
        int (*target_func)(void);
        int expect_result;
} bpf_testcase_table[] = {
        {
                LLVM_TESTCASE_BASE,
                "Test basic BPF filtering",
                "[basic_bpf_test]",
                "fix 'perf test LLVM' first",
                "load bpf object failed",
                &epoll_pwait_loop,
                (NR_ITERS + 1) / 2,
        },
#ifdef HAVE_BPF_PROLOGUE
        {
                LLVM_TESTCASE_BPF_PROLOGUE,
                "Test BPF prologue generation",
                "[bpf_prologue_test]",
                "fix kbuild first",
                "check your vmlinux setting?",
                &llseek_loop,
                (NR_ITERS + 1) / 4,
        },
#endif
};

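/*
 * Build an evlist from the events the BPF object selects, open and mmap it
 * for the current process, run the trigger workload, then count the
 * PERF_RECORD_SAMPLE events and compare the count against what is expected.
 */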
static int do_test(struct bpf_object *obj, int (*func)(void),
                   int expect)
{
        struct record_opts opts = {
                .target = {
                        .uid = UINT_MAX,
                        .uses_mmap = true,
                },
                .freq         = 0,
                .mmap_pages   = 256,
                .default_interval = 1,
        };

        char pid[16];
        char sbuf[STRERR_BUFSIZE];
        struct perf_evlist *evlist;
        int i, ret = TEST_FAIL, err = 0, count = 0;

        struct parse_events_evlist parse_evlist;
        struct parse_events_error parse_error;

        bzero(&parse_error, sizeof(parse_error));
        bzero(&parse_evlist, sizeof(parse_evlist));
        parse_evlist.error = &parse_error;
        INIT_LIST_HEAD(&parse_evlist.list);

        err = parse_events_load_bpf_obj(&parse_evlist, &parse_evlist.list, obj);
        if (err || list_empty(&parse_evlist.list)) {
                pr_debug("Failed to add events selected by BPF\n");
                return TEST_FAIL;
        }

        snprintf(pid, sizeof(pid), "%d", getpid());
        pid[sizeof(pid) - 1] = '\0';
        opts.target.tid = opts.target.pid = pid;

        /* Instead of perf_evlist__new_default, don't add default events */
        evlist = perf_evlist__new();
        if (!evlist) {
                pr_debug("Not enough memory to create evlist\n");
                return TEST_FAIL;
        }

        err = perf_evlist__create_maps(evlist, &opts.target);
        if (err < 0) {
                pr_debug("Not enough memory to create thread/cpu maps\n");
                goto out_delete_evlist;
        }

        perf_evlist__splice_list_tail(evlist, &parse_evlist.list);
        evlist->nr_groups = parse_evlist.nr_groups;

        perf_evlist__config(evlist, &opts);

        err = perf_evlist__open(evlist);
        if (err < 0) {
                pr_debug("perf_evlist__open: %s\n",
                         strerror_r(errno, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }

        err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
        if (err < 0) {
                pr_debug("perf_evlist__mmap: %s\n",
                         strerror_r(errno, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }

        perf_evlist__enable(evlist);
        (*func)();
        perf_evlist__disable(evlist);

        for (i = 0; i < evlist->nr_mmaps; i++) {
                union perf_event *event;

                while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
                        const u32 type = event->header.type;

                        if (type == PERF_RECORD_SAMPLE)
                                count++;
                }
        }

        if (count != expect) {
                pr_debug("BPF filter result incorrect\n");
                goto out_delete_evlist;
        }

        ret = TEST_OK;

out_delete_evlist:
        perf_evlist__delete(evlist);
        return ret;
}

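/* Turn a compiled BPF object buffer into a struct bpf_object, NULL on error. */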
static struct bpf_object *
prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
{
        struct bpf_object *obj;

        obj = bpf__prepare_load_buffer(obj_buf, obj_buf_sz, name);
        if (IS_ERR(obj)) {
                pr_debug("Failed to compile BPF program.\n");
                return NULL;
        }
        return obj;
}

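/*
 * Run one entry of bpf_testcase_table: fetch the compiled object from the
 * LLVM test, prepare it and run do_test(). Only the basic test (idx 0) is
 * allowed to skip when no object could be built.
 */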
static int __test__bpf(int idx)
{
        int ret;
        void *obj_buf;
        size_t obj_buf_sz;
        struct bpf_object *obj;

        ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
                                       bpf_testcase_table[idx].prog_id,
                                       true);
        if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
                pr_debug("Unable to get BPF object, %s\n",
                         bpf_testcase_table[idx].msg_compile_fail);
                if (idx == 0)
                        return TEST_SKIP;
                else
                        return TEST_FAIL;
        }

        obj = prepare_bpf(obj_buf, obj_buf_sz,
                          bpf_testcase_table[idx].name);
        if (!obj) {
                ret = TEST_FAIL;
                goto out;
        }

        ret = do_test(obj,
                      bpf_testcase_table[idx].target_func,
                      bpf_testcase_table[idx].expect_result);
out:
        bpf__clear();
        return ret;
}

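/* Subtest enumeration used by the 'perf test' framework. */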
int test__bpf_subtest_get_nr(void)
{
        return (int)ARRAY_SIZE(bpf_testcase_table);
}

const char *test__bpf_subtest_get_desc(int i)
{
        if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
                return NULL;
        return bpf_testcase_table[i].desc;
}

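/* Loading BPF programs requires root, so skip the test for non-root users. */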
int test__bpf(int i)
{
        int err;

        if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
                return TEST_FAIL;

        if (geteuid() != 0) {
                pr_debug("Only root can run BPF test\n");
                return TEST_SKIP;
        }

        err = __test__bpf(i);
        return err;
}

#else
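/* Stubs used when perf is built without libbpf support. */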
int test__bpf_subtest_get_nr(void)
{
        return 0;
}

const char *test__bpf_subtest_get_desc(int i __maybe_unused)
{
        return NULL;
}

int test__bpf(int i __maybe_unused)
{
        pr_debug("Skip BPF test because BPF support is not compiled in\n");
        return TEST_SKIP;
}
#endif