/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include "trace.h"

/**
 * trace_call_bpf - invoke BPF program
 * @prog: bpf program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * the kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);

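/*
 * Usage sketch (illustrative, not part of this file): the caller is
 * expected to act on the return value roughly as below, dropping the
 * event when the program returns 0. Modeled loosely on the kprobe perf
 * handler in kernel/trace/trace_kprobe.c; the function and helper names
 * here are hypothetical.
 *
 *	static void example_kprobe_perf_func(struct bpf_prog *prog,
 *					     struct pt_regs *regs)
 *	{
 *		if (prog && !trace_call_bpf(prog, regs))
 *			return;				// 0: filter out, store nothing
 *
 *		example_submit_perf_event(regs);	// hypothetical submit path
 *	}
 */
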
static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	void *dst = (void *) (long) r1;
	int ret, size = (int) r2;
	void *unsafe_ptr = (void *) (long) r3;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_RAW_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
	.arg3_type	= ARG_ANYTHING,
};

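/*
 * Usage sketch (illustrative, not part of this file): programs copy
 * kernel memory onto their own stack before looking at it. Restricted-C
 * fragment as it would be compiled with clang -target bpf, assuming the
 * bpf_probe_read() and bpf_get_current_task() wrappers from
 * samples/bpf/bpf_helpers.h; the probe point is hypothetical.
 *
 *	SEC("kprobe/sys_example")
 *	int read_task_pid(struct pt_regs *ctx)
 *	{
 *		struct task_struct *task = (void *) bpf_get_current_task();
 *		int pid = 0;
 *
 *		// &task->pid is address arithmetic only; the actual
 *		// dereference happens inside the helper
 *		bpf_probe_read(&pid, sizeof(pid), &task->pid);
 *		return 0;
 *	}
 */
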
static u64 bpf_probe_write_user(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	void *unsafe_ptr = (void *) (long) r1;
	void *src = (void *) (long) r2;
	int size = (int) r3;

	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
		return -EPERM;
	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_STACK,
	.arg3_type	= ARG_CONST_STACK_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

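/*
 * Usage sketch (illustrative, not part of this file): the helper is only
 * safe on addresses known to point into the traced process' user memory,
 * e.g. a syscall argument. Restricted-C fragment assuming the
 * bpf_probe_write_user() wrapper and PT_REGS_PARM1() macro from
 * samples/bpf/bpf_helpers.h; the probe point and argument layout are
 * hypothetical.
 *
 *	SEC("kprobe/sys_example")
 *	int clobber_user_buf(struct pt_regs *ctx)
 *	{
 *		char newval[] = "x";
 *		void *ubuf = (void *) PT_REGS_PARM1(ctx);	// user pointer argument
 *
 *		bpf_probe_write_user(ubuf, newval, sizeof(newval));
 *		return 0;
 *	}
 */
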
/*
 * limited trace_printk()
 * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
 */
static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
{
	char *fmt = (char *) (long) r1;
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = r3;
					r3 = (long) buf;
					break;
				case 2:
					unsafe_addr = r4;
					r4 = (long) buf;
					break;
				case 3:
					unsafe_addr = r5;
					r5 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

	return __trace_printk(1/* fake ip will not be printed */, fmt,
			      mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
			      mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
			      mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}

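/*
 * Usage sketch (illustrative, not part of this file): the format string
 * must live on the BPF stack, may use at most three of the specifiers
 * listed above and only one %s; output lands in
 * /sys/kernel/debug/tracing/trace_pipe. Restricted-C fragment assuming
 * the bpf_trace_printk() wrapper from samples/bpf/bpf_helpers.h; the
 * probe point is hypothetical.
 *
 *	SEC("kprobe/sys_example")
 *	int print_args(struct pt_regs *ctx)
 *	{
 *		char fmt[] = "example: arg1=%ld str=%s\n";
 *
 *		bpf_trace_printk(fmt, sizeof(fmt),
 *				 (long) PT_REGS_PARM1(ctx),
 *				 (void *) PT_REGS_PARM2(ctx));
 *		return 0;
 *	}
 */
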
static u64 bpf_perf_event_read(u64 r1, u64 flags, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
		     event->attr.type != PERF_TYPE_RAW))
		return -EINVAL;

	/* make sure event is local and doesn't have pmu::count */
	if (unlikely(event->oncpu != cpu || event->pmu->count))
		return -EINVAL;

	/*
	 * we don't know if the function is run successfully by the
	 * return value. It can be judged in other places, such as
	 * eBPF programs.
	 */
	return perf_event_read_local(event);
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

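/*
 * Usage sketch (illustrative, not part of this file): user space creates
 * a BPF_MAP_TYPE_PERF_EVENT_ARRAY and stores one perf event fd
 * (PERF_TYPE_HARDWARE or PERF_TYPE_RAW, opened per CPU) in each slot;
 * the program then reads the counter for the CPU it runs on.
 * Restricted-C fragment in the style of samples/bpf; all names are
 * hypothetical.
 *
 *	struct bpf_map_def SEC("maps") example_counters = {
 *		.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 *		.key_size = sizeof(int),
 *		.value_size = sizeof(u32),
 *		.max_entries = 64,		// >= number of possible CPUs
 *	};
 *
 *	SEC("kprobe/sys_example")
 *	int read_counter(struct pt_regs *ctx)
 *	{
 *		u64 count = bpf_perf_event_read(&example_counters,
 *						BPF_F_CURRENT_CPU);
 *		// errors from the checks above come back as negative values
 *		return 0;
 *	}
 */
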
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_raw_record *raw)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct perf_sample_data sample_data;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_sample_data_init(&sample_data, 0, 0);
	sample_data.raw = raw;
	perf_event_output(event, &sample_data, regs);
	return 0;
}

static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
{
	struct pt_regs *regs = (struct pt_regs *)(long) r1;
	struct bpf_map *map = (struct bpf_map *)(long) r2;
	void *data = (void *)(long) r4;
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};

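/*
 * Usage sketch (illustrative, not part of this file): the program hands
 * its ctx, a BPF_MAP_TYPE_PERF_EVENT_ARRAY and a buffer on its own stack
 * to the helper; the record then shows up in the perf ring buffer of the
 * PERF_COUNT_SW_BPF_OUTPUT event installed for the current CPU.
 * Restricted-C fragment in the style of samples/bpf; all names are
 * hypothetical.
 *
 *	struct bpf_map_def SEC("maps") example_events = {
 *		.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 *		.key_size = sizeof(int),
 *		.value_size = sizeof(u32),
 *		.max_entries = 64,
 *	};
 *
 *	SEC("kprobe/sys_example")
 *	int push_sample(struct pt_regs *ctx)
 *	{
 *		struct {
 *			u64 pid;
 *			u64 arg;
 *		} data = {
 *			.pid = bpf_get_current_pid_tgid(),
 *			.arg = PT_REGS_PARM1(ctx),
 *		};
 *
 *		bpf_perf_event_output(ctx, &example_events, BPF_F_CURRENT_CPU,
 *				      &data, sizeof(data));
 *		return 0;
 *	}
 */
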
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};

	perf_fetch_caller_regs(regs);

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

static u64 bpf_get_current_task(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

static u64 bpf_current_task_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *)(long)r1;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;
	u32 idx = (u32)r2;

	if (unlikely(in_interrupt()))
		return -EINVAL;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func           = bpf_current_task_under_cgroup,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_CONST_MAP_PTR,
	.arg2_type      = ARG_ANYTHING,
};

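/*
 * Usage sketch (illustrative, not part of this file): user space opens a
 * cgroup-v2 directory and stores the fd in a BPF_MAP_TYPE_CGROUP_ARRAY
 * slot; the program then restricts itself to tasks inside that
 * hierarchy. Restricted-C fragment in the style of samples/bpf; all
 * names are hypothetical.
 *
 *	struct bpf_map_def SEC("maps") example_cgroup_map = {
 *		.type = BPF_MAP_TYPE_CGROUP_ARRAY,
 *		.key_size = sizeof(u32),
 *		.value_size = sizeof(u32),
 *		.max_entries = 1,
 *	};
 *
 *	SEC("kprobe/sys_example")
 *	int filter_by_cgroup(struct pt_regs *ctx)
 *	{
 *		// 1: task is in the hierarchy, 0: it is not, < 0: error
 *		if (bpf_current_task_under_cgroup(&example_cgroup_map, 0) != 1)
 *			return 0;
 *
 *		// ... handle the event ...
 *		return 0;
 *	}
 */
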
static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	default:
		return tracing_func_proto(func_id);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					enum bpf_reg_type *reg_type)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static const struct bpf_verifier_ops kprobe_prog_ops = {
	.get_func_proto  = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

static struct bpf_prog_type_list kprobe_tl = {
	.ops	= &kprobe_prog_ops,
	.type	= BPF_PROG_TYPE_KPROBE,
};

static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
{
	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper
	 */
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_perf_event_output(ctx, r2, index, r4, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};

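/*
 * Usage sketch (illustrative, not part of this file): a
 * BPF_PROG_TYPE_TRACEPOINT program never sees the hidden pt_regs
 * pointer; its ctx starts with an opaque 8-byte pad followed by the
 * fields from /sys/kernel/debug/tracing/events/.../format, and it can
 * pass that same ctx to bpf_perf_event_output() thanks to the wrapper
 * above. Restricted-C fragment in the style of samples/bpf, reusing an
 * example_events perf event array map as in the earlier sketch; the
 * field layout shown is for raw_syscalls:sys_enter and should be
 * checked against the format file.
 *
 *	struct example_sys_enter_args {
 *		unsigned long long pad;	// hidden/common area, not readable below offset 8
 *		long id;
 *		unsigned long args[6];
 *	};
 *
 *	SEC("tracepoint/raw_syscalls/sys_enter")
 *	int on_sys_enter(struct example_sys_enter_args *ctx)
 *	{
 *		long id = ctx->id;	// copy to the BPF stack first
 *
 *		bpf_perf_event_output(ctx, &example_events, BPF_F_CURRENT_CPU,
 *				      &id, sizeof(id));
 *		return 0;
 *	}
 */
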
static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_get_stackid(ctx, r2, r3, r4, r5);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	default:
		return tracing_func_proto(func_id);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    enum bpf_reg_type *reg_type)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static const struct bpf_verifier_ops tracepoint_prog_ops = {
	.get_func_proto  = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

static struct bpf_prog_type_list tracepoint_tl = {
	.ops	= &tracepoint_prog_ops,
	.type	= BPF_PROG_TYPE_TRACEPOINT,
};

static int __init register_kprobe_prog_ops(void)
{
	bpf_register_prog_type(&kprobe_tl);
	bpf_register_prog_type(&tracepoint_tl);
	return 0;
}
late_initcall(register_kprobe_prog_ops);