bpf: support bpf_get_stackid() and bpf_perf_event_output() in tracepoint programs
author Alexei Starovoitov <ast@fb.com>
Thu, 7 Apr 2016 01:43:27 +0000 (18:43 -0700)
committer David S. Miller <davem@davemloft.net>
Fri, 8 Apr 2016 01:04:26 +0000 (21:04 -0400)
Tracepoint programs need two wrapper functions that fetch 'struct pt_regs *'
from the hidden first 8 bytes of the perf tracepoint buffer and thereby
convert the tracepoint bpf context into the kprobe bpf context, so the
existing bpf_perf_event_output() and bpf_get_stackid() helpers can be
reused unchanged.
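
For illustration only (not part of this patch), a tracepoint program that
uses both helpers could look like the sketch below. It assumes the
samples/bpf conventions (bpf_helpers.h declarations and a loader that
understands "tracepoint/" section names); sched_switch is just an
arbitrary example tracepoint:

  #include <uapi/linux/bpf.h>
  #include <uapi/linux/perf_event.h>
  #include "bpf_helpers.h"

  struct bpf_map_def SEC("maps") stackmap = {
  	.type = BPF_MAP_TYPE_STACK_TRACE,
  	.key_size = sizeof(__u32),
  	.value_size = PERF_MAX_STACK_DEPTH * sizeof(__u64),
  	.max_entries = 1024,
  };

  struct bpf_map_def SEC("maps") events = {
  	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
  	.key_size = sizeof(int),
  	.value_size = sizeof(__u32),
  	.max_entries = 64,		/* >= number of possible cpus */
  };

  SEC("tracepoint/sched/sched_switch")
  int trace_sched_switch(void *ctx)
  {
  	struct {
  		__u64 stackid;
  	} data;

  	/* both helpers now accept the tracepoint ctx directly */
  	data.stackid = bpf_get_stackid(ctx, &stackmap, 0);
  	bpf_perf_event_output(ctx, &events, bpf_get_smp_processor_id(),
  			      &data, sizeof(data));
  	return 0;
  }

  char _license[] SEC("license") = "GPL";	/* both helpers are gpl_only */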

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/bpf.h
kernel/bpf/stackmap.c
kernel/trace/bpf_trace.c

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 21ee41b..198f6ac 100644
@@ -160,6 +160,7 @@ struct bpf_array {
 #define MAX_TAIL_CALL_CNT 32
 
 u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
+u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 void bpf_fd_array_map_clear(struct bpf_map *map);
 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 499d9e9..3511472 100644
@@ -116,7 +116,7 @@ free_smap:
        return ERR_PTR(err);
 }
 
-static u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
+u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
 {
        struct pt_regs *regs = (struct pt_regs *) (long) r1;
        struct bpf_map *map = (struct bpf_map *) (long) r2;
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 3e5ebe3..413ec56 100644
@@ -340,12 +340,52 @@ static struct bpf_prog_type_list kprobe_tl = {
        .type   = BPF_PROG_TYPE_KPROBE,
 };
 
+static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
+{
+       /*
+        * r1 points to perf tracepoint buffer where first 8 bytes are hidden
+        * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
+        * from there and call the same bpf_perf_event_output() helper
+        */
+       u64 ctx = *(long *)r1;
+
+       return bpf_perf_event_output(ctx, r2, index, r4, size);
+}
+
+static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
+       .func           = bpf_perf_event_output_tp,
+       .gpl_only       = true,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_CONST_MAP_PTR,
+       .arg3_type      = ARG_ANYTHING,
+       .arg4_type      = ARG_PTR_TO_STACK,
+       .arg5_type      = ARG_CONST_STACK_SIZE,
+};
+
+static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+       u64 ctx = *(long *)r1;
+
+       return bpf_get_stackid(ctx, r2, r3, r4, r5);
+}
+
+static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
+       .func           = bpf_get_stackid_tp,
+       .gpl_only       = true,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_CONST_MAP_PTR,
+       .arg3_type      = ARG_ANYTHING,
+};
+
 static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
 {
        switch (func_id) {
        case BPF_FUNC_perf_event_output:
+               return &bpf_perf_event_output_proto_tp;
        case BPF_FUNC_get_stackid:
-               return NULL;
+               return &bpf_get_stackid_proto_tp;
        default:
                return tracing_func_proto(func_id);
        }
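
For reference, the buffer layout that both wrappers rely on can be pictured
with the sketch below (the struct is hypothetical; the kernel hands the
program a raw buffer, not a named type):

  #include <linux/types.h>
  #include <linux/ptrace.h>

  /* hypothetical view of the ctx (r1) handed to a BPF_PROG_TYPE_TRACEPOINT program */
  struct tp_bpf_ctx_sketch {
  	struct pt_regs *regs;	/* bytes 0..7: hidden from the program, read by the wrappers via *(long *)r1 */
  	__u64 fields[];		/* bytes 8..: tracepoint record fields the program may access */
  };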