Merge branch 'for-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
[cascardo/linux.git] / kernel / bpf / helpers.c
index 1ea3afb..3991840 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/ktime.h>
 #include <linux/sched.h>
 #include <linux/uidgid.h>
+#include <linux/filter.h>
 
 /* If kernel subsystem is allowing eBPF programs to call this function,
  * inside its own verifier_ops->get_func_proto() callback it should return
  * if program is allowed to access maps, so check rcu_read_lock_held in
  * all three functions.
  */
/*
 * BPF helper: look up a map element by key.
 *
 * The verifier has already proven that @map is a valid bpf_map pointer
 * and that @key points to a stack area with map->key_size initialized
 * bytes, so no runtime argument validation is needed here.
 *
 * Returns the kernel address of the element's value, or NULL if the key
 * is not present -- matching RET_PTR_TO_MAP_VALUE_OR_NULL in the proto.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	/* Map contents are RCU-protected; the caller must hold
	 * rcu_read_lock() for the returned pointer to stay valid.
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}
 
/* Verifier-facing signature of bpf_map_lookup_elem(). */
const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	/* NOTE(review): pkt_access presumably lets the verifier accept
	 * packet-data pointers as the key argument -- confirm against
	 * check_func_arg() in the verifier.
	 */
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
 
/*
 * BPF helper: create or update a map element.
 *
 * @key and @value were verified to point to initialized stack areas of
 * map->key_size / map->value_size bytes; @flags is passed through to the
 * map implementation unmodified (presumably BPF_ANY/BPF_NOEXIST/BPF_EXIST
 * -- semantics live in the per-map update op, not here).
 *
 * Return value comes straight from map->ops->map_update_elem()
 * (RET_INTEGER in the proto).
 */
BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	/* Map contents are RCU-protected. */
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}
 
 const struct bpf_func_proto bpf_map_update_elem_proto = {
        .func           = bpf_map_update_elem,
        .gpl_only       = false,
+       .pkt_access     = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_MAP_KEY,
@@ -75,19 +60,16 @@ const struct bpf_func_proto bpf_map_update_elem_proto = {
        .arg4_type      = ARG_ANYTHING,
 };
 
/*
 * BPF helper: delete a map element by key.
 *
 * @key was verified to point to an initialized map->key_size stack area.
 * Return value comes straight from map->ops->map_delete_elem()
 * (RET_INTEGER in the proto).
 */
BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	/* Map contents are RCU-protected. */
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}
 
 const struct bpf_func_proto bpf_map_delete_elem_proto = {
        .func           = bpf_map_delete_elem,
        .gpl_only       = false,
+       .pkt_access     = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_MAP_KEY,
@@ -99,7 +81,7 @@ const struct bpf_func_proto bpf_get_prandom_u32_proto = {
        .ret_type       = RET_INTEGER,
 };
 
/*
 * BPF helper: return the id of the CPU the program is currently
 * executing on.  Safe without get_cpu(): BPF programs run with
 * preemption disabled, so the result cannot go stale mid-program.
 * NOTE(review): the preemption claim is the usual justification but is
 * not visible in this file -- confirm against the program-run sites.
 */
BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}
@@ -110,7 +92,7 @@ const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
        .ret_type       = RET_INTEGER,
 };
 
-static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_ktime_get_ns)
 {
        /* NMI safe access to clock monotonic */
        return ktime_get_mono_fast_ns();
@@ -122,11 +104,11 @@ const struct bpf_func_proto bpf_ktime_get_ns_proto = {
        .ret_type       = RET_INTEGER,
 };
 
-static u64 bpf_get_current_pid_tgid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_get_current_pid_tgid)
 {
        struct task_struct *task = current;
 
-       if (!task)
+       if (unlikely(!task))
                return -EINVAL;
 
        return (u64) task->tgid << 32 | task->pid;
@@ -138,18 +120,18 @@ const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
        .ret_type       = RET_INTEGER,
 };
 
/*
 * BPF helper: return the current task's credentials packed as
 * gid << 32 | uid, both translated into init_user_ns.
 *
 * Returns -EINVAL when there is no current task (e.g. when invoked
 * from a context with no task -- defensive, hence the unlikely()).
 */
BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	/* gid in the high 32 bits, uid in the low 32 bits */
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}
 
 const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
@@ -158,10 +140,9 @@ const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
        .ret_type       = RET_INTEGER,
 };
 
-static u64 bpf_get_current_comm(u64 r1, u64 size, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
 {
        struct task_struct *task = current;
-       char *buf = (char *) (long) r1;
 
        if (unlikely(!task))
                goto err_clear;