Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
diff --git a/net/core/filter.c b/net/core/filter.c
index ca7f832..cb9fc16 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -748,6 +748,17 @@ static bool chk_code_allowed(u16 code_to_probe)
        return codes[code_to_probe];
 }
 
+static bool bpf_check_basics_ok(const struct sock_filter *filter,
+                               unsigned int flen)
+{
+       if (filter == NULL)
+               return false;
+       if (flen == 0 || flen > BPF_MAXINSNS)
+               return false;
+
+       return true;
+}
+
 /**
  *     bpf_check_classic - verify socket filter code
  *     @filter: filter to verify
@@ -768,9 +779,6 @@ static int bpf_check_classic(const struct sock_filter *filter,
        bool anc_found;
        int pc;
 
-       if (flen == 0 || flen > BPF_MAXINSNS)
-               return -EINVAL;
-
        /* Check the filter code now */
        for (pc = 0; pc < flen; pc++) {
                const struct sock_filter *ftest = &filter[pc];
@@ -994,7 +1002,11 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
                 */
                goto out_err_free;
 
-       bpf_prog_select_runtime(fp);
+       /* We are guaranteed to never error here with cBPF to eBPF
+        * transitions, since there's no issue with type compatibility
+        * checks on program arrays.
+        */
+       fp = bpf_prog_select_runtime(fp, &err);
 
        kfree(old_prog);
        return fp;
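
The comment in the hunk above reflects an API change: bpf_prog_select_runtime() no longer returns void but hands back the finalized prog and reports failure through an int pointer. A minimal sketch of the new calling convention follows; the wrapper function and its cleanup path are hypothetical, only the bpf_prog_select_runtime() signature is taken from the kernel.

    /* Hypothetical caller illustrating the two-return-value convention.
     * For the cBPF-to-eBPF migration above, err is guaranteed to stay 0,
     * which is why bpf_migrate_filter() can ignore it.
     */
    static struct bpf_prog *example_finalize(struct bpf_prog *fp)
    {
            int err = 0;

            fp = bpf_prog_select_runtime(fp, &err);
            if (err) {
                    bpf_prog_free(fp);      /* hypothetical cleanup path */
                    return ERR_PTR(err);
            }
            return fp;
    }
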
@@ -1061,7 +1073,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
        struct bpf_prog *fp;
 
        /* Make sure new filter is there and in the right amounts. */
-       if (fprog->filter == NULL)
+       if (!bpf_check_basics_ok(fprog->filter, fprog->len))
                return -EINVAL;
 
        fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
@@ -1108,7 +1120,7 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
        int err;
 
        /* Make sure new filter is there and in the right amounts. */
-       if (fprog->filter == NULL)
+       if (!bpf_check_basics_ok(fprog->filter, fprog->len))
                return -EINVAL;
 
        fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
@@ -1149,8 +1161,7 @@ void bpf_prog_destroy(struct bpf_prog *fp)
 }
 EXPORT_SYMBOL_GPL(bpf_prog_destroy);
 
-static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk,
-                           bool locked)
+static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
 {
        struct sk_filter *fp, *old_fp;
 
@@ -1166,8 +1177,10 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk,
                return -ENOMEM;
        }
 
-       old_fp = rcu_dereference_protected(sk->sk_filter, locked);
+       old_fp = rcu_dereference_protected(sk->sk_filter,
+                                          lockdep_sock_is_held(sk));
        rcu_assign_pointer(sk->sk_filter, fp);
+
        if (old_fp)
                sk_filter_uncharge(sk, old_fp);
 
@@ -1202,7 +1215,6 @@ static
 struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
 {
        unsigned int fsize = bpf_classic_proglen(fprog);
-       unsigned int bpf_fsize = bpf_prog_size(fprog->len);
        struct bpf_prog *prog;
        int err;
 
@@ -1210,10 +1222,10 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
                return ERR_PTR(-EPERM);
 
        /* Make sure new filter is there and in the right amounts. */
-       if (fprog->filter == NULL)
+       if (!bpf_check_basics_ok(fprog->filter, fprog->len))
                return ERR_PTR(-EINVAL);
 
-       prog = bpf_prog_alloc(bpf_fsize, 0);
+       prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
        if (!prog)
                return ERR_PTR(-ENOMEM);
 
@@ -1246,8 +1258,7 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
  * occurs or there is insufficient memory for the filter a negative
  * errno code is returned. On success the return is zero.
  */
-int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
-                      bool locked)
+int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
        struct bpf_prog *prog = __get_filter(fprog, sk);
        int err;
@@ -1255,7 +1266,7 @@ int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
        if (IS_ERR(prog))
                return PTR_ERR(prog);
 
-       err = __sk_attach_prog(prog, sk, locked);
+       err = __sk_attach_prog(prog, sk);
        if (err < 0) {
                __bpf_prog_release(prog);
                return err;
@@ -1263,12 +1274,7 @@ int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(__sk_attach_filter);
-
-int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
-{
-       return __sk_attach_filter(fprog, sk, sock_owned_by_user(sk));
-}
+EXPORT_SYMBOL_GPL(sk_attach_filter);
 
 int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
@@ -1314,7 +1320,7 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
        if (IS_ERR(prog))
                return PTR_ERR(prog);
 
-       err = __sk_attach_prog(prog, sk, sock_owned_by_user(sk));
+       err = __sk_attach_prog(prog, sk);
        if (err < 0) {
                bpf_prog_put(prog);
                return err;
@@ -1349,6 +1355,21 @@ struct bpf_scratchpad {
 
 static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
 
+static inline int bpf_try_make_writable(struct sk_buff *skb,
+                                       unsigned int write_len)
+{
+       int err;
+
+       if (!skb_cloned(skb))
+               return 0;
+       if (skb_clone_writable(skb, write_len))
+               return 0;
+       err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+       if (!err)
+               bpf_compute_data_end(skb);
+       return err;
+}
+
 static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 {
        struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
@@ -1371,7 +1392,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
         */
        if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
                return -EFAULT;
-       if (unlikely(skb_try_make_writable(skb, offset + len)))
+       if (unlikely(bpf_try_make_writable(skb, offset + len)))
                return -EFAULT;
 
        ptr = skb_header_pointer(skb, offset, len, sp->buff);
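
bpf_try_make_writable() differs from the generic skb_try_make_writable() in one respect: after pskb_expand_head() has reallocated the head, it refreshes the cached data_end via bpf_compute_data_end(), which the __sk_buff->data_end rewrite further down depends on. For reference, the companion definitions on the include/linux/filter.h side look roughly like the following sketch (not part of this diff):

    /* Sketch of the filter.h side this helper relies on: the cached
     * data_end lives in the skb control block, so the __sk_buff->data_end
     * ctx rewrite can fetch it with a single load.
     */
    struct bpf_skb_data_end {
            struct qdisc_skb_cb     qdisc_cb;
            void                    *data_end;
    };

    static inline void bpf_compute_data_end(struct sk_buff *skb)
    {
            struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

            BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
            cb->data_end = skb->data + skb_headlen(skb);
    }
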
@@ -1414,16 +1435,19 @@ static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
        unsigned int len = (unsigned int) r4;
        void *ptr;
 
-       if (unlikely((u32) offset > 0xffff || len > MAX_BPF_STACK))
-               return -EFAULT;
+       if (unlikely((u32) offset > 0xffff))
+               goto err_clear;
 
        ptr = skb_header_pointer(skb, offset, len, to);
        if (unlikely(!ptr))
-               return -EFAULT;
+               goto err_clear;
        if (ptr != to)
                memcpy(to, ptr, len);
 
        return 0;
+err_clear:
+       memset(to, 0, len);
+       return -EFAULT;
 }
 
 static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
@@ -1432,7 +1456,7 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
-       .arg3_type      = ARG_PTR_TO_STACK,
+       .arg3_type      = ARG_PTR_TO_RAW_STACK,
        .arg4_type      = ARG_CONST_STACK_SIZE,
 };
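
With arg3 switched to ARG_PTR_TO_RAW_STACK, the verifier no longer requires the program to initialize the destination buffer before the call; the trade-off is that the helper must write every byte on every path, which is what the new err_clear label guarantees. Program-side usage then looks roughly like the sketch below (BPF C; SEC() and the bpf_skb_load_bytes() declaration are assumed to come from the usual helper headers, and the program itself is hypothetical):

    /* Illustrative socket filter snippet, not part of this patch. */
    SEC("socket")
    int ldbytes_example(struct __sk_buff *skb)
    {
            __u8 buf[14];   /* may stay uninitialized: helper fills or clears it */

            if (bpf_skb_load_bytes(skb, 0, buf, sizeof(buf)) < 0)
                    return 0;       /* buf was zeroed by the helper on error */

            /* keep IPv4 frames, drop everything else */
            return (buf[12] == 0x08 && buf[13] == 0x00) ? skb->len : 0;
    }
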
 
@@ -1446,7 +1470,7 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
                return -EINVAL;
        if (unlikely((u32) offset > 0xffff))
                return -EFAULT;
-       if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
+       if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
                return -EFAULT;
 
        ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1501,7 +1525,7 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
                return -EINVAL;
        if (unlikely((u32) offset > 0xffff))
                return -EFAULT;
-       if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
+       if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
                return -EFAULT;
 
        ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1586,9 +1610,36 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
        .arg5_type      = ARG_ANYTHING,
 };
 
+static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
+{
+       if (skb_at_tc_ingress(skb))
+               skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
+
+       return dev_forward_skb(dev, skb);
+}
+
+static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
+{
+       int ret;
+
+       if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
+               net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
+               kfree_skb(skb);
+               return -ENETDOWN;
+       }
+
+       skb->dev = dev;
+
+       __this_cpu_inc(xmit_recursion);
+       ret = dev_queue_xmit(skb);
+       __this_cpu_dec(xmit_recursion);
+
+       return ret;
+}
+
 static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
 {
-       struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2;
+       struct sk_buff *skb = (struct sk_buff *) (long) r1;
        struct net_device *dev;
 
        if (unlikely(flags & ~(BPF_F_INGRESS)))
@@ -1598,19 +1649,12 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
        if (unlikely(!dev))
                return -EINVAL;
 
-       skb2 = skb_clone(skb, GFP_ATOMIC);
-       if (unlikely(!skb2))
+       skb = skb_clone(skb, GFP_ATOMIC);
+       if (unlikely(!skb))
                return -ENOMEM;
 
-       if (flags & BPF_F_INGRESS) {
-               if (skb_at_tc_ingress(skb2))
-                       skb_postpush_rcsum(skb2, skb_mac_header(skb2),
-                                          skb2->mac_len);
-               return dev_forward_skb(dev, skb2);
-       }
-
-       skb2->dev = dev;
-       return dev_queue_xmit(skb2);
+       return flags & BPF_F_INGRESS ?
+              __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
 }
 
 static const struct bpf_func_proto bpf_clone_redirect_proto = {
@@ -1654,15 +1698,8 @@ int skb_do_redirect(struct sk_buff *skb)
                return -EINVAL;
        }
 
-       if (ri->flags & BPF_F_INGRESS) {
-               if (skb_at_tc_ingress(skb))
-                       skb_postpush_rcsum(skb, skb_mac_header(skb),
-                                          skb->mac_len);
-               return dev_forward_skb(dev, skb);
-       }
-
-       skb->dev = dev;
-       return dev_queue_xmit(skb);
+       return ri->flags & BPF_F_INGRESS ?
+              __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
 }
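
Both redirect paths now funnel through __bpf_rx_skb()/__bpf_tx_skb(): BPF_F_INGRESS selects injection into the receive path, otherwise the skb is queued for transmit under the per-cpu xmit_recursion guard, so a program that keeps redirecting to its own device fails with -ENETDOWN instead of recursing. From the program side this machinery is driven as in the hedged sketch below (hypothetical tc classifier; SEC() and the helper declarations are assumed from the usual headers):

    /* Illustrative cls_bpf program, not part of this patch. */
    SEC("classifier")
    int redirect_example(struct __sk_buff *skb)
    {
            const int target_ifindex = 2;   /* hypothetical device */

            /* bpf_clone_redirect(skb, ifindex, flags) would forward a clone
             * immediately; bpf_redirect() only records ifindex/flags and lets
             * skb_do_redirect() run after the program returns TC_ACT_REDIRECT.
             */
            return bpf_redirect(target_ifindex, 0 /* egress, not BPF_F_INGRESS */);
    }
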
 
 static const struct bpf_func_proto bpf_redirect_proto = {
@@ -1701,12 +1738,15 @@ static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
 {
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
        __be16 vlan_proto = (__force __be16) r2;
+       int ret;
 
        if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
                     vlan_proto != htons(ETH_P_8021AD)))
                vlan_proto = htons(ETH_P_8021Q);
 
-       return skb_vlan_push(skb, vlan_proto, vlan_tci);
+       ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
+       bpf_compute_data_end(skb);
+       return ret;
 }
 
 const struct bpf_func_proto bpf_skb_vlan_push_proto = {
@@ -1722,8 +1762,11 @@ EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
 static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
+       int ret;
 
-       return skb_vlan_pop(skb);
+       ret = skb_vlan_pop(skb);
+       bpf_compute_data_end(skb);
+       return ret;
 }
 
 const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
@@ -1761,12 +1804,19 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
        struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
        const struct ip_tunnel_info *info = skb_tunnel_info(skb);
        u8 compat[sizeof(struct bpf_tunnel_key)];
+       void *to_orig = to;
+       int err;
 
-       if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6))))
-               return -EINVAL;
-       if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags))
-               return -EPROTO;
+       if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
+               err = -EINVAL;
+               goto err_clear;
+       }
+       if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
+               err = -EPROTO;
+               goto err_clear;
+       }
        if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
+               err = -EINVAL;
                switch (size) {
                case offsetof(struct bpf_tunnel_key, tunnel_label):
                case offsetof(struct bpf_tunnel_key, tunnel_ext):
@@ -1776,12 +1826,12 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
                         * a common path later on.
                         */
                        if (ip_tunnel_info_af(info) != AF_INET)
-                               return -EINVAL;
+                               goto err_clear;
 set_compat:
                        to = (struct bpf_tunnel_key *)compat;
                        break;
                default:
-                       return -EINVAL;
+                       goto err_clear;
                }
        }
 
@@ -1798,9 +1848,12 @@ set_compat:
        }
 
        if (unlikely(size != sizeof(struct bpf_tunnel_key)))
-               memcpy((void *)(long) r2, to, size);
+               memcpy(to_orig, to, size);
 
        return 0;
+err_clear:
+       memset(to_orig, 0, size);
+       return err;
 }
 
 static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
@@ -1808,7 +1861,7 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
-       .arg2_type      = ARG_PTR_TO_STACK,
+       .arg2_type      = ARG_PTR_TO_RAW_STACK,
        .arg3_type      = ARG_CONST_STACK_SIZE,
        .arg4_type      = ARG_ANYTHING,
 };
@@ -1818,16 +1871,26 @@ static u64 bpf_skb_get_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5)
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
        u8 *to = (u8 *) (long) r2;
        const struct ip_tunnel_info *info = skb_tunnel_info(skb);
+       int err;
 
        if (unlikely(!info ||
-                    !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT)))
-               return -ENOENT;
-       if (unlikely(size < info->options_len))
-               return -ENOMEM;
+                    !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
+               err = -ENOENT;
+               goto err_clear;
+       }
+       if (unlikely(size < info->options_len)) {
+               err = -ENOMEM;
+               goto err_clear;
+       }
 
        ip_tunnel_info_opts_get(to, info);
+       if (size > info->options_len)
+               memset(to + info->options_len, 0, size - info->options_len);
 
        return info->options_len;
+err_clear:
+       memset(to, 0, size);
+       return err;
 }
 
 static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
@@ -1835,7 +1898,7 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
-       .arg2_type      = ARG_PTR_TO_STACK,
+       .arg2_type      = ARG_PTR_TO_RAW_STACK,
        .arg3_type      = ARG_CONST_STACK_SIZE,
 };
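
The same raw-stack rule applies to the two tunnel getters: on failure they zero the caller's buffer, and bpf_skb_get_tunnel_opt() additionally pads any unused tail, so programs may pass uninitialized stack memory. A hedged usage sketch for a collect_md tunnel setup (program and constants are hypothetical):

    /* Illustrative snippet, not part of this patch. */
    SEC("classifier")
    int tunnel_example(struct __sk_buff *skb)
    {
            struct bpf_tunnel_key key;      /* no memset() required anymore */

            if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0) < 0)
                    return TC_ACT_SHOT;     /* key was zeroed by the helper */

            return key.tunnel_id == 42 ? TC_ACT_OK : TC_ACT_SHOT;
    }
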
 
@@ -2021,6 +2084,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
                return &bpf_redirect_proto;
        case BPF_FUNC_get_route_realm:
                return &bpf_get_route_realm_proto;
+       case BPF_FUNC_perf_event_output:
+               return bpf_get_event_output_proto();
        default:
                return sk_filter_func_proto(func_id);
        }
@@ -2028,31 +2093,32 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 
 static bool __is_valid_access(int off, int size, enum bpf_access_type type)
 {
-       /* check bounds */
        if (off < 0 || off >= sizeof(struct __sk_buff))
                return false;
-
-       /* disallow misaligned access */
+       /* The verifier guarantees that size > 0. */
        if (off % size != 0)
                return false;
-
-       /* all __sk_buff fields are __u32 */
-       if (size != 4)
+       if (size != sizeof(__u32))
                return false;
 
        return true;
 }
 
 static bool sk_filter_is_valid_access(int off, int size,
-                                     enum bpf_access_type type)
+                                     enum bpf_access_type type,
+                                     enum bpf_reg_type *reg_type)
 {
-       if (off == offsetof(struct __sk_buff, tc_classid))
+       switch (off) {
+       case offsetof(struct __sk_buff, tc_classid):
+       case offsetof(struct __sk_buff, data):
+       case offsetof(struct __sk_buff, data_end):
                return false;
+       }
 
        if (type == BPF_WRITE) {
                switch (off) {
                case offsetof(struct __sk_buff, cb[0]) ...
-                       offsetof(struct __sk_buff, cb[4]):
+                    offsetof(struct __sk_buff, cb[4]):
                        break;
                default:
                        return false;
@@ -2063,7 +2129,8 @@ static bool sk_filter_is_valid_access(int off, int size,
 }
 
 static bool tc_cls_act_is_valid_access(int off, int size,
-                                      enum bpf_access_type type)
+                                      enum bpf_access_type type,
+                                      enum bpf_reg_type *reg_type)
 {
        if (type == BPF_WRITE) {
                switch (off) {
@@ -2078,6 +2145,16 @@ static bool tc_cls_act_is_valid_access(int off, int size,
                        return false;
                }
        }
+
+       switch (off) {
+       case offsetof(struct __sk_buff, data):
+               *reg_type = PTR_TO_PACKET;
+               break;
+       case offsetof(struct __sk_buff, data_end):
+               *reg_type = PTR_TO_PACKET_END;
+               break;
+       }
+
        return __is_valid_access(off, size, type);
 }
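
Marking data and data_end as PTR_TO_PACKET/PTR_TO_PACKET_END is what enables direct packet access from tc programs: the verifier permits dereferencing packet memory only after an explicit bounds check against data_end that it can track. The canonical pattern looks roughly like this (hypothetical program; header includes and the byte-order macro are assumed from the usual uapi headers):

    /* Illustrative direct packet access, not part of this patch. */
    SEC("classifier")
    int parse_eth(struct __sk_buff *skb)
    {
            void *data     = (void *)(long)skb->data;
            void *data_end = (void *)(long)skb->data_end;
            struct ethhdr *eth = data;

            /* Without this check the verifier rejects the loads below. */
            if (data + sizeof(*eth) > data_end)
                    return TC_ACT_OK;

            return eth->h_proto == __constant_htons(ETH_P_IP) ?
                   TC_ACT_OK : TC_ACT_SHOT;
    }
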
 
@@ -2195,6 +2272,20 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
                        *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
                break;
 
+       case offsetof(struct __sk_buff, data):
+               *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, data)),
+                                     dst_reg, src_reg,
+                                     offsetof(struct sk_buff, data));
+               break;
+
+       case offsetof(struct __sk_buff, data_end):
+               ctx_off -= offsetof(struct __sk_buff, data_end);
+               ctx_off += offsetof(struct sk_buff, cb);
+               ctx_off += offsetof(struct bpf_skb_data_end, data_end);
+               *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(sizeof(void *)),
+                                     dst_reg, src_reg, ctx_off);
+               break;
+
        case offsetof(struct __sk_buff, tc_index):
 #ifdef CONFIG_NET_SCHED
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);
@@ -2219,30 +2310,30 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
 }
 
 static const struct bpf_verifier_ops sk_filter_ops = {
-       .get_func_proto = sk_filter_func_proto,
-       .is_valid_access = sk_filter_is_valid_access,
-       .convert_ctx_access = bpf_net_convert_ctx_access,
+       .get_func_proto         = sk_filter_func_proto,
+       .is_valid_access        = sk_filter_is_valid_access,
+       .convert_ctx_access     = bpf_net_convert_ctx_access,
 };
 
 static const struct bpf_verifier_ops tc_cls_act_ops = {
-       .get_func_proto = tc_cls_act_func_proto,
-       .is_valid_access = tc_cls_act_is_valid_access,
-       .convert_ctx_access = bpf_net_convert_ctx_access,
+       .get_func_proto         = tc_cls_act_func_proto,
+       .is_valid_access        = tc_cls_act_is_valid_access,
+       .convert_ctx_access     = bpf_net_convert_ctx_access,
 };
 
 static struct bpf_prog_type_list sk_filter_type __read_mostly = {
-       .ops = &sk_filter_ops,
-       .type = BPF_PROG_TYPE_SOCKET_FILTER,
+       .ops    = &sk_filter_ops,
+       .type   = BPF_PROG_TYPE_SOCKET_FILTER,
 };
 
 static struct bpf_prog_type_list sched_cls_type __read_mostly = {
-       .ops = &tc_cls_act_ops,
-       .type = BPF_PROG_TYPE_SCHED_CLS,
+       .ops    = &tc_cls_act_ops,
+       .type   = BPF_PROG_TYPE_SCHED_CLS,
 };
 
 static struct bpf_prog_type_list sched_act_type __read_mostly = {
-       .ops = &tc_cls_act_ops,
-       .type = BPF_PROG_TYPE_SCHED_ACT,
+       .ops    = &tc_cls_act_ops,
+       .type   = BPF_PROG_TYPE_SCHED_ACT,
 };
 
 static int __init register_sk_filter_ops(void)
@@ -2255,7 +2346,7 @@ static int __init register_sk_filter_ops(void)
 }
 late_initcall(register_sk_filter_ops);
 
-int __sk_detach_filter(struct sock *sk, bool locked)
+int sk_detach_filter(struct sock *sk)
 {
        int ret = -ENOENT;
        struct sk_filter *filter;
@@ -2263,7 +2354,8 @@ int __sk_detach_filter(struct sock *sk, bool locked)
        if (sock_flag(sk, SOCK_FILTER_LOCKED))
                return -EPERM;
 
-       filter = rcu_dereference_protected(sk->sk_filter, locked);
+       filter = rcu_dereference_protected(sk->sk_filter,
+                                          lockdep_sock_is_held(sk));
        if (filter) {
                RCU_INIT_POINTER(sk->sk_filter, NULL);
                sk_filter_uncharge(sk, filter);
@@ -2272,12 +2364,7 @@ int __sk_detach_filter(struct sock *sk, bool locked)
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(__sk_detach_filter);
-
-int sk_detach_filter(struct sock *sk)
-{
-       return __sk_detach_filter(sk, sock_owned_by_user(sk));
-}
+EXPORT_SYMBOL_GPL(sk_detach_filter);
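
With the `locked` argument gone, the attach/detach entry points encode their locking contract directly via lockdep_sock_is_held(): callers must hold the socket lock rather than describe which lock discipline they used. A hedged sketch of the expected caller pattern (the wrapper itself is hypothetical; sock_setsockopt() is the in-tree example of this shape):

    /* Illustrative caller, not part of this patch. */
    static int example_detach(struct sock *sk)
    {
            int err;

            lock_sock(sk);          /* satisfies lockdep_sock_is_held(sk) */
            err = sk_detach_filter(sk);
            release_sock(sk);

            return err;
    }
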
 
 int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
                  unsigned int len)
@@ -2288,7 +2375,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
 
        lock_sock(sk);
        filter = rcu_dereference_protected(sk->sk_filter,
-                                          sock_owned_by_user(sk));
+                                          lockdep_sock_is_held(sk));
        if (!filter)
                goto out;