nfp: bpf: improve handling for disabled BPF syscall
[cascardo/linux.git] drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
/*
 * Copyright (C) 2016 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_offload.c
 * Netronome network device driver: TC offload functions for PF and VF
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "nfp_bpf.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"

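/* Periodic timer callback: while the BPF offload is enabled it re-arms
 * itself and snapshots the hardware filter counters, so stats queries do
 * not have to read the device synchronously.  The counter reads happen
 * outside rx_filter_lock; this looks safe because the timer is the only
 * writer of nn->rx_filter.
 */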
void nfp_net_filter_stats_timer(unsigned long data)
{
        struct nfp_net *nn = (void *)data;
        struct nfp_stat_pair latest;

        spin_lock_bh(&nn->rx_filter_lock);

        if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
                mod_timer(&nn->rx_filter_stats_timer,
                          jiffies + NFP_NET_STAT_POLL_IVL);

        spin_unlock_bh(&nn->rx_filter_lock);

        latest.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
        latest.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);

        if (latest.pkts != nn->rx_filter.pkts)
                nn->rx_filter_change = jiffies;

        nn->rx_filter = latest;
}

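/* Resynchronize the software view of the counters and reset the change
 * timestamp; called when a freshly loaded program starts so that stale
 * deltas are not attributed to it.
 */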
static void nfp_net_bpf_stats_reset(struct nfp_net *nn)
{
        nn->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
        nn->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
        nn->rx_filter_prev = nn->rx_filter;
        nn->rx_filter_change = jiffies;
}

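/* Push the counter delta accumulated since the last query into the TC
 * action stats.  ETH_HLEN is subtracted per packet because the hardware
 * counts whole frames while TC stats are based on skb->len, which at
 * ingress excludes the Ethernet header.  Preemption is disabled around
 * tcf_action_stats_update(), presumably because the action stats live in
 * per-CPU counters.
 */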
static int
nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
        struct tc_action *a;
        LIST_HEAD(actions);
        u64 bytes, pkts;

        pkts = nn->rx_filter.pkts - nn->rx_filter_prev.pkts;
        bytes = nn->rx_filter.bytes - nn->rx_filter_prev.bytes;
        bytes -= pkts * ETH_HLEN;

        nn->rx_filter_prev = nn->rx_filter;

        preempt_disable();

        tcf_exts_to_list(cls_bpf->exts, &actions);
        list_for_each_entry(a, &actions, list)
                tcf_action_stats_update(a, bytes, pkts, nn->rx_filter_change);

        preempt_enable();

        return 0;
}

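/* Map the TC request to one of the offload action types the JIT
 * understands: a direct-action classifier with no separate actions, a
 * single gact drop, or a single mirred redirect back to the same port.
 * Anything else is left to the software path via -ENOTSUPP.
 */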
static int
nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
        const struct tc_action *a;
        LIST_HEAD(actions);

        /* TC direct action */
        if (cls_bpf->exts_integrated) {
                if (tc_no_actions(cls_bpf->exts))
                        return NN_ACT_DIRECT;

                return -ENOTSUPP;
        }

        /* TC legacy mode */
        if (!tc_single_action(cls_bpf->exts))
                return -ENOTSUPP;

        tcf_exts_to_list(cls_bpf->exts, &actions);
        list_for_each_entry(a, &actions, list) {
                if (is_tcf_gact_shot(a))
                        return NN_ACT_TC_DROP;

                if (is_tcf_mirred_redirect(a) &&
                    tcf_mirred_ifindex(a) == nn->netdev->ifindex)
                        return NN_ACT_TC_REDIR;
        }

        return -ENOTSUPP;
}

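/* Validate the request and JIT the program into a DMA-coherent buffer the
 * firmware can fetch from.  The IS_ENABLED() check below lets the compiler
 * drop the unreachable JIT paths entirely when the BPF syscall is compiled
 * out, which appears to be the point of the commit named in the title.
 */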
static int
nfp_net_bpf_offload_prepare(struct nfp_net *nn,
                            struct tc_cls_bpf_offload *cls_bpf,
                            struct nfp_bpf_result *res,
                            void **code, dma_addr_t *dma_addr, u16 max_instr)
{
        unsigned int code_sz = max_instr * sizeof(u64);
        enum nfp_bpf_action_type act;
        u16 start_off, done_off;
        unsigned int max_mtu;
        int ret;

        if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
                return -ENOTSUPP;

        ret = nfp_net_bpf_get_act(nn, cls_bpf);
        if (ret < 0)
                return ret;
        act = ret;
        max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
        if (max_mtu < nn->netdev->mtu) {
                nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
                return -ENOTSUPP;
        }

        start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
        done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

        *code = dma_zalloc_coherent(&nn->pdev->dev, code_sz, dma_addr,
                                    GFP_KERNEL);
        if (!*code)
                return -ENOMEM;

        ret = nfp_bpf_jit(cls_bpf->prog, *code, act, start_off, done_off,
                          max_instr, res);
        if (ret)
                goto out;

        return 0;

out:
        dma_free_coherent(&nn->pdev->dev, code_sz, *code, *dma_addr);
        return ret;
}

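/* Hand the JITed code over to the firmware and flip the BPF enable bit in
 * the control word.  The DMA buffer is freed before returning, on the
 * assumption that the firmware has consumed the program during the
 * UPDATE_BPF reconfig.
 */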
static void
nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
                           void *code, dma_addr_t dma_addr,
                           unsigned int code_sz, unsigned int n_instr,
                           bool dense_mode)
{
        u64 bpf_addr = dma_addr;
        int err;

        nn->bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);

        if (dense_mode)
                bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;

        nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr);
        nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, bpf_addr);

        /* Load up the JITed code */
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
        if (err)
                nn_err(nn, "FW command error while loading BPF: %d\n", err);

        /* Enable passing packets through BPF function */
        nn->ctrl |= NFP_NET_CFG_CTRL_BPF;
        nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
        if (err)
                nn_err(nn, "FW command error while enabling BPF: %d\n", err);

        dma_free_coherent(&nn->pdev->dev, code_sz, code, dma_addr);

        nfp_net_bpf_stats_reset(nn);
        mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL);
}

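/* Disable the offload and stop stats polling.  NFP_NET_CFG_CTRL_BPF is
 * cleared under rx_filter_lock first, which guarantees the timer callback
 * cannot re-arm itself once del_timer_sync() below has returned.
 */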
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
        if (!(nn->ctrl & NFP_NET_CFG_CTRL_BPF))
                return 0;

        spin_lock_bh(&nn->rx_filter_lock);
        nn->ctrl &= ~NFP_NET_CFG_CTRL_BPF;
        spin_unlock_bh(&nn->rx_filter_lock);
        nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);

        del_timer_sync(&nn->rx_filter_stats_timer);
        nn->bpf_offload_skip_sw = 0;

        return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

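/* Entry point for cls_bpf offload commands.  @handle and @proto come from
 * the ndo_setup_tc plumbing and are unused here; only one program can be
 * offloaded per netdev at a time, so all state is kept in @nn.
 */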
int
nfp_net_bpf_offload(struct nfp_net *nn, u32 handle, __be16 proto,
                    struct tc_cls_bpf_offload *cls_bpf)
{
        struct nfp_bpf_result res;
        dma_addr_t dma_addr;
        u16 max_instr;
        void *code;
        int err;

        max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);

        switch (cls_bpf->command) {
        case TC_CLSBPF_REPLACE:
                /* There is nothing stopping us from implementing seamless
                 * replace but the simple method of loading I adopted in
                 * the firmware does not handle atomic replace (i.e. we have
                 * to stop the BPF offload and re-enable it).  Letting a few
                 * frames leak through without BPF applied in hardware should
                 * be fine as long as a software fallback is available,
                 * though.
                 */
                if (nn->bpf_offload_skip_sw)
                        return -EBUSY;

                err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
                                                  &dma_addr, max_instr);
                if (err)
                        return err;

                nfp_net_bpf_stop(nn);
                nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
                                           dma_addr, max_instr * sizeof(u64),
                                           res.n_instr, res.dense_mode);
                return 0;

        case TC_CLSBPF_ADD:
                if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
                        return -EBUSY;

                err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
                                                  &dma_addr, max_instr);
                if (err)
                        return err;

                nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
                                           dma_addr, max_instr * sizeof(u64),
                                           res.n_instr, res.dense_mode);
                return 0;

        case TC_CLSBPF_DESTROY:
                return nfp_net_bpf_stop(nn);

        case TC_CLSBPF_STATS:
                return nfp_net_bpf_stats_update(nn, cls_bpf);

        default:
                return -ENOTSUPP;
        }
}
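
/* For reference, this path is typically exercised from userspace with the
 * standard iproute2 cls_bpf syntax (not specific to this driver), e.g.:
 *
 *   tc qdisc add dev <netdev> handle ffff: ingress
 *   tc filter add dev <netdev> parent ffff: bpf obj prog.o da skip_sw
 *
 * "da" selects direct-action mode (exts_integrated above) and "skip_sw"
 * sets TCA_CLS_FLAGS_SKIP_SW, making the hardware program authoritative.
 */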