net/mlx5e: Refactor mlx5e flow steering structs
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_tc.c
1 /*
2  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <net/flow_dissector.h>
34 #include <net/pkt_cls.h>
35 #include <net/tc_act/tc_gact.h>
36 #include <net/tc_act/tc_skbedit.h>
37 #include <linux/mlx5/fs.h>
38 #include <linux/mlx5/device.h>
39 #include <linux/rhashtable.h>
40 #include "en.h"
41 #include "en_tc.h"
42
/* One offloaded TC flower filter, keyed by the TC filter cookie and
 * stored in the per-device rhashtable (priv->fs.tc.ht).
 */
struct mlx5e_tc_flow {
	struct rhash_head	node;	/* rhashtable linkage */
	u64			cookie;	/* TC filter cookie (hash key) */
	struct mlx5_flow_rule	*rule;	/* HW steering rule installed for this filter */
};
48
/* Sizing of the lazily-created auto-grouped TC offload flow table */
#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
51
52 static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
53                                                 u32 *match_c, u32 *match_v,
54                                                 u32 action, u32 flow_tag)
55 {
56         struct mlx5_flow_destination dest = {
57                 .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
58                 {.ft = priv->fs.vlan.ft.t},
59         };
60         struct mlx5_flow_rule *rule;
61         bool table_created = false;
62
63         if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
64                 priv->fs.tc.t =
65                         mlx5_create_auto_grouped_flow_table(priv->fs.ns,
66                                                             MLX5E_TC_PRIO,
67                                                             MLX5E_TC_TABLE_NUM_ENTRIES,
68                                                             MLX5E_TC_TABLE_NUM_GROUPS,
69                                                             0);
70                 if (IS_ERR(priv->fs.tc.t)) {
71                         netdev_err(priv->netdev,
72                                    "Failed to create tc offload table\n");
73                         return ERR_CAST(priv->fs.tc.t);
74                 }
75
76                 table_created = true;
77         }
78
79         rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS,
80                                   match_c, match_v,
81                                   action, flow_tag,
82                                   action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ? &dest : NULL);
83
84         if (IS_ERR(rule) && table_created) {
85                 mlx5_destroy_flow_table(priv->fs.tc.t);
86                 priv->fs.tc.t = NULL;
87         }
88
89         return rule;
90 }
91
92 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
93                               struct mlx5_flow_rule *rule)
94 {
95         mlx5_del_flow_rule(rule);
96
97         if (!mlx5e_tc_num_filters(priv)) {
98                 mlx5_destroy_flow_table(priv->fs.tc.t);
99                 priv->fs.tc.t = NULL;
100         }
101 }
102
103 static int parse_cls_flower(struct mlx5e_priv *priv,
104                             u32 *match_c, u32 *match_v,
105                             struct tc_cls_flower_offload *f)
106 {
107         void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers);
108         void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers);
109         u16 addr_type = 0;
110         u8 ip_proto = 0;
111
112         if (f->dissector->used_keys &
113             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
114               BIT(FLOW_DISSECTOR_KEY_BASIC) |
115               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
116               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
117               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
118               BIT(FLOW_DISSECTOR_KEY_PORTS))) {
119                 netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
120                             f->dissector->used_keys);
121                 return -EOPNOTSUPP;
122         }
123
124         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
125                 struct flow_dissector_key_control *key =
126                         skb_flow_dissector_target(f->dissector,
127                                                   FLOW_DISSECTOR_KEY_BASIC,
128                                                   f->key);
129                 addr_type = key->addr_type;
130         }
131
132         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
133                 struct flow_dissector_key_basic *key =
134                         skb_flow_dissector_target(f->dissector,
135                                                   FLOW_DISSECTOR_KEY_BASIC,
136                                                   f->key);
137                 struct flow_dissector_key_basic *mask =
138                         skb_flow_dissector_target(f->dissector,
139                                                   FLOW_DISSECTOR_KEY_BASIC,
140                                                   f->mask);
141                 ip_proto = key->ip_proto;
142
143                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
144                          ntohs(mask->n_proto));
145                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
146                          ntohs(key->n_proto));
147
148                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
149                          mask->ip_proto);
150                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
151                          key->ip_proto);
152         }
153
154         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
155                 struct flow_dissector_key_eth_addrs *key =
156                         skb_flow_dissector_target(f->dissector,
157                                                   FLOW_DISSECTOR_KEY_ETH_ADDRS,
158                                                   f->key);
159                 struct flow_dissector_key_eth_addrs *mask =
160                         skb_flow_dissector_target(f->dissector,
161                                                   FLOW_DISSECTOR_KEY_ETH_ADDRS,
162                                                   f->mask);
163
164                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
165                                              dmac_47_16),
166                                 mask->dst);
167                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
168                                              dmac_47_16),
169                                 key->dst);
170
171                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
172                                              smac_47_16),
173                                 mask->src);
174                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
175                                              smac_47_16),
176                                 key->src);
177         }
178
179         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
180                 struct flow_dissector_key_ipv4_addrs *key =
181                         skb_flow_dissector_target(f->dissector,
182                                                   FLOW_DISSECTOR_KEY_IPV4_ADDRS,
183                                                   f->key);
184                 struct flow_dissector_key_ipv4_addrs *mask =
185                         skb_flow_dissector_target(f->dissector,
186                                                   FLOW_DISSECTOR_KEY_IPV4_ADDRS,
187                                                   f->mask);
188
189                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
190                                     src_ipv4_src_ipv6.ipv4_layout.ipv4),
191                        &mask->src, sizeof(mask->src));
192                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
193                                     src_ipv4_src_ipv6.ipv4_layout.ipv4),
194                        &key->src, sizeof(key->src));
195                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
196                                     dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
197                        &mask->dst, sizeof(mask->dst));
198                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
199                                     dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
200                        &key->dst, sizeof(key->dst));
201         }
202
203         if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
204                 struct flow_dissector_key_ipv6_addrs *key =
205                         skb_flow_dissector_target(f->dissector,
206                                                   FLOW_DISSECTOR_KEY_IPV6_ADDRS,
207                                                   f->key);
208                 struct flow_dissector_key_ipv6_addrs *mask =
209                         skb_flow_dissector_target(f->dissector,
210                                                   FLOW_DISSECTOR_KEY_IPV6_ADDRS,
211                                                   f->mask);
212
213                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
214                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
215                        &mask->src, sizeof(mask->src));
216                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
217                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
218                        &key->src, sizeof(key->src));
219
220                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
221                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
222                        &mask->dst, sizeof(mask->dst));
223                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
224                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
225                        &key->dst, sizeof(key->dst));
226         }
227
228         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
229                 struct flow_dissector_key_ports *key =
230                         skb_flow_dissector_target(f->dissector,
231                                                   FLOW_DISSECTOR_KEY_PORTS,
232                                                   f->key);
233                 struct flow_dissector_key_ports *mask =
234                         skb_flow_dissector_target(f->dissector,
235                                                   FLOW_DISSECTOR_KEY_PORTS,
236                                                   f->mask);
237                 switch (ip_proto) {
238                 case IPPROTO_TCP:
239                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
240                                  tcp_sport, ntohs(mask->src));
241                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
242                                  tcp_sport, ntohs(key->src));
243
244                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
245                                  tcp_dport, ntohs(mask->dst));
246                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
247                                  tcp_dport, ntohs(key->dst));
248                         break;
249
250                 case IPPROTO_UDP:
251                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
252                                  udp_sport, ntohs(mask->src));
253                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
254                                  udp_sport, ntohs(key->src));
255
256                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
257                                  udp_dport, ntohs(mask->dst));
258                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
259                                  udp_dport, ntohs(key->dst));
260                         break;
261                 default:
262                         netdev_err(priv->netdev,
263                                    "Only UDP and TCP transport are supported\n");
264                         return -EINVAL;
265                 }
266         }
267
268         return 0;
269 }
270
271 static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
272                             u32 *action, u32 *flow_tag)
273 {
274         const struct tc_action *a;
275
276         if (tc_no_actions(exts))
277                 return -EINVAL;
278
279         *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
280         *action = 0;
281
282         tc_for_each_action(a, exts) {
283                 /* Only support a single action per rule */
284                 if (*action)
285                         return -EINVAL;
286
287                 if (is_tcf_gact_shot(a)) {
288                         *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
289                         continue;
290                 }
291
292                 if (is_tcf_skbedit_mark(a)) {
293                         u32 mark = tcf_skbedit_mark(a);
294
295                         if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
296                                 netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
297                                             mark);
298                                 return -EINVAL;
299                         }
300
301                         *flow_tag = mark;
302                         *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
303                         continue;
304                 }
305
306                 return -EINVAL;
307         }
308
309         return 0;
310 }
311
/* Add or replace the offloaded flow for a TC flower filter.
 *
 * If a flow with the same cookie already exists this is a replace: the
 * new HW rule is installed first and only then is the old rule removed,
 * so traffic keeps a rule to hit throughout. On error, a newly
 * allocated flow is freed but a pre-existing one is kept (it still owns
 * its old rule and stays in the hashtable).
 *
 * Returns 0 on success or a negative errno.
 */
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	u32 *match_c;
	u32 *match_v;
	int err = 0;
	u32 flow_tag;
	u32 action;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_rule *old = NULL;

	/* Replace semantics: reuse the existing flow entry if the cookie
	 * is already known, remembering its current HW rule.
	 */
	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (flow)
		old = flow->rule;
	else
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);

	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	if (!match_c || !match_v || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;

	err = parse_cls_flower(priv, match_c, match_v, f);
	if (err < 0)
		goto err_free;

	err = parse_tc_actions(priv, f->exts, &action, &flow_tag);
	if (err < 0)
		goto err_free;

	/* NOTE(review): on the replace path the existing node appears to
	 * be re-inserted while still hashed - verify rhashtable usage.
	 */
	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_free;

	/* Install the new HW rule before tearing down the old one */
	flow->rule = mlx5e_tc_add_flow(priv, match_c, match_v, action,
				       flow_tag);
	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_hash_del;
	}

	if (old)
		mlx5e_tc_del_flow(priv, old);

	goto out;

err_hash_del:
	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

err_free:
	/* Only free a flow allocated by this call; a pre-existing flow
	 * is still live and owned by the hashtable.
	 */
	if (!old)
		kfree(flow);
out:
	kfree(match_c);
	kfree(match_v);
	return err;
}
376
377 int mlx5e_delete_flower(struct mlx5e_priv *priv,
378                         struct tc_cls_flower_offload *f)
379 {
380         struct mlx5e_tc_flow *flow;
381         struct mlx5e_tc_table *tc = &priv->fs.tc;
382
383         flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
384                                       tc->ht_params);
385         if (!flow)
386                 return -EINVAL;
387
388         rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
389
390         mlx5e_tc_del_flow(priv, flow->rule);
391
392         kfree(flow);
393
394         return 0;
395 }
396
/* Hashtable layout for mlx5e_tc_flow entries: keyed by the TC cookie,
 * linked through the embedded rhash_head.
 */
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
403
404 int mlx5e_tc_init(struct mlx5e_priv *priv)
405 {
406         struct mlx5e_tc_table *tc = &priv->fs.tc;
407
408         tc->ht_params = mlx5e_tc_flow_ht_params;
409         return rhashtable_init(&tc->ht, &tc->ht_params);
410 }
411
412 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
413 {
414         struct mlx5e_tc_flow *flow = ptr;
415         struct mlx5e_priv *priv = arg;
416
417         mlx5e_tc_del_flow(priv, flow->rule);
418         kfree(flow);
419 }
420
421 void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
422 {
423         struct mlx5e_tc_table *tc = &priv->fs.tc;
424
425         rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
426
427         if (!IS_ERR_OR_NULL(tc->t)) {
428                 mlx5_destroy_flow_table(tc->t);
429                 tc->t = NULL;
430         }
431 }