drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"

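/* One offloaded TC flow, hashed by the TC filter cookie.
 * @node:   rhashtable linkage in priv->fs.tc.ht
 * @cookie: TC filter cookie, used as the hash key
 * @rule:   hardware steering rule backing this flow
 */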
struct mlx5e_tc_flow {
        struct rhash_head       node;
        u64                     cookie;
        struct mlx5_flow_rule   *rule;
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

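/* Install a rule in the NIC receive flow table, creating the TC
 * offload table on first use.  A FWD action steers matching packets
 * back to the vlan table; a COUNT action allocates a flow counter
 * that serves as the rule's destination.  On failure, any table or
 * counter created here is torn down again.
 */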
static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                                                    struct mlx5_flow_spec *spec,
                                                    u32 action, u32 flow_tag)
{
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_flow_destination dest = { 0 };
        struct mlx5_fc *counter = NULL;
        struct mlx5_flow_rule *rule;
        bool table_created = false;

        if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                dest.ft = priv->fs.vlan.ft.t;
        } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(dev, true);
                if (IS_ERR(counter))
                        return ERR_CAST(counter);

                dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest.counter = counter;
        }

        if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
                priv->fs.tc.t =
                        mlx5_create_auto_grouped_flow_table(priv->fs.ns,
                                                            MLX5E_TC_PRIO,
                                                            MLX5E_TC_TABLE_NUM_ENTRIES,
                                                            MLX5E_TC_TABLE_NUM_GROUPS,
                                                            0);
                if (IS_ERR(priv->fs.tc.t)) {
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
                        rule = ERR_CAST(priv->fs.tc.t);
                        goto err_create_ft;
                }

                table_created = true;
        }

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
                                  action, flow_tag,
                                  &dest);

        if (IS_ERR(rule))
                goto err_add_rule;

        return rule;

err_add_rule:
        if (table_created) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
err_create_ft:
        mlx5_fc_destroy(dev, counter);

        return rule;
}

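/* Install a rule in the eswitch FDB (SRIOV offloads mode).  The source
 * vport is taken from the representor that owns this netdev; vport 0
 * denotes the uplink.
 */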
static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                                                    struct mlx5_flow_spec *spec,
                                                    u32 action, u32 dst_vport)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_eswitch_rep *rep = priv->ppriv;
        u32 src_vport;

        if (rep->vport) /* set source vport for the flow */
                src_vport = rep->vport;
        else
                src_vport = FDB_UPLINK_VPORT;

        return mlx5_eswitch_add_offloaded_rule(esw, spec, action, src_vport, dst_vport);
}

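/* Remove a hardware rule together with its counter, if any, and
 * destroy the TC offload table once the last filter is gone.
 */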
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5_flow_rule *rule)
{
        struct mlx5_fc *counter = NULL;

        counter = mlx5_flow_rule_counter(rule);

        mlx5_del_flow_rule(rule);

        mlx5_fc_destroy(priv->mdev, counter);

        if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
}

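/* Translate a cls_flower match into an mlx5 flow spec over the outer
 * headers.  Supported keys: control, basic (ethertype/ip_proto),
 * ethernet addresses, IPv4/IPv6 addresses and TCP/UDP ports; filters
 * using any other key are rejected with -EOPNOTSUPP.
 */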
static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
                            struct tc_cls_flower_offload *f)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
        u16 addr_type = 0;
        u8 ip_proto = 0;

        if (f->dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS))) {
                netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
                            f->dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->key);
                addr_type = key->addr_type;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->key);
                struct flow_dissector_key_basic *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->mask);
                ip_proto = key->ip_proto;

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
                         ntohs(mask->n_proto));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
                         ntohs(key->n_proto));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
                         mask->ip_proto);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                         key->ip_proto);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_dissector_key_eth_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->key);
                struct flow_dissector_key_eth_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->mask);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             dmac_47_16),
                                mask->dst);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             dmac_47_16),
                                key->dst);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             smac_47_16),
                                mask->src);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             smac_47_16),
                                key->src);
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &key->src, sizeof(key->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &key->dst, sizeof(key->dst));
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_dissector_key_ipv6_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv6_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &key->src, sizeof(key->src));

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &key->dst, sizeof(key->dst));
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->key);
                struct flow_dissector_key_ports *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->mask);
                switch (ip_proto) {
                case IPPROTO_TCP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_dport, ntohs(key->dst));
                        break;

                case IPPROTO_UDP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_dport, ntohs(key->dst));
                        break;
                default:
                        netdev_err(priv->netdev,
                                   "Only UDP and TCP transport are supported\n");
                        return -EINVAL;
                }
        }

        return 0;
}

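/* Parse TC actions for the NIC path.  Exactly one action is supported
 * per rule: gact drop (counted when the device has flow counters) or
 * skbedit mark, whose value becomes the 16-bit flow tag reported on
 * receive.
 */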
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                u32 *action, u32 *flow_tag)
{
        const struct tc_action *a;
        LIST_HEAD(actions);

        if (tc_no_actions(exts))
                return -EINVAL;

        *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
        *action = 0;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                /* Only support a single action per rule */
                if (*action)
                        return -EINVAL;

                if (is_tcf_gact_shot(a)) {
                        *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
                        if (MLX5_CAP_FLOWTABLE(priv->mdev,
                                               flow_table_properties_nic_receive.flow_counter))
                                *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }

                if (is_tcf_skbedit_mark(a)) {
                        u32 mark = tcf_skbedit_mark(a);

                        if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
                                netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
                                            mark);
                                return -EINVAL;
                        }

                        *flow_tag = mark;
                        *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                        continue;
                }

                return -EINVAL;
        }

        return 0;
}

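/* Parse TC actions for the eswitch FDB path.  Exactly one action is
 * supported per rule: gact drop (always counted) or mirred redirect to
 * another port of the same eswitch, resolved here to a destination
 * vport.
 */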
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                u32 *action, u32 *dest_vport)
{
        const struct tc_action *a;
        LIST_HEAD(actions);

        if (tc_no_actions(exts))
                return -EINVAL;

        *action = 0;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                /* Only support a single action per rule */
                if (*action)
                        return -EINVAL;

                if (is_tcf_gact_shot(a)) {
                        *action = MLX5_FLOW_CONTEXT_ACTION_DROP |
                                  MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }

                if (is_tcf_mirred_redirect(a)) {
                        int ifindex = tcf_mirred_ifindex(a);
                        struct net_device *out_dev;
                        struct mlx5e_priv *out_priv;
                        struct mlx5_eswitch_rep *out_rep;

                        out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
                        if (!out_dev) {
                                /* the mirred device may already be gone */
                                pr_err("can't find mirred redirect device with ifindex %d\n",
                                       ifindex);
                                return -EINVAL;
                        }

                        if (!switchdev_port_same_parent_id(priv->netdev, out_dev)) {
                                pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
                                       priv->netdev->name, out_dev->name);
                                return -EINVAL;
                        }

                        out_priv = netdev_priv(out_dev);
                        out_rep  = out_priv->ppriv;
                        if (out_rep->vport == 0)
                                *dest_vport = FDB_UPLINK_VPORT;
                        else
                                *dest_vport = out_rep->vport;
                        *action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                        continue;
                }

                return -EINVAL;
        }
        return 0;
}

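/* Add or replace the offloaded flow for a cls_flower filter.  The
 * match and actions are parsed, the rule is installed in the FDB
 * (SRIOV offloads mode) or in the NIC TC table, and a previous rule
 * with the same cookie is removed only after the new one is in place.
 */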
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
                           struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        int err = 0;
        u32 flow_tag, action, dest_vport = 0;
        struct mlx5e_tc_flow *flow;
        struct mlx5_flow_spec *spec;
        struct mlx5_flow_rule *old = NULL;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (flow)
                old = flow->rule;
        else
                flow = kzalloc(sizeof(*flow), GFP_KERNEL);

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec || !flow) {
                err = -ENOMEM;
                goto err_free;
        }

        flow->cookie = f->cookie;

        err = parse_cls_flower(priv, spec, f);
        if (err < 0)
                goto err_free;

        if (esw && esw->mode == SRIOV_OFFLOADS) {
                err = parse_tc_fdb_actions(priv, f->exts, &action, &dest_vport);
                if (err < 0)
                        goto err_free;
                flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, action, dest_vport);
        } else {
                err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
                if (err < 0)
                        goto err_free;
                flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
        }

        if (IS_ERR(flow->rule)) {
                err = PTR_ERR(flow->rule);
                goto err_free;
        }

        err = rhashtable_insert_fast(&tc->ht, &flow->node,
                                     tc->ht_params);
        if (err)
                goto err_del_rule;

        if (old)
                mlx5e_tc_del_flow(priv, old);

        goto out;

err_del_rule:
        mlx5_del_flow_rule(flow->rule);

err_free:
        if (!old)
                kfree(flow);
out:
        kvfree(spec);
        return err;
}

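/* Remove the offloaded flow matching the filter cookie, if any. */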
int mlx5e_delete_flower(struct mlx5e_priv *priv,
                        struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_flow *flow;
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (!flow)
                return -EINVAL;

        rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

        mlx5e_tc_del_flow(priv, flow->rule);

        kfree(flow);

        return 0;
}

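/* Report the cached hardware counter values (bytes, packets, last use)
 * of an offloaded flow back to its TC actions.
 */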
int mlx5e_stats_flower(struct mlx5e_priv *priv,
                       struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        struct mlx5e_tc_flow *flow;
        struct tc_action *a;
        struct mlx5_fc *counter;
        LIST_HEAD(actions);
        u64 bytes;
        u64 packets;
        u64 lastuse;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (!flow)
                return -EINVAL;

        counter = mlx5_flow_rule_counter(flow->rule);
        if (!counter)
                return 0;

        mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

        tcf_exts_to_list(f->exts, &actions);
        list_for_each_entry(a, &actions, list)
                tcf_action_stats_update(a, bytes, packets, lastuse);

        return 0;
}

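/* Flows are keyed by the 64-bit TC filter cookie. */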
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
        .head_offset = offsetof(struct mlx5e_tc_flow, node),
        .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
        .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
        .automatic_shrinking = true,
};

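/* Set up the per-netdev flow hashtable. */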
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        tc->ht_params = mlx5e_tc_flow_ht_params;
        return rhashtable_init(&tc->ht, &tc->ht_params);
}

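/* rhashtable_free_and_destroy() callback: release a single flow. */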
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
        struct mlx5e_tc_flow *flow = ptr;
        struct mlx5e_priv *priv = arg;

        mlx5e_tc_del_flow(priv, flow->rule);
        kfree(flow);
}

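/* Flush all offloaded flows and destroy the TC offload table. */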
void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

        if (!IS_ERR_OR_NULL(tc->t)) {
                mlx5_destroy_flow_table(tc->t);
                tc->t = NULL;
        }
}