/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"

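/* A single offloaded TC flow, keyed by the flower cookie so the
 * replace/destroy/stats callbacks can find the hardware rule again.
 */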
struct mlx5e_tc_flow {
        struct rhash_head       node;
        u64                     cookie;
        struct mlx5_flow_rule   *rule;
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

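/* Offload a flow into the NIC receive flow table.  The table is
 * created lazily on first use and destroyed again once the last
 * rule is removed (see mlx5e_tc_del_flow()).
 */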
static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                                                    struct mlx5_flow_spec *spec,
                                                    u32 action, u32 flow_tag)
{
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_flow_destination dest = { 0 };
        struct mlx5_fc *counter = NULL;
        struct mlx5_flow_rule *rule;
        bool table_created = false;

        if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                dest.ft = priv->fs.vlan.ft.t;
        } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(dev, true);
                if (IS_ERR(counter))
                        return ERR_CAST(counter);

                dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest.counter = counter;
        }

        if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
                priv->fs.tc.t =
                        mlx5_create_auto_grouped_flow_table(priv->fs.ns,
                                                            MLX5E_TC_PRIO,
                                                            MLX5E_TC_TABLE_NUM_ENTRIES,
                                                            MLX5E_TC_TABLE_NUM_GROUPS,
                                                            0);
                if (IS_ERR(priv->fs.tc.t)) {
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
                        rule = ERR_CAST(priv->fs.tc.t);
                        goto err_create_ft;
                }

                table_created = true;
        }

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
                                  action, flow_tag,
                                  &dest);
        if (IS_ERR(rule))
                goto err_add_rule;

        return rule;

err_add_rule:
        if (table_created) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
err_create_ft:
        mlx5_fc_destroy(dev, counter);

        return rule;
}

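/* Offload a flow into the eswitch FDB.  The source vport is taken
 * from the representor this netdev belongs to; vport 0 denotes the
 * uplink.
 */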
static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                                                    struct mlx5_flow_spec *spec,
                                                    u32 action, u32 dst_vport)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_eswitch_rep *rep = priv->ppriv;
        u32 src_vport;

        if (rep->vport) /* set source vport for the flow */
                src_vport = rep->vport;
        else
                src_vport = FDB_UPLINK_VPORT;

        return mlx5_eswitch_add_offloaded_rule(esw, spec, action, src_vport, dst_vport);
}

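/* Remove an offloaded rule and its counter; drop the TC flow table
 * once no filters remain.
 */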
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5_flow_rule *rule)
{
        struct mlx5_fc *counter;

        counter = mlx5_flow_rule_counter(rule);

        mlx5_del_flow_rule(rule);

        mlx5_fc_destroy(priv->mdev, counter);

        if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
}

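/* Translate a flower classifier match into a mlx5 flow spec.  Only
 * outer L2/L3/L4 header fields are supported; anything else is
 * rejected with -EOPNOTSUPP.
 */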
static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
                            struct tc_cls_flower_offload *f)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
        u16 addr_type = 0;
        u8 ip_proto = 0;

        if (f->dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS))) {
                netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
                            f->dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->key);
                addr_type = key->addr_type;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->key);
                struct flow_dissector_key_basic *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->mask);
                ip_proto = key->ip_proto;

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
                         ntohs(mask->n_proto));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
                         ntohs(key->n_proto));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
                         mask->ip_proto);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                         key->ip_proto);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_dissector_key_eth_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->key);
                struct flow_dissector_key_eth_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->mask);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             dmac_47_16),
                                mask->dst);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             dmac_47_16),
                                key->dst);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             smac_47_16),
                                mask->src);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             smac_47_16),
                                key->src);
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &key->src, sizeof(key->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &key->dst, sizeof(key->dst));
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_dissector_key_ipv6_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv6_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &key->src, sizeof(key->src));

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &key->dst, sizeof(key->dst));
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->key);
                struct flow_dissector_key_ports *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->mask);
                switch (ip_proto) {
                case IPPROTO_TCP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_dport, ntohs(key->dst));
                        break;

                case IPPROTO_UDP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_dport, ntohs(key->dst));
                        break;
                default:
                        netdev_err(priv->netdev,
                                   "Only UDP and TCP transport are supported\n");
                        return -EINVAL;
                }
        }

        return 0;
}

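/* Translate gact drop / skbedit mark TC actions into a NIC flow
 * context action and flow tag.  Exactly one action is accepted per
 * rule.
 */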
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                u32 *action, u32 *flow_tag)
{
        const struct tc_action *a;

        if (tc_no_actions(exts))
                return -EINVAL;

        *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
        *action = 0;

        tc_for_each_action(a, exts) {
                /* Only support a single action per rule */
                if (*action)
                        return -EINVAL;

                if (is_tcf_gact_shot(a)) {
                        *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
                        if (MLX5_CAP_FLOWTABLE(priv->mdev,
                                               flow_table_properties_nic_receive.flow_counter))
                                *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }

                if (is_tcf_skbedit_mark(a)) {
                        u32 mark = tcf_skbedit_mark(a);

                        if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
                                netdev_warn(priv->netdev, "Bad flow mark - only 16 bits are supported: 0x%x\n",
                                            mark);
                                return -EINVAL;
                        }

                        *flow_tag = mark;
                        *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                        continue;
                }

                return -EINVAL;
        }

        return 0;
}

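/* Translate gact drop / mirred redirect TC actions into an eswitch
 * action and destination vport.  Redirect targets must sit behind
 * the same switch as this netdev.
 */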
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                u32 *action, u32 *dest_vport)
{
        const struct tc_action *a;

        if (tc_no_actions(exts))
                return -EINVAL;

        *action = 0;

        tc_for_each_action(a, exts) {
                /* Only support a single action per rule */
                if (*action)
                        return -EINVAL;

                if (is_tcf_gact_shot(a)) {
                        *action = MLX5_FLOW_CONTEXT_ACTION_DROP |
                                  MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }

                if (is_tcf_mirred_redirect(a)) {
                        int ifindex = tcf_mirred_ifindex(a);
                        struct net_device *out_dev;
                        struct mlx5e_priv *out_priv;
                        struct mlx5_eswitch_rep *out_rep;

                        out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
                        if (!out_dev) {
                                pr_err("device with ifindex %d not found, can't offload forwarding\n",
                                       ifindex);
                                return -EINVAL;
                        }

                        if (!switchdev_port_same_parent_id(priv->netdev, out_dev)) {
                                pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
                                       priv->netdev->name, out_dev->name);
                                return -EINVAL;
                        }

                        out_priv = netdev_priv(out_dev);
                        out_rep  = out_priv->ppriv;
                        if (out_rep->vport == 0)
                                *dest_vport = FDB_UPLINK_VPORT;
                        else
                                *dest_vport = out_rep->vport;
                        *action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                        continue;
                }

                return -EINVAL;
        }
        return 0;
}

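/* Add or replace a flower classifier offload.  On replace, the new
 * hardware rule is installed before the old one is removed.
 */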
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
                           struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        int err = 0;
        u32 flow_tag, action, dest_vport = 0;
        struct mlx5e_tc_flow *flow;
        struct mlx5_flow_spec *spec;
        struct mlx5_flow_rule *old = NULL;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (flow)
                old = flow->rule;
        else
                flow = kzalloc(sizeof(*flow), GFP_KERNEL);

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec || !flow) {
                err = -ENOMEM;
                goto err_free;
        }

        flow->cookie = f->cookie;

        err = parse_cls_flower(priv, spec, f);
        if (err < 0)
                goto err_free;

        if (esw && esw->mode == SRIOV_OFFLOADS) {
                err = parse_tc_fdb_actions(priv, f->exts, &action, &dest_vport);
                if (err < 0)
                        goto err_free;
                flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, action, dest_vport);
        } else {
                err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
                if (err < 0)
                        goto err_free;
                flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
        }

        if (IS_ERR(flow->rule)) {
                err = PTR_ERR(flow->rule);
                goto err_free;
        }

        err = rhashtable_insert_fast(&tc->ht, &flow->node,
                                     tc->ht_params);
        if (err)
                goto err_del_rule;

        if (old)
                mlx5e_tc_del_flow(priv, old);

        goto out;

err_del_rule:
        mlx5_del_flow_rule(flow->rule);

err_free:
        if (!old)
                kfree(flow);
out:
        kvfree(spec);
        return err;
}

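/* Remove the offload state for a flower classifier being deleted. */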
int mlx5e_delete_flower(struct mlx5e_priv *priv,
                        struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_flow *flow;
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (!flow)
                return -EINVAL;

        rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

        mlx5e_tc_del_flow(priv, flow->rule);

        kfree(flow);

        return 0;
}

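/* Report cached hardware counters (bytes/packets/last use) back into
 * the flow's TC actions.
 */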
int mlx5e_stats_flower(struct mlx5e_priv *priv,
                       struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        struct mlx5e_tc_flow *flow;
        struct tc_action *a;
        struct mlx5_fc *counter;
        u64 bytes;
        u64 packets;
        u64 lastuse;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (!flow)
                return -EINVAL;

        counter = mlx5_flow_rule_counter(flow->rule);
        if (!counter)
                return 0;

        mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

        tc_for_each_action(a, f->exts)
                tcf_action_stats_update(a, bytes, packets, lastuse);

        return 0;
}

static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
        .head_offset = offsetof(struct mlx5e_tc_flow, node),
        .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
        .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
        .automatic_shrinking = true,
};

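/* Set up the cookie -> flow hash table. */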
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        tc->ht_params = mlx5e_tc_flow_ht_params;
        return rhashtable_init(&tc->ht, &tc->ht_params);
}

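/* rhashtable_free_and_destroy() callback: release one flow. */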
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
        struct mlx5e_tc_flow *flow = ptr;
        struct mlx5e_priv *priv = arg;

        mlx5e_tc_del_flow(priv, flow->rule);
        kfree(flow);
}

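/* Tear down all offloaded flows and the TC flow table itself. */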
void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

        if (!IS_ERR_OR_NULL(tc->t)) {
                mlx5_destroy_flow_table(tc->t);
                tc->t = NULL;
        }
}