net/mlx5: Add user chosen levels when allocating flow tables
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_fs.c
1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <linux/list.h>
34 #include <linux/ip.h>
35 #include <linux/ipv6.h>
36 #include <linux/tcp.h>
37 #include <linux/mlx5/fs.h>
38 #include "en.h"
39
/* Flow table levels: the VLAN table sits at a lower level than the
 * main table so VLAN rules can forward into it.
 */
enum {
	MLX5E_VLAN_FT_LEVEL = 0,
	MLX5E_MAIN_FT_LEVEL
};
44
/* Shorthand for setting fields of a create_flow_group_in mailbox. */
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

/* Match type used when installing an address steering rule. */
enum {
	MLX5E_FULLMATCH = 0,	/* exact DMAC match */
	MLX5E_ALLMULTI  = 1,	/* any multicast DMAC */
	MLX5E_PROMISC   = 2,	/* match everything */
};

/* Classification of an Ethernet destination address. */
enum {
	MLX5E_UC        = 0,
	MLX5E_MC_IPV4   = 1,
	MLX5E_MC_IPV6   = 2,
	MLX5E_MC_OTHER  = 3,
};

/* Pending action for an address tracked in the driver hash tables. */
enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD  = 1,
	MLX5E_ACTION_DEL  = 2,
};

/* One tracked Ethernet address plus its pending sync action. */
struct mlx5e_eth_addr_hash_node {
	struct hlist_node          hlist;
	u8                         action;
	struct mlx5e_eth_addr_info ai;
};
71
72 static inline int mlx5e_hash_eth_addr(u8 *addr)
73 {
74         return addr[5];
75 }
76
77 static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
78 {
79         struct mlx5e_eth_addr_hash_node *hn;
80         int ix = mlx5e_hash_eth_addr(addr);
81         int found = 0;
82
83         hlist_for_each_entry(hn, &hash[ix], hlist)
84                 if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
85                         found = 1;
86                         break;
87                 }
88
89         if (found) {
90                 hn->action = MLX5E_ACTION_NONE;
91                 return;
92         }
93
94         hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
95         if (!hn)
96                 return;
97
98         ether_addr_copy(hn->ai.addr, addr);
99         hn->action = MLX5E_ACTION_ADD;
100
101         hlist_add_head(&hn->hlist, &hash[ix]);
102 }
103
/* Unlink an address node from its hash bucket and free it. */
static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}
109
110 static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
111                                                struct mlx5e_eth_addr_info *ai)
112 {
113         if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
114                 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
115
116         if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
117                 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
118
119         if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
120                 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
121
122         if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
123                 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
124
125         if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
126                 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);
127
128         if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
129                 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);
130
131         if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
132                 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);
133
134         if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
135                 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);
136
137         if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
138                 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);
139
140         if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
141                 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);
142
143         if (ai->tt_vec & BIT(MLX5E_TT_ANY))
144                 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);
145 }
146
147 static int mlx5e_get_eth_addr_type(u8 *addr)
148 {
149         if (is_unicast_ether_addr(addr))
150                 return MLX5E_UC;
151
152         if ((addr[0] == 0x01) &&
153             (addr[1] == 0x00) &&
154             (addr[2] == 0x5e) &&
155            !(addr[3] &  0x80))
156                 return MLX5E_MC_IPV4;
157
158         if ((addr[0] == 0x33) &&
159             (addr[1] == 0x33))
160                 return MLX5E_MC_IPV6;
161
162         return MLX5E_MC_OTHER;
163 }
164
165 static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
166 {
167         int eth_addr_type;
168         u32 ret;
169
170         switch (type) {
171         case MLX5E_FULLMATCH:
172                 eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
173                 switch (eth_addr_type) {
174                 case MLX5E_UC:
175                         ret =
176                                 BIT(MLX5E_TT_IPV4_TCP)       |
177                                 BIT(MLX5E_TT_IPV6_TCP)       |
178                                 BIT(MLX5E_TT_IPV4_UDP)       |
179                                 BIT(MLX5E_TT_IPV6_UDP)       |
180                                 BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
181                                 BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
182                                 BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
183                                 BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
184                                 BIT(MLX5E_TT_IPV4)           |
185                                 BIT(MLX5E_TT_IPV6)           |
186                                 BIT(MLX5E_TT_ANY)            |
187                                 0;
188                         break;
189
190                 case MLX5E_MC_IPV4:
191                         ret =
192                                 BIT(MLX5E_TT_IPV4_UDP)       |
193                                 BIT(MLX5E_TT_IPV4)           |
194                                 0;
195                         break;
196
197                 case MLX5E_MC_IPV6:
198                         ret =
199                                 BIT(MLX5E_TT_IPV6_UDP)       |
200                                 BIT(MLX5E_TT_IPV6)           |
201                                 0;
202                         break;
203
204                 case MLX5E_MC_OTHER:
205                         ret =
206                                 BIT(MLX5E_TT_ANY)            |
207                                 0;
208                         break;
209                 }
210
211                 break;
212
213         case MLX5E_ALLMULTI:
214                 ret =
215                         BIT(MLX5E_TT_IPV4_UDP) |
216                         BIT(MLX5E_TT_IPV6_UDP) |
217                         BIT(MLX5E_TT_IPV4)     |
218                         BIT(MLX5E_TT_IPV6)     |
219                         BIT(MLX5E_TT_ANY)      |
220                         0;
221                 break;
222
223         default: /* MLX5E_PROMISC */
224                 ret =
225                         BIT(MLX5E_TT_IPV4_TCP)       |
226                         BIT(MLX5E_TT_IPV6_TCP)       |
227                         BIT(MLX5E_TT_IPV4_UDP)       |
228                         BIT(MLX5E_TT_IPV6_UDP)       |
229                         BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
230                         BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
231                         BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
232                         BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
233                         BIT(MLX5E_TT_IPV4)           |
234                         BIT(MLX5E_TT_IPV6)           |
235                         BIT(MLX5E_TT_ANY)            |
236                         0;
237                 break;
238         }
239
240         return ret;
241 }
242
243 static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
244                                      struct mlx5e_eth_addr_info *ai,
245                                      int type, u32 *mc, u32 *mv)
246 {
247         struct mlx5_flow_destination dest;
248         u8 match_criteria_enable = 0;
249         struct mlx5_flow_rule **rule_p;
250         struct mlx5_flow_table *ft = priv->fts.main.t;
251         u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
252                                    outer_headers.dmac_47_16);
253         u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
254                                    outer_headers.dmac_47_16);
255         u32 *tirn = priv->indir_tirn;
256         u32 tt_vec;
257         int err = 0;
258
259         dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
260
261         switch (type) {
262         case MLX5E_FULLMATCH:
263                 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
264                 eth_broadcast_addr(mc_dmac);
265                 ether_addr_copy(mv_dmac, ai->addr);
266                 break;
267
268         case MLX5E_ALLMULTI:
269                 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
270                 mc_dmac[0] = 0x01;
271                 mv_dmac[0] = 0x01;
272                 break;
273
274         case MLX5E_PROMISC:
275                 break;
276         }
277
278         tt_vec = mlx5e_get_tt_vec(ai, type);
279
280         if (tt_vec & BIT(MLX5E_TT_ANY)) {
281                 rule_p = &ai->ft_rule[MLX5E_TT_ANY];
282                 dest.tir_num = priv->direct_tir[0].tirn;
283                 *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
284                                              MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
285                                              MLX5_FS_DEFAULT_FLOW_TAG, &dest);
286                 if (IS_ERR_OR_NULL(*rule_p))
287                         goto err_del_ai;
288                 ai->tt_vec |= BIT(MLX5E_TT_ANY);
289         }
290
291         match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
292         MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
293
294         if (tt_vec & BIT(MLX5E_TT_IPV4)) {
295                 rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
296                 dest.tir_num = tirn[MLX5E_TT_IPV4];
297                 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
298                          ETH_P_IP);
299                 *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
300                                              MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
301                                              MLX5_FS_DEFAULT_FLOW_TAG, &dest);
302                 if (IS_ERR_OR_NULL(*rule_p))
303                         goto err_del_ai;
304                 ai->tt_vec |= BIT(MLX5E_TT_IPV4);
305         }
306
307         if (tt_vec & BIT(MLX5E_TT_IPV6)) {
308                 rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
309                 dest.tir_num = tirn[MLX5E_TT_IPV6];
310                 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
311                          ETH_P_IPV6);
312                 *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
313                                              MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
314                                              MLX5_FS_DEFAULT_FLOW_TAG, &dest);
315                 if (IS_ERR_OR_NULL(*rule_p))
316                         goto err_del_ai;
317                 ai->tt_vec |= BIT(MLX5E_TT_IPV6);
318         }
319
320         MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
321         MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
322
323         if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
324                 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
325                 dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
326                 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
327                          ETH_P_IP);
328                 *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
329                                              MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
330                                              MLX5_FS_DEFAULT_FLOW_TAG, &dest);
331                 if (IS_ERR_OR_NULL(*rule_p))
332                         goto err_del_ai;
333                 ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
334         }
335
336         if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
337                 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
338                 dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
339                 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
340                          ETH_P_IPV6);
341                 *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
342                                              MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
343                                              MLX5_FS_DEFAULT_FLOW_TAG, &dest);
344                 if (IS_ERR_OR_NULL(*rule_p))
345                         goto err_del_ai;
346                 ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
347         }
348
349         MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);
350
351         if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
352                 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
353                 dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
354                 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
355                          ETH_P_IP);
356                 *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
357                                              MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
358                                              MLX5_FS_DEFAULT_FLOW_TAG, &dest);
359                 if (IS_ERR_OR_NULL(*rule_p))
360                         goto err_del_ai;
361                 ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
362         }
363
364         if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
365                 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
366                 dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
367                 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
368                          ETH_P_IPV6);
369                 *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
370                                              MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
371                                              MLX5_FS_DEFAULT_FLOW_TAG, &dest);
372                 if (IS_ERR_OR_NULL(*rule_p))
373                         goto err_del_ai;
374
375                 ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
376         }
377
378         MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);
379
380         if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
381                 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
382                 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
383                 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
384                          ETH_P_IP);
385                 *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
386                                              MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
387                                              MLX5_FS_DEFAULT_FLOW_TAG, &dest);
388                 if (IS_ERR_OR_NULL(*rule_p))
389                         goto err_del_ai;
390                 ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
391         }
392
393         if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
394                 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
395                 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
396                 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
397                          ETH_P_IPV6);
398                 *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
399                                              MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
400                                              MLX5_FS_DEFAULT_FLOW_TAG, &dest);
401                 if (IS_ERR_OR_NULL(*rule_p))
402                         goto err_del_ai;
403                 ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
404         }
405
406         MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);
407
408         if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
409                 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
410                 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
411                 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
412                          ETH_P_IP);
413                 *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
414                                              MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
415                                              MLX5_FS_DEFAULT_FLOW_TAG, &dest);
416                 if (IS_ERR_OR_NULL(*rule_p))
417                         goto err_del_ai;
418                 ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
419         }
420
421         if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
422                 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
423                 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
424                 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
425                          ETH_P_IPV6);
426                 *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
427                                              MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
428                                              MLX5_FS_DEFAULT_FLOW_TAG, &dest);
429                 if (IS_ERR_OR_NULL(*rule_p))
430                         goto err_del_ai;
431                 ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
432         }
433
434         return 0;
435
436 err_del_ai:
437         err = PTR_ERR(*rule_p);
438         *rule_p = NULL;
439         mlx5e_del_eth_addr_from_flow_table(priv, ai);
440
441         return err;
442 }
443
444 static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
445                                    struct mlx5e_eth_addr_info *ai, int type)
446 {
447         u32 *match_criteria;
448         u32 *match_value;
449         int err = 0;
450
451         match_value     = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
452         match_criteria  = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
453         if (!match_value || !match_criteria) {
454                 netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
455                 err = -ENOMEM;
456                 goto add_eth_addr_rule_out;
457         }
458
459         err = __mlx5e_add_eth_addr_rule(priv, ai, type, match_criteria,
460                                         match_value);
461
462 add_eth_addr_rule_out:
463         kvfree(match_criteria);
464         kvfree(match_value);
465
466         return err;
467 }
468
469 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
470 {
471         struct net_device *ndev = priv->netdev;
472         int max_list_size;
473         int list_size;
474         u16 *vlans;
475         int vlan;
476         int err;
477         int i;
478
479         list_size = 0;
480         for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
481                 list_size++;
482
483         max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
484
485         if (list_size > max_list_size) {
486                 netdev_warn(ndev,
487                             "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
488                             list_size, max_list_size);
489                 list_size = max_list_size;
490         }
491
492         vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
493         if (!vlans)
494                 return -ENOMEM;
495
496         i = 0;
497         for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
498                 if (i >= list_size)
499                         break;
500                 vlans[i++] = vlan;
501         }
502
503         err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
504         if (err)
505                 netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
506                            err);
507
508         kfree(vlans);
509         return err;
510 }
511
/* Kinds of VLAN steering rules the driver installs. */
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,	/* frames with no VLAN tag */
	MLX5E_VLAN_RULE_TYPE_ANY_VID,	/* any tagged frame */
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,	/* a specific VLAN id */
};
517
518 static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
519                                  enum mlx5e_vlan_rule_type rule_type,
520                                  u16 vid, u32 *mc, u32 *mv)
521 {
522         struct mlx5_flow_table *ft = priv->fts.vlan.t;
523         struct mlx5_flow_destination dest;
524         u8 match_criteria_enable = 0;
525         struct mlx5_flow_rule **rule_p;
526         int err = 0;
527
528         dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
529         dest.ft = priv->fts.main.t;
530
531         match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
532         MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
533
534         switch (rule_type) {
535         case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
536                 rule_p = &priv->vlan.untagged_rule;
537                 break;
538         case MLX5E_VLAN_RULE_TYPE_ANY_VID:
539                 rule_p = &priv->vlan.any_vlan_rule;
540                 MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
541                 break;
542         default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
543                 rule_p = &priv->vlan.active_vlans_rule[vid];
544                 MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
545                 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
546                 MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
547                 break;
548         }
549
550         *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
551                                      MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
552                                      MLX5_FS_DEFAULT_FLOW_TAG,
553                                      &dest);
554
555         if (IS_ERR(*rule_p)) {
556                 err = PTR_ERR(*rule_p);
557                 *rule_p = NULL;
558                 netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
559         }
560
561         return err;
562 }
563
564 static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
565                                enum mlx5e_vlan_rule_type rule_type, u16 vid)
566 {
567         u32 *match_criteria;
568         u32 *match_value;
569         int err = 0;
570
571         match_value     = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
572         match_criteria  = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
573         if (!match_value || !match_criteria) {
574                 netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
575                 err = -ENOMEM;
576                 goto add_vlan_rule_out;
577         }
578
579         if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
580                 mlx5e_vport_context_update_vlans(priv);
581
582         err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria,
583                                     match_value);
584
585 add_vlan_rule_out:
586         kvfree(match_criteria);
587         kvfree(match_value);
588
589         return err;
590 }
591
/* Remove a previously installed VLAN steering rule.  For MATCH_VID the
 * vport-context VLAN list is refreshed as well.
 */
static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
                                enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
        switch (rule_type) {
        case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
                if (priv->vlan.untagged_rule) {
                        mlx5_del_flow_rule(priv->vlan.untagged_rule);
                        priv->vlan.untagged_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_VID:
                if (priv->vlan.any_vlan_rule) {
                        mlx5_del_flow_rule(priv->vlan.any_vlan_rule);
                        priv->vlan.any_vlan_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
                /* NOTE(review): update_vlans() is called both before and
                 * after deleting the rule; the second call looks redundant —
                 * confirm whether both are intentional.
                 */
                mlx5e_vport_context_update_vlans(priv);
                if (priv->vlan.active_vlans_rule[vid]) {
                        mlx5_del_flow_rule(priv->vlan.active_vlans_rule[vid]);
                        priv->vlan.active_vlans_rule[vid] = NULL;
                }
                mlx5e_vport_context_update_vlans(priv);
                break;
        }
}
618
619 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
620 {
621         if (!priv->vlan.filter_disabled)
622                 return;
623
624         priv->vlan.filter_disabled = false;
625         if (priv->netdev->flags & IFF_PROMISC)
626                 return;
627         mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
628 }
629
630 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
631 {
632         if (priv->vlan.filter_disabled)
633                 return;
634
635         priv->vlan.filter_disabled = true;
636         if (priv->netdev->flags & IFF_PROMISC)
637                 return;
638         mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
639 }
640
641 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
642                           u16 vid)
643 {
644         struct mlx5e_priv *priv = netdev_priv(dev);
645
646         set_bit(vid, priv->vlan.active_vlans);
647
648         return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
649 }
650
651 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
652                            u16 vid)
653 {
654         struct mlx5e_priv *priv = netdev_priv(dev);
655
656         clear_bit(vid, priv->vlan.active_vlans);
657
658         mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
659
660         return 0;
661 }
662
/* Iterate over every node of an MLX5E_ETH_ADDR_HASH_SIZE-bucket hash
 * table; safe against removal of the current node.
 */
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
666
667 static void mlx5e_execute_action(struct mlx5e_priv *priv,
668                                  struct mlx5e_eth_addr_hash_node *hn)
669 {
670         switch (hn->action) {
671         case MLX5E_ACTION_ADD:
672                 mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
673                 hn->action = MLX5E_ACTION_NONE;
674                 break;
675
676         case MLX5E_ACTION_DEL:
677                 mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
678                 mlx5e_del_eth_addr_from_hash(hn);
679                 break;
680         }
681 }
682
/* Snapshot the netdev's UC/MC address lists (plus our own dev_addr)
 * into the driver hash tables under the netdev address lock.
 */
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	/* Our own MAC must always be present in the UC table. */
	mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
				   priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}
701
702 static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
703                                   u8 addr_array[][ETH_ALEN], int size)
704 {
705         bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
706         struct net_device *ndev = priv->netdev;
707         struct mlx5e_eth_addr_hash_node *hn;
708         struct hlist_head *addr_list;
709         struct hlist_node *tmp;
710         int i = 0;
711         int hi;
712
713         addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
714
715         if (is_uc) /* Make sure our own address is pushed first */
716                 ether_addr_copy(addr_array[i++], ndev->dev_addr);
717         else if (priv->eth_addr.broadcast_enabled)
718                 ether_addr_copy(addr_array[i++], ndev->broadcast);
719
720         mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
721                 if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
722                         continue;
723                 if (i >= size)
724                         break;
725                 ether_addr_copy(addr_array[i++], hn->ai.addr);
726         }
727 }
728
729 static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
730                                                  int list_type)
731 {
732         bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
733         struct mlx5e_eth_addr_hash_node *hn;
734         u8 (*addr_array)[ETH_ALEN] = NULL;
735         struct hlist_head *addr_list;
736         struct hlist_node *tmp;
737         int max_size;
738         int size;
739         int err;
740         int hi;
741
742         size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
743         max_size = is_uc ?
744                 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
745                 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
746
747         addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
748         mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
749                 size++;
750
751         if (size > max_size) {
752                 netdev_warn(priv->netdev,
753                             "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
754                             is_uc ? "UC" : "MC", size, max_size);
755                 size = max_size;
756         }
757
758         if (size) {
759                 addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
760                 if (!addr_array) {
761                         err = -ENOMEM;
762                         goto out;
763                 }
764                 mlx5e_fill_addr_array(priv, list_type, addr_array, size);
765         }
766
767         err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
768 out:
769         if (err)
770                 netdev_err(priv->netdev,
771                            "Failed to modify vport %s list err(%d)\n",
772                            is_uc ? "UC" : "MC", err);
773         kfree(addr_array);
774 }
775
/* Push the current UC/MC lists and promisc/allmulti state to the NIC
 * vport context.
 */
static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}
786
787 static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
788 {
789         struct mlx5e_eth_addr_hash_node *hn;
790         struct hlist_node *tmp;
791         int i;
792
793         mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
794                 mlx5e_execute_action(priv, hn);
795
796         mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
797                 mlx5e_execute_action(priv, hn);
798 }
799
800 static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
801 {
802         struct mlx5e_eth_addr_hash_node *hn;
803         struct hlist_node *tmp;
804         int i;
805
806         mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
807                 hn->action = MLX5E_ACTION_DEL;
808         mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
809                 hn->action = MLX5E_ACTION_DEL;
810
811         if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
812                 mlx5e_sync_netdev_addr(priv);
813
814         mlx5e_apply_netdev_addr(priv);
815 }
816
/* Workqueue handler that brings the device's RX filtering rules in line
 * with the netdev flags (IFF_PROMISC/IFF_ALLMULTI) and address lists.
 * Rules are added before stale ones are removed so matching traffic is
 * not dropped during the transition.  When the MLX5E_STATE_DESTROYING
 * bit is set, all "enabled" states evaluate to false and every rule is
 * torn down.
 */
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct net_device *ndev = priv->netdev;

	/* Desired state, derived from netdev flags (forced off on destroy). */
	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	/* Transitions: compare desired state against what is programmed. */
	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
		/* Promisc must also see traffic with any VLAN tag; add the
		 * any-VID rule unless VLAN filtering is already disabled
		 * (in which case it exists).
		 */
		if (!priv->vlan.filter_disabled)
			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
					    0);
	}
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	/* Remove rules only after the new state is in place. */
	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->vlan.filter_disabled)
			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
					    0);
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
	}

	/* Record the newly programmed state. */
	ea->promisc_enabled   = promisc_enabled;
	ea->allmulti_enabled  = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	/* Mirror the final state into the NIC vport context. */
	mlx5e_vport_context_update(priv);
}
867
868 static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
869 {
870         int i;
871
872         for (i = ft->num_groups - 1; i >= 0; i--) {
873                 if (!IS_ERR_OR_NULL(ft->g[i]))
874                         mlx5_destroy_flow_group(ft->g[i]);
875                 ft->g[i] = NULL;
876         }
877         ft->num_groups = 0;
878 }
879
/* Seed the eth_addr database with the netdev's broadcast address so the
 * broadcast flow rule can be installed later.
 */
void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
}
884
/* Flow-entry budget for each group of the main table; the table size is
 * their sum.  Group match criteria are set up in
 * __mlx5e_create_main_groups():
 *   groups 0-2: no DMAC in the mask (ethertype+ip_protocol / ethertype /
 *               catch-all),
 *   groups 3-5: full 48-bit DMAC mask plus the same ethertype variants
 *               (group 3 is the largest, holding the per-address rules),
 *   groups 6-8: only the DMAC multicast bit in the mask.
 */
#define MLX5E_MAIN_GROUP0_SIZE	BIT(3)
#define MLX5E_MAIN_GROUP1_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP2_SIZE	BIT(0)
#define MLX5E_MAIN_GROUP3_SIZE	BIT(14)
#define MLX5E_MAIN_GROUP4_SIZE	BIT(13)
#define MLX5E_MAIN_GROUP5_SIZE	BIT(11)
#define MLX5E_MAIN_GROUP6_SIZE	BIT(2)
#define MLX5E_MAIN_GROUP7_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP8_SIZE	BIT(0)
#define MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
				 MLX5E_MAIN_GROUP1_SIZE +\
				 MLX5E_MAIN_GROUP2_SIZE +\
				 MLX5E_MAIN_GROUP3_SIZE +\
				 MLX5E_MAIN_GROUP4_SIZE +\
				 MLX5E_MAIN_GROUP5_SIZE +\
				 MLX5E_MAIN_GROUP6_SIZE +\
				 MLX5E_MAIN_GROUP7_SIZE +\
				 MLX5E_MAIN_GROUP8_SIZE)
903
/* Create the nine flow groups of the main table, ordered from most to
 * least specific match criteria.  Each group occupies a contiguous
 * [start_flow_index, end_flow_index] range tracked by @ix; the ranges
 * must match the MLX5E_MAIN_GROUP*_SIZE budget exactly, since together
 * they cover MLX5E_MAIN_TABLE_SIZE.
 *
 * @in is a caller-provided scratch buffer of @inlen bytes holding the
 * create_flow_group_in layout; it is re-zeroed before describing each
 * group.  @dmac points INTO that buffer (at the DMAC match-criteria
 * field), so eth_broadcast_addr(dmac) sets a full 48-bit DMAC mask and
 * dmac[0] = 0x01 masks only the multicast bit.
 *
 * On failure, destroys any groups already created and returns the
 * ERR_PTR error code from the failed mlx5_create_flow_group() call.
 */
static int __mlx5e_create_main_groups(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
				match_criteria.outer_headers.dmac_47_16);
	int err;
	int ix = 0;

	/* Group 0: ethertype + ip_protocol. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 1: ethertype only. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 2: no match criteria (catch-all). */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 3: full DMAC + ethertype + ip_protocol (per-address rules). */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	eth_broadcast_addr(dmac);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 4: full DMAC + ethertype. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	eth_broadcast_addr(dmac);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP4_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 5: full DMAC only. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	eth_broadcast_addr(dmac);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP5_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 6: DMAC multicast bit + ethertype + ip_protocol. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP6_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 7: DMAC multicast bit + ethertype. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP7_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 8: DMAC multicast bit only. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP8_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	/* Extract the error from the failed slot, clear it, and roll back
	 * the groups created so far.
	 */
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}
1026
1027 static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
1028 {
1029         u32 *in;
1030         int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1031         int err;
1032
1033         in = mlx5_vzalloc(inlen);
1034         if (!in)
1035                 return -ENOMEM;
1036
1037         err = __mlx5e_create_main_groups(ft, in, inlen);
1038
1039         kvfree(in);
1040         return err;
1041 }
1042
1043 static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
1044 {
1045         struct mlx5e_flow_table *ft = &priv->fts.main;
1046         int err;
1047
1048         ft->num_groups = 0;
1049         ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_MAIN_TABLE_SIZE,
1050                                        MLX5E_MAIN_FT_LEVEL);
1051
1052         if (IS_ERR(ft->t)) {
1053                 err = PTR_ERR(ft->t);
1054                 ft->t = NULL;
1055                 return err;
1056         }
1057         ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1058         if (!ft->g) {
1059                 err = -ENOMEM;
1060                 goto err_destroy_main_flow_table;
1061         }
1062
1063         err = mlx5e_create_main_groups(ft);
1064         if (err)
1065                 goto err_free_g;
1066         return 0;
1067
1068 err_free_g:
1069         kfree(ft->g);
1070
1071 err_destroy_main_flow_table:
1072         mlx5_destroy_flow_table(ft->t);
1073         ft->t = NULL;
1074
1075         return err;
1076 }
1077
/* Tear down a flow table: groups first (they reference the table), then
 * the group array, then the table itself.
 */
static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}
1085
/* Destroy the main flow table (counterpart of
 * mlx5e_create_main_flow_table()).
 */
static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.main);
}
1090
/* VLAN table layout (see __mlx5e_create_vlan_groups()):
 * group 0 matches vlan_tag + first_vid (one entry per VID, 4096 max),
 * group 1 matches vlan_tag only (untagged / any-VID rules).
 */
#define MLX5E_NUM_VLAN_GROUPS	2
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE)
1096
/* Create the two flow groups of the VLAN table using the caller's
 * scratch buffer @in (@inlen bytes, re-zeroed per group).  Group index
 * ranges are tracked by @ix and must match the MLX5E_VLAN_GROUP*_SIZE
 * budget.  On failure, rolls back any group already created and returns
 * the ERR_PTR error code from mlx5_create_flow_group().
 */
static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Group 0: vlan_tag + first_vid (per-VID filtering rules). */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 1: vlan_tag only (untagged / any-VID rules). */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	/* Extract the error from the failed slot, clear it, and roll back. */
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}
1136
1137 static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
1138 {
1139         u32 *in;
1140         int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1141         int err;
1142
1143         in = mlx5_vzalloc(inlen);
1144         if (!in)
1145                 return -ENOMEM;
1146
1147         err = __mlx5e_create_vlan_groups(ft, in, inlen);
1148
1149         kvfree(in);
1150         return err;
1151 }
1152
1153 static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
1154 {
1155         struct mlx5e_flow_table *ft = &priv->fts.vlan;
1156         int err;
1157
1158         ft->num_groups = 0;
1159         ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_VLAN_TABLE_SIZE,
1160                                        MLX5E_VLAN_FT_LEVEL);
1161
1162         if (IS_ERR(ft->t)) {
1163                 err = PTR_ERR(ft->t);
1164                 ft->t = NULL;
1165                 return err;
1166         }
1167         ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1168         if (!ft->g) {
1169                 err = -ENOMEM;
1170                 goto err_destroy_vlan_flow_table;
1171         }
1172
1173         err = mlx5e_create_vlan_groups(ft);
1174         if (err)
1175                 goto err_free_g;
1176
1177         err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
1178         if (err)
1179                 goto err_destroy_vlan_flow_groups;
1180
1181         return 0;
1182
1183 err_destroy_vlan_flow_groups:
1184         mlx5e_destroy_groups(ft);
1185 err_free_g:
1186         kfree(ft->g);
1187 err_destroy_vlan_flow_table:
1188         mlx5_destroy_flow_table(ft->t);
1189         ft->t = NULL;
1190
1191         return err;
1192 }
1193
/* Destroy the VLAN flow table (counterpart of
 * mlx5e_create_vlan_flow_table()).
 */
static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.vlan);
}
1198
1199 int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
1200 {
1201         int err;
1202
1203         priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
1204                                                MLX5_FLOW_NAMESPACE_KERNEL);
1205
1206         if (!priv->fts.ns)
1207                 return -EINVAL;
1208
1209         err = mlx5e_create_main_flow_table(priv);
1210         if (err)
1211                 return err;
1212
1213         err = mlx5e_create_vlan_flow_table(priv);
1214         if (err)
1215                 goto err_destroy_main_flow_table;
1216
1217         return 0;
1218
1219 err_destroy_main_flow_table:
1220         mlx5e_destroy_main_flow_table(priv);
1221
1222         return err;
1223 }
1224
/* Tear down all RX flow tables in reverse creation order: remove the
 * default untagged-VLAN rule, then destroy the VLAN table, then the
 * main table.
 */
void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	mlx5e_destroy_vlan_flow_table(priv);
	mlx5e_destroy_main_flow_table(priv);
}