/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"
enum {
	MLX5E_VLAN_FT_LEVEL = 0,
	MLX5E_MAIN_FT_LEVEL,
};

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

/* Address-rule match types and Ethernet address classes used below. */
enum { MLX5E_FULLMATCH = 0, MLX5E_ALLMULTI, MLX5E_PROMISC };
enum { MLX5E_UC = 0, MLX5E_MC_IPV4, MLX5E_MC_IPV6, MLX5E_MC_OTHER };

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD,
	MLX5E_ACTION_DEL,
};
struct mlx5e_eth_addr_hash_node {
	struct hlist_node          hlist;
	u8                         action;
	struct mlx5e_eth_addr_info ai;
};
static inline int mlx5e_hash_eth_addr(u8 *addr)
{
	return addr[5];
}
static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
{
	struct mlx5e_eth_addr_hash_node *hn;
	int ix = mlx5e_hash_eth_addr(addr);
	int found = 0;

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			found = 1;
			break;
		}

	if (found) {
		hn->action = MLX5E_ACTION_NONE;
		return;
	}

	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}
static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}
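/* Remove every flow rule that was installed for this address; ai->tt_vec
 * records which traffic-type rules actually exist, so only those are freed.
 */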
static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
					       struct mlx5e_eth_addr_info *ai)
{
	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);

	if (ai->tt_vec & BIT(MLX5E_TT_ANY))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);
}
static int mlx5e_get_eth_addr_type(u8 *addr)
{
	if (is_unicast_ether_addr(addr))
		return MLX5E_UC;

	if ((addr[0] == 0x01) &&
	    (addr[1] == 0x00) &&
	    (addr[2] == 0x5e) &&
	   !(addr[3] &  0x80))
		return MLX5E_MC_IPV4;

	if ((addr[0] == 0x33) &&
	    (addr[1] == 0x33))
		return MLX5E_MC_IPV6;

	return MLX5E_MC_OTHER;
}
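/* Build the bitmask of traffic types (MLX5E_TT_*) for which a steering rule
 * should be installed, based on the rule match type and, for FULLMATCH
 * rules, on whether the address is unicast, IPv4/IPv6 multicast or other.
 */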
static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
{
	int eth_addr_type;
	u32 ret;

	switch (type) {
	case MLX5E_FULLMATCH:
		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
		switch (eth_addr_type) {
		case MLX5E_UC:
			ret = BIT(MLX5E_TT_IPV4_TCP) |
			      BIT(MLX5E_TT_IPV6_TCP) |
			      BIT(MLX5E_TT_IPV4_UDP) |
			      BIT(MLX5E_TT_IPV6_UDP) |
			      BIT(MLX5E_TT_IPV4_IPSEC_AH) |
			      BIT(MLX5E_TT_IPV6_IPSEC_AH) |
			      BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
			      BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
			      BIT(MLX5E_TT_IPV4) |
			      BIT(MLX5E_TT_IPV6) |
			      BIT(MLX5E_TT_ANY);
			break;
		case MLX5E_MC_IPV4:
			ret = BIT(MLX5E_TT_IPV4_UDP) |
			      BIT(MLX5E_TT_IPV4);
			break;
		case MLX5E_MC_IPV6:
			ret = BIT(MLX5E_TT_IPV6_UDP) |
			      BIT(MLX5E_TT_IPV6);
			break;
		default: /* MLX5E_MC_OTHER */
			ret = BIT(MLX5E_TT_ANY);
			break;
		}
		break;

	case MLX5E_ALLMULTI:
		ret = BIT(MLX5E_TT_IPV4_UDP) |
		      BIT(MLX5E_TT_IPV6_UDP) |
		      BIT(MLX5E_TT_IPV4) |
		      BIT(MLX5E_TT_IPV6) |
		      BIT(MLX5E_TT_ANY);
		break;

	default: /* MLX5E_PROMISC */
		ret = BIT(MLX5E_TT_IPV4_TCP) |
		      BIT(MLX5E_TT_IPV6_TCP) |
		      BIT(MLX5E_TT_IPV4_UDP) |
		      BIT(MLX5E_TT_IPV6_UDP) |
		      BIT(MLX5E_TT_IPV4_IPSEC_AH) |
		      BIT(MLX5E_TT_IPV6_IPSEC_AH) |
		      BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
		      BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
		      BIT(MLX5E_TT_IPV4) |
		      BIT(MLX5E_TT_IPV6) |
		      BIT(MLX5E_TT_ANY);
		break;
	}

	return ret;
}
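/* Install the flow rules for one Ethernet address: build the DMAC match from
 * the rule type, then add one forwarding rule per traffic type in tt_vec,
 * steering each to its TIR; on failure all rules added so far are removed.
 */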
static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
				     struct mlx5e_eth_addr_info *ai,
				     int type, u32 *mc, u32 *mv)
{
	struct mlx5_flow_destination dest;
	u8 match_criteria_enable = 0;
	struct mlx5_flow_rule **rule_p;
	struct mlx5_flow_table *ft = priv->fts.main.t;
	u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
				   outer_headers.dmac_47_16);
	u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
				   outer_headers.dmac_47_16);
	u32 *tirn = priv->indir_tirn;
	u32 tt_vec;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	switch (type) {
	case MLX5E_FULLMATCH:
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);
	if (tt_vec & BIT(MLX5E_TT_ANY)) {
		rule_p = &ai->ft_rule[MLX5E_TT_ANY];
		dest.tir_num = priv->direct_tir[0].tirn;
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_ANY);
	}

	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
		dest.tir_num = tirn[MLX5E_TT_IPV4];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
		dest.tir_num = tirn[MLX5E_TT_IPV6];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
	}
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
	}
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
	}
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
	}
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
	}
	return 0;

err_del_ai:
	err = PTR_ERR(*rule_p);
	*rule_p = NULL;
	mlx5e_del_eth_addr_from_flow_table(priv, ai);

	return err;
}
static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
				   struct mlx5e_eth_addr_info *ai, int type)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value    = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_eth_addr_rule_out;
	}

	err = __mlx5e_add_eth_addr_rule(priv, ai, type, match_criteria,
					match_value);

add_eth_addr_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return err;
}
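/* Mirror the set of active VLANs into the NIC vport context, capping the
 * list at the firmware-advertised maximum (log_max_vlan_list).
 */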
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	int list_size, max_list_size, vlan, err, i;
	u16 *vlans;

	list_size = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		netdev_warn(ndev,
			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	i = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return err;
}
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};
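/* Install one VLAN steering rule (untagged, any-VID or a specific VID) that
 * forwards matching packets from the VLAN table into the main flow table.
 */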
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, u32 *mc, u32 *mv)
{
	struct mlx5_flow_table *ft = priv->fts.vlan.t;
	struct mlx5_flow_destination dest;
	u8 match_criteria_enable = 0;
	struct mlx5_flow_rule **rule_p;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fts.main.t;

	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->vlan.untagged_rule;
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		rule_p = &priv->vlan.any_vlan_rule;
		MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->vlan.active_vlans_rule[vid];
		MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
		break;
	}

	*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
				     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				     MLX5_FS_DEFAULT_FLOW_TAG,
				     &dest);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	return err;
}
static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value    = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
		mlx5e_vport_context_update_vlans(priv);

	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria,
				    match_value);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return err;
}
static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->vlan.untagged_rule) {
			mlx5_del_flow_rule(priv->vlan.untagged_rule);
			priv->vlan.untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		if (priv->vlan.any_vlan_rule) {
			mlx5_del_flow_rule(priv->vlan.any_vlan_rule);
			priv->vlan.any_vlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		mlx5e_vport_context_update_vlans(priv);
		if (priv->vlan.active_vlans_rule[vid]) {
			mlx5_del_flow_rule(priv->vlan.active_vlans_rule[vid]);
			priv->vlan.active_vlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}
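/* VLAN filtering is "disabled" by installing an any-VID rule that accepts all
 * tagged traffic; in promiscuous mode that rule is owned by the promisc path,
 * so it is neither added nor removed here.
 */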
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->vlan.filter_disabled)
		return;

	priv->vlan.filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->vlan.filter_disabled)
		return;

	priv->vlan.filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	set_bit(vid, priv->vlan.active_vlans);

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	clear_bit(vid, priv->vlan.active_vlans);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);

	return 0;
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
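/* Apply the pending ADD/DEL action recorded on a hash node: install the
 * FULLMATCH steering rules for a new address, or tear down the rules and
 * drop the node for an address that disappeared from the netdev lists.
 */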
static void mlx5e_execute_action(struct mlx5e_priv *priv,
				 struct mlx5e_eth_addr_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
		mlx5e_del_eth_addr_from_hash(hn);
		break;
	}
}
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
				   priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}
static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_eth_addr_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->eth_addr.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}
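/* Push the current UC or MC address list into the NIC vport context, capping
 * it at the firmware limit (log_max_current_{uc,mc}_list) and warning when
 * addresses have to be dropped.
 */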
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_eth_addr_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size, size, err, hi;

	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}
static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}
static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
		mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
		mlx5e_execute_action(priv, hn);
}
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}
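/* Deferred rx-mode handler: reconcile promiscuous/allmulti/broadcast state
 * and the UC/MC address rules with the current netdev flags and address
 * lists, then update the NIC vport context to match.
 */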
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->vlan.filter_disabled)
			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
					    0);
	}
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->vlan.filter_disabled)
			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
					    0);
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
	}

	ea->promisc_enabled   = promisc_enabled;
	ea->allmulti_enabled  = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}
static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}
void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
}
#define MLX5E_MAIN_GROUP0_SIZE	BIT(3)
#define MLX5E_MAIN_GROUP1_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP2_SIZE	BIT(0)
#define MLX5E_MAIN_GROUP3_SIZE	BIT(14)
#define MLX5E_MAIN_GROUP4_SIZE	BIT(13)
#define MLX5E_MAIN_GROUP5_SIZE	BIT(11)
#define MLX5E_MAIN_GROUP6_SIZE	BIT(2)
#define MLX5E_MAIN_GROUP7_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP8_SIZE	BIT(0)
#define MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
				 MLX5E_MAIN_GROUP1_SIZE +\
				 MLX5E_MAIN_GROUP2_SIZE +\
				 MLX5E_MAIN_GROUP3_SIZE +\
				 MLX5E_MAIN_GROUP4_SIZE +\
				 MLX5E_MAIN_GROUP5_SIZE +\
				 MLX5E_MAIN_GROUP6_SIZE +\
				 MLX5E_MAIN_GROUP7_SIZE +\
				 MLX5E_MAIN_GROUP8_SIZE)
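/* Main flow table groups, ordered most to least specific: groups 0-2 match
 * on {ethertype + ip_protocol, ethertype, nothing}; groups 3-5 repeat that
 * with a full DMAC match; groups 6-8 repeat it with a multicast-DMAC match.
 */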
static int __mlx5e_create_main_groups(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
				match_criteria.outer_headers.dmac_47_16);
	int err;
	int ix = 0;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	eth_broadcast_addr(dmac);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	eth_broadcast_addr(dmac);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP4_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	eth_broadcast_addr(dmac);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP5_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP6_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP7_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP8_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;
err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}

static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_main_groups(ft, in, inlen);

	kvfree(in);
	return err;
}
static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.main;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_MAIN_TABLE_SIZE,
				       MLX5E_MAIN_FT_LEVEL);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_main_flow_table;
	}

	err = mlx5e_create_main_groups(ft);
	if (err)
		goto err_free_g;
	return 0;

err_free_g:
	kfree(ft->g);

err_destroy_main_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}
static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}
static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.main);
}
#define MLX5E_NUM_VLAN_GROUPS	2
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE)
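/* VLAN table groups: group 0 matches vlan_tag + first_vid (per-VID rules),
 * group 1 matches vlan_tag only (untagged and any-VID rules).
 */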
static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}
static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_vlan_groups(ft, in, inlen);

	kvfree(in);
	return err;
}
static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.vlan;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_VLAN_TABLE_SIZE,
				       MLX5E_VLAN_FT_LEVEL);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_flow_table;
	}

	err = mlx5e_create_vlan_groups(ft);
	if (err)
		goto err_free_g;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	if (err)
		goto err_destroy_vlan_flow_groups;

	return 0;

err_destroy_vlan_flow_groups:
	mlx5e_destroy_groups(ft);
err_free_g:
	kfree(ft->g);
err_destroy_vlan_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}
static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.vlan);
}
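/* Create the NIC flow steering tables: look up the kernel flow namespace,
 * build the main table first, then the VLAN table (which forwards matching
 * traffic into the main table) together with its default untagged rule.
 */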
int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
{
	int err;

	priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
					       MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fts.ns)
		return -EINVAL;

	err = mlx5e_create_main_flow_table(priv);
	if (err)
		return err;

	err = mlx5e_create_vlan_flow_table(priv);
	if (err)
		goto err_destroy_main_flow_table;

	return 0;

err_destroy_main_flow_table:
	mlx5e_destroy_main_flow_table(priv);

	return err;
}
void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	mlx5e_destroy_vlan_flow_table(priv);
	mlx5e_destroy_main_flow_table(priv);
}