/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

#define UPLINK_VPORT 0xFFFF
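
/* 0xFFFF is the reserved vport number used to address the uplink: FDB
 * rules that should only catch traffic arriving from the wire match on
 * misc_parameters.source_port == UPLINK_VPORT (see
 * __esw_fdb_set_vport_rule() below).
 */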

enum {
        MLX5_ACTION_NONE = 0,
        MLX5_ACTION_ADD  = 1,
        MLX5_ACTION_DEL  = 2,
};

/* E-Switch UC L2 table hash node */
struct esw_uc_addr {
        struct l2addr_node node;
        u32                table_index;
        u32                vport;
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
        struct l2addr_node     node;
        struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */
        u32                    refcnt;
};

/* Vport UC/MC hash node */
struct vport_addr {
        struct l2addr_node     node;
        u8                     action;
        u32                    vport;
        struct mlx5_flow_rule *flow_rule; /* SRIOV only */
        /* A flag indicating that mac was added due to mc promiscuous vport */
        bool mc_promisc;
};

enum {
        UC_ADDR_CHANGE = BIT(0),
        MC_ADDR_CHANGE = BIT(1),
        PROMISC_CHANGE = BIT(3),
};

/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
                            MC_ADDR_CHANGE | \
                            PROMISC_CHANGE)

static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
                                        u32 events_mask)
{
        int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)]   = {0};
        int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
        void *nic_vport_ctx;

        MLX5_SET(modify_nic_vport_context_in, in,
                 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
        MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
        MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
        if (vport)
                MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
        nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
                                     in, nic_vport_context);

        MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

        if (events_mask & UC_ADDR_CHANGE)
                MLX5_SET(nic_vport_context, nic_vport_ctx,
                         event_on_uc_address_change, 1);
        if (events_mask & MC_ADDR_CHANGE)
                MLX5_SET(nic_vport_context, nic_vport_ctx,
                         event_on_mc_address_change, 1);
        if (events_mask & PROMISC_CHANGE)
                MLX5_SET(nic_vport_context, nic_vport_ctx,
                         event_on_promisc_change, 1);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
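
/* Vport context events are one-shot: the device reports a change once and
 * then stays quiet until arm_change_event is set again, which is why the
 * change handler re-arms through this command after each round of
 * processing.
 */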

/* E-Switch vport context HW commands */
static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
                                        void *in, int inlen)
{
        u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};

        MLX5_SET(modify_esw_vport_context_in, in, opcode,
                 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
        MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
        if (vport)
                MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
        return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}

static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
                                  u16 vlan, u8 qos, bool set)
{
        u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};

        if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
            !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
                return -EOPNOTSUPP;

        esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n",
                  vport, vlan, qos, set);
        if (set) {
                MLX5_SET(modify_esw_vport_context_in, in,
                         esw_vport_context.vport_cvlan_strip, 1);
                /* insert only if no vlan in packet */
                MLX5_SET(modify_esw_vport_context_in, in,
                         esw_vport_context.vport_cvlan_insert, 1);
                MLX5_SET(modify_esw_vport_context_in, in,
                         esw_vport_context.cvlan_pcp, qos);
                MLX5_SET(modify_esw_vport_context_in, in,
                         esw_vport_context.cvlan_id, vlan);
        }
        MLX5_SET(modify_esw_vport_context_in, in,
                 field_select.vport_cvlan_strip, 1);
        MLX5_SET(modify_esw_vport_context_in, in,
                 field_select.vport_cvlan_insert, 1);

        return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
}
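
/* VST note: with "set" the e-switch strips the C-VLAN in one direction of
 * the vport and inserts <vlan, qos> in the other (insert happens only when
 * no vlan is already present). The field_select bits are written
 * unconditionally, so a set=false call programs zeros and restores the
 * default no-strip/no-insert behavior.
 */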

/* HW L2 Table (MPFS) management */
static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
                                  u8 *mac, u8 vlan_valid, u16 vlan)
{
        u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0};
        u8 *in_mac_addr;

        MLX5_SET(set_l2_table_entry_in, in, opcode,
                 MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
        MLX5_SET(set_l2_table_entry_in, in, table_index, index);
        MLX5_SET(set_l2_table_entry_in, in, vlan_valid, vlan_valid);
        MLX5_SET(set_l2_table_entry_in, in, vlan, vlan);

        in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address);
        ether_addr_copy(&in_mac_addr[2], mac);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
{
        u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0};

        MLX5_SET(delete_l2_table_entry_in, in, opcode,
                 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
        MLX5_SET(delete_l2_table_entry_in, in, table_index, index);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix)
{
        int err = 0;

        *ix = find_first_zero_bit(l2_table->bitmap, l2_table->size);
        if (*ix >= l2_table->size)
                err = -ENOSPC;
        else
                __set_bit(*ix, l2_table->bitmap);

        return err;
}

static void free_l2_table_index(struct mlx5_l2_table *l2_table, u32 ix)
{
        __clear_bit(ix, l2_table->bitmap);
}

static int set_l2_table_entry(struct mlx5_core_dev *dev, u8 *mac,
                              u8 vlan_valid, u16 vlan,
                              u32 *index)
{
        struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;
        int err;

        err = alloc_l2_table_index(l2_table, index);
        if (err)
                return err;

        err = set_l2_table_entry_cmd(dev, *index, mac, vlan_valid, vlan);
        if (err)
                free_l2_table_index(l2_table, *index);

        return err;
}

static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
{
        struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;

        del_l2_table_entry_cmd(dev, index);
        free_l2_table_index(l2_table, index);
}
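
/* The MPFS L2 table is a flat array of MAC(+VLAN) entries indexed through a
 * driver-side bitmap allocator: alloc_l2_table_index() hands out the first
 * free slot, set_l2_table_entry() programs it, and the bitmap is rolled
 * back if the firmware command fails, keeping software and hardware state
 * in sync.
 */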

/* E-Switch FDB */
static struct mlx5_flow_rule *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
                         u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
        int match_header = (is_zero_ether_addr(mac_c) ? 0 :
                            MLX5_MATCH_OUTER_HEADERS);
        struct mlx5_flow_rule *flow_rule = NULL;
        struct mlx5_flow_destination dest;
        struct mlx5_flow_spec *spec;
        void *mv_misc = NULL;
        void *mc_misc = NULL;
        u8 *dmac_v = NULL;
        u8 *dmac_c = NULL;

        if (rx_rule)
                match_header |= MLX5_MATCH_MISC_PARAMETERS;

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec) {
                esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
                return NULL;
        }
        dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                              outer_headers.dmac_47_16);
        dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                              outer_headers.dmac_47_16);

        if (match_header & MLX5_MATCH_OUTER_HEADERS) {
                ether_addr_copy(dmac_v, mac_v);
                ether_addr_copy(dmac_c, mac_c);
        }

        if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
                mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       misc_parameters);
                mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       misc_parameters);
                MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
                MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
        }

        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport_num = vport;

        esw_debug(esw->dev,
                  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
                  dmac_v, dmac_c, vport);
        spec->match_criteria_enable = match_header;
        flow_rule =
                mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
                                   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                   0, &dest);
        if (IS_ERR(flow_rule)) {
                esw_warn(esw->dev,
                         "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
                         dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
                flow_rule = NULL;
        }

        kvfree(spec);
        return flow_rule;
}

static struct mlx5_flow_rule *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
{
        u8 mac_c[ETH_ALEN];

        eth_broadcast_addr(mac_c);
        return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}

static struct mlx5_flow_rule *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
{
        u8 mac_c[ETH_ALEN];
        u8 mac_v[ETH_ALEN];

        eth_zero_addr(mac_c);
        eth_zero_addr(mac_v);
        mac_c[0] = 0x01;
        mac_v[0] = 0x01;
        return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}

static struct mlx5_flow_rule *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
{
        u8 mac_c[ETH_ALEN];
        u8 mac_v[ETH_ALEN];

        eth_zero_addr(mac_c);
        eth_zero_addr(mac_v);
        return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}

static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_table *fdb;
        struct mlx5_flow_group *g;
        void *match_criteria;
        int table_size;
        u32 *flow_group_in;
        u8 *dmac;
        int err = 0;

        esw_debug(dev, "Create FDB log_max_size(%d)\n",
                  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!root_ns) {
                esw_warn(dev, "Failed to get FDB flow namespace\n");
                return -EOPNOTSUPP;
        }

        flow_group_in = mlx5_vzalloc(inlen);
        if (!flow_group_in)
                return -ENOMEM;
        memset(flow_group_in, 0, inlen);

        table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
        fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
        if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create FDB Table err %d\n", err);
                goto out;
        }
        esw->fdb_table.fdb = fdb;

        /* Addresses group : Full match unicast/multicast addresses */
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_OUTER_HEADERS);
        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
        dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        /* Preserve 2 entries for allmulti and promisc rules*/
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
        eth_broadcast_addr(dmac);
        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create flow group err(%d)\n", err);
                goto out;
        }
        esw->fdb_table.legacy.addr_grp = g;

        /* Allmulti group : One rule that forwards any mcast traffic */
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
        eth_zero_addr(dmac);
        dmac[0] = 0x01;
        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
                goto out;
        }
        esw->fdb_table.legacy.allmulti_grp = g;

        /* Promiscuous group :
         * One rule that forward all unmatched traffic from previous groups
         */
        eth_zero_addr(dmac);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_MISC_PARAMETERS);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
                goto out;
        }
        esw->fdb_table.legacy.promisc_grp = g;

out:
        if (err) {
                if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) {
                        mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
                        esw->fdb_table.legacy.allmulti_grp = NULL;
                }
                if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) {
                        mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
                        esw->fdb_table.legacy.addr_grp = NULL;
                }
                if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
                        mlx5_destroy_flow_table(esw->fdb_table.fdb);
                        esw->fdb_table.fdb = NULL;
                }
        }

        kvfree(flow_group_in);
        return err;
}
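
/* Resulting legacy FDB layout, by flow index:
 *   [0 .. table_size - 3]  exact-DMAC address group,
 *   [table_size - 2]       allmulti group (one catch-all mcast rule),
 *   [table_size - 1]       promisc group (one source_port catch-all rule),
 * so exact matches take precedence over the catch-all rules.
 */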

static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
        if (!esw->fdb_table.fdb)
                return;

        esw_debug(esw->dev, "Destroy FDB Table\n");
        mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
        mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
        mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
        mlx5_destroy_flow_table(esw->fdb_table.fdb);
        esw->fdb_table.fdb = NULL;
        esw->fdb_table.legacy.addr_grp = NULL;
        esw->fdb_table.legacy.allmulti_grp = NULL;
        esw->fdb_table.legacy.promisc_grp = NULL;
}

/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
                                 struct vport_addr *vaddr);

static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
        struct hlist_head *hash = esw->l2_table.l2_hash;
        struct esw_uc_addr *esw_uc;
        u8 *mac = vaddr->node.addr;
        u32 vport = vaddr->vport;
        int err;

        esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
        if (esw_uc) {
                esw_warn(esw->dev,
                         "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n",
                         mac, vport, esw_uc->vport);
                return -EEXIST;
        }

        esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL);
        if (!esw_uc)
                return -ENOMEM;
        esw_uc->vport = vport;

        err = set_l2_table_entry(esw->dev, mac, 0, 0, &esw_uc->table_index);
        if (err)
                goto abort;

        /* SRIOV is enabled: Forward UC MAC to vport */
        if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY)
                vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

        esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
                  vport, mac, esw_uc->table_index, vaddr->flow_rule);
        return err;
abort:
        l2addr_hash_del(esw_uc);
        return err;
}

static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
        struct hlist_head *hash = esw->l2_table.l2_hash;
        struct esw_uc_addr *esw_uc;
        u8 *mac = vaddr->node.addr;
        u32 vport = vaddr->vport;

        esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
        if (!esw_uc || esw_uc->vport != vport) {
                esw_debug(esw->dev,
                          "MAC(%pM) doesn't belong to vport (%d)\n",
                          mac, vport);
                return -EINVAL;
        }
        esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n",
                  vport, mac, esw_uc->table_index, vaddr->flow_rule);

        del_l2_table_entry(esw->dev, esw_uc->table_index);

        if (vaddr->flow_rule)
                mlx5_del_flow_rule(vaddr->flow_rule);
        vaddr->flow_rule = NULL;

        l2addr_hash_del(esw_uc);
        return 0;
}

static void update_allmulti_vports(struct mlx5_eswitch *esw,
                                   struct vport_addr *vaddr,
                                   struct esw_mc_addr *esw_mc)
{
        u8 *mac = vaddr->node.addr;
        u32 vport_idx = 0;

        for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
                struct mlx5_vport *vport = &esw->vports[vport_idx];
                struct hlist_head *vport_hash = vport->mc_list;
                struct vport_addr *iter_vaddr =
                                        l2addr_hash_find(vport_hash,
                                                         mac,
                                                         struct vport_addr);
                if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
                    vaddr->vport == vport_idx)
                        continue;
                switch (vaddr->action) {
                case MLX5_ACTION_ADD:
                        if (iter_vaddr)
                                continue;
                        iter_vaddr = l2addr_hash_add(vport_hash, mac,
                                                     struct vport_addr,
                                                     GFP_KERNEL);
                        if (!iter_vaddr) {
                                esw_warn(esw->dev,
                                         "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
                                         mac, vport_idx);
                                continue;
                        }
                        iter_vaddr->vport = vport_idx;
                        iter_vaddr->flow_rule =
                                        esw_fdb_set_vport_rule(esw,
                                                               mac,
                                                               vport_idx);
                        iter_vaddr->mc_promisc = true;
                        break;
                case MLX5_ACTION_DEL:
                        if (!iter_vaddr)
                                continue;
                        mlx5_del_flow_rule(iter_vaddr->flow_rule);
                        l2addr_hash_del(iter_vaddr);
                        break;
                }
        }
}
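
/* Keeps every allmulti ("mc promisc") vport in sync when a multicast MAC
 * appears on, or disappears from, some other vport: each such vport gets
 * (or loses) its own forwarding copy of the rule, flagged mc_promisc so
 * the refcounting in esw_add/del_mc_addr() skips it.
 */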

static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
        struct hlist_head *hash = esw->mc_table;
        struct esw_mc_addr *esw_mc;
        u8 *mac = vaddr->node.addr;
        u32 vport = vaddr->vport;

        if (!esw->fdb_table.fdb)
                return 0;

        esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
        if (esw_mc)
                goto add;

        esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
        if (!esw_mc)
                return -ENOMEM;

        esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
                esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);

        /* Add this multicast mac to all the mc promiscuous vports */
        update_allmulti_vports(esw, vaddr, esw_mc);

add:
        /* If the multicast mac is added as a result of mc promiscuous vport,
         * don't increment the multicast ref count
         */
        if (!vaddr->mc_promisc)
                esw_mc->refcnt++;

        /* Forward MC MAC to vport */
        vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
        esw_debug(esw->dev,
                  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
                  vport, mac, vaddr->flow_rule,
                  esw_mc->refcnt, esw_mc->uplink_rule);
        return 0;
}

static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
        struct hlist_head *hash = esw->mc_table;
        struct esw_mc_addr *esw_mc;
        u8 *mac = vaddr->node.addr;
        u32 vport = vaddr->vport;

        if (!esw->fdb_table.fdb)
                return 0;

        esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
        if (!esw_mc) {
                esw_warn(esw->dev,
                         "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
                         mac, vport);
                return -EINVAL;
        }
        esw_debug(esw->dev,
                  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
                  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
                  esw_mc->uplink_rule);

        if (vaddr->flow_rule)
                mlx5_del_flow_rule(vaddr->flow_rule);
        vaddr->flow_rule = NULL;

        /* If the multicast mac is added as a result of mc promiscuous vport,
         * don't decrement the multicast ref count.
         */
        if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
                return 0;

        /* Remove this multicast mac from all the mc promiscuous vports */
        update_allmulti_vports(esw, vaddr, esw_mc);

        if (esw_mc->uplink_rule)
                mlx5_del_flow_rule(esw_mc->uplink_rule);

        l2addr_hash_del(esw_mc);
        return 0;
}
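
/* esw_mc_addr.refcnt counts only vports that explicitly requested the
 * multicast MAC; copies installed for allmulti vports carry mc_promisc and
 * never touch the refcount, so the uplink rule lives exactly as long as at
 * least one explicit listener remains.
 */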

/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
                                      u32 vport_num, int list_type)
{
        struct mlx5_vport *vport = &esw->vports[vport_num];
        bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
        vport_addr_action vport_addr_add;
        vport_addr_action vport_addr_del;
        struct vport_addr *addr;
        struct l2addr_node *node;
        struct hlist_head *hash;
        struct hlist_node *tmp;
        int hi;

        vport_addr_add = is_uc ? esw_add_uc_addr :
                                 esw_add_mc_addr;
        vport_addr_del = is_uc ? esw_del_uc_addr :
                                 esw_del_mc_addr;

        hash = is_uc ? vport->uc_list : vport->mc_list;
        for_each_l2hash_node(node, tmp, hash, hi) {
                addr = container_of(node, struct vport_addr, node);
                switch (addr->action) {
                case MLX5_ACTION_ADD:
                        vport_addr_add(esw, addr);
                        addr->action = MLX5_ACTION_NONE;
                        break;
                case MLX5_ACTION_DEL:
                        vport_addr_del(esw, addr);
                        l2addr_hash_del(addr);
                        break;
                }
        }
}

/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
                                       u32 vport_num, int list_type)
{
        struct mlx5_vport *vport = &esw->vports[vport_num];
        bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
        u8 (*mac_list)[ETH_ALEN];
        struct l2addr_node *node;
        struct vport_addr *addr;
        struct hlist_head *hash;
        struct hlist_node *tmp;
        int size;
        int err;
        int hi;
        int i;

        size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
                       MLX5_MAX_MC_PER_VPORT(esw->dev);

        mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
        if (!mac_list)
                return;

        hash = is_uc ? vport->uc_list : vport->mc_list;

        for_each_l2hash_node(node, tmp, hash, hi) {
                addr = container_of(node, struct vport_addr, node);
                addr->action = MLX5_ACTION_DEL;
        }

        if (!vport->enabled)
                goto out;

        err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
                                            mac_list, &size);
        if (err)
                goto out;
        esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
                  vport_num, is_uc ? "UC" : "MC", size);

        for (i = 0; i < size; i++) {
                if (is_uc && !is_valid_ether_addr(mac_list[i]))
                        continue;

                if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
                        continue;

                addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
                if (addr) {
                        addr->action = MLX5_ACTION_NONE;
                        /* If this mac was previously added because of allmulti
                         * promiscuous rx mode, its now converted to be original
                         * vport mac.
                         */
                        if (addr->mc_promisc) {
                                struct esw_mc_addr *esw_mc =
                                        l2addr_hash_find(esw->mc_table,
                                                         mac_list[i],
                                                         struct esw_mc_addr);
                                if (!esw_mc) {
                                        esw_warn(esw->dev,
                                                 "Failed to find MAC(%pM) in mcast DB\n",
                                                 mac_list[i]);
                                        continue;
                                }
                                esw_mc->refcnt++;
                                addr->mc_promisc = false;
                        }
                        continue;
                }

                addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
                                       GFP_KERNEL);
                if (!addr) {
                        esw_warn(esw->dev,
                                 "Failed to add MAC(%pM) to vport[%d] DB\n",
                                 mac_list[i], vport_num);
                        continue;
                }
                addr->vport = vport_num;
                addr->action = MLX5_ACTION_ADD;
        }
out:
        kfree(mac_list);
}
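
/* The sync above is a mark-and-sweep diff: every cached address is first
 * marked MLX5_ACTION_DEL, then the freshly queried firmware list flips
 * survivors back to MLX5_ACTION_NONE and inserts newcomers as
 * MLX5_ACTION_ADD; esw_apply_vport_addr_list() later executes the plan.
 */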

/* Sync vport UC/MC list from vport context
 * Must be called after esw_update_vport_addr_list
 */
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
{
        struct mlx5_vport *vport = &esw->vports[vport_num];
        struct l2addr_node *node;
        struct vport_addr *addr;
        struct hlist_head *hash;
        struct hlist_node *tmp;
        int hi;

        hash = vport->mc_list;

        for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
                u8 *mac = node->addr;

                addr = l2addr_hash_find(hash, mac, struct vport_addr);
                if (addr) {
                        if (addr->action == MLX5_ACTION_DEL)
                                addr->action = MLX5_ACTION_NONE;
                        continue;
                }
                addr = l2addr_hash_add(hash, mac, struct vport_addr,
                                       GFP_KERNEL);
                if (!addr) {
                        esw_warn(esw->dev,
                                 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
                                 mac, vport_num);
                        continue;
                }
                addr->vport = vport_num;
                addr->action = MLX5_ACTION_ADD;
                addr->mc_promisc = true;
        }
}

/* Apply vport rx mode to HW FDB table */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
                                    bool promisc, bool mc_promisc)
{
        struct esw_mc_addr *allmulti_addr = esw->mc_promisc;
        struct mlx5_vport *vport = &esw->vports[vport_num];

        if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
                goto promisc;

        if (mc_promisc) {
                vport->allmulti_rule =
                        esw_fdb_set_vport_allmulti_rule(esw, vport_num);
                if (!allmulti_addr->uplink_rule)
                        allmulti_addr->uplink_rule =
                                esw_fdb_set_vport_allmulti_rule(esw,
                                                                UPLINK_VPORT);
                allmulti_addr->refcnt++;
        } else if (vport->allmulti_rule) {
                mlx5_del_flow_rule(vport->allmulti_rule);
                vport->allmulti_rule = NULL;

                if (--allmulti_addr->refcnt > 0)
                        goto promisc;

                if (allmulti_addr->uplink_rule)
                        mlx5_del_flow_rule(allmulti_addr->uplink_rule);
                allmulti_addr->uplink_rule = NULL;
        }

promisc:
        if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
                return;

        if (promisc) {
                vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
                                                                     vport_num);
        } else if (vport->promisc_rule) {
                mlx5_del_flow_rule(vport->promisc_rule);
                vport->promisc_rule = NULL;
        }
}
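
/* The "IS_ERR_OR_NULL(rule) != flag" tests above read as: skip the
 * (de)install step when the rule's presence already matches the requested
 * mode, which keeps this function idempotent across repeated context
 * updates.
 */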

/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
{
        struct mlx5_vport *vport = &esw->vports[vport_num];
        int promisc_all = 0;
        int promisc_uc = 0;
        int promisc_mc = 0;
        int err;

        err = mlx5_query_nic_vport_promisc(esw->dev,
                                           vport_num,
                                           &promisc_uc,
                                           &promisc_mc,
                                           &promisc_all);
        if (err)
                return;
        esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
                  vport_num, promisc_all, promisc_mc);

        if (!vport->info.trusted || !vport->enabled) {
                promisc_uc = 0;
                promisc_mc = 0;
                promisc_all = 0;
        }

        esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
                                (promisc_all || promisc_mc));
}

static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
        struct mlx5_core_dev *dev = vport->dev;
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        u8 mac[ETH_ALEN];

        mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
        esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
                  vport->vport, mac);

        if (vport->enabled_events & UC_ADDR_CHANGE) {
                esw_update_vport_addr_list(esw, vport->vport,
                                           MLX5_NVPRT_LIST_TYPE_UC);
                esw_apply_vport_addr_list(esw, vport->vport,
                                          MLX5_NVPRT_LIST_TYPE_UC);
        }

        if (vport->enabled_events & MC_ADDR_CHANGE) {
                esw_update_vport_addr_list(esw, vport->vport,
                                           MLX5_NVPRT_LIST_TYPE_MC);
        }

        if (vport->enabled_events & PROMISC_CHANGE) {
                esw_update_vport_rx_mode(esw, vport->vport);
                if (!IS_ERR_OR_NULL(vport->allmulti_rule))
                        esw_update_vport_mc_promisc(esw, vport->vport);
        }

        if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
                esw_apply_vport_addr_list(esw, vport->vport,
                                          MLX5_NVPRT_LIST_TYPE_MC);
        }

        esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
        if (vport->enabled)
                arm_vport_context_events_cmd(dev, vport->vport,
                                             vport->enabled_events);
}

static void esw_vport_change_handler(struct work_struct *work)
{
        struct mlx5_vport *vport =
                container_of(work, struct mlx5_vport, vport_change_handler);
        struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

        mutex_lock(&esw->state_lock);
        esw_vport_change_handle_locked(vport);
        mutex_unlock(&esw->state_lock);
}

static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
                                        struct mlx5_vport *vport)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *vlan_grp = NULL;
        struct mlx5_flow_group *drop_grp = NULL;
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_table *acl;
        void *match_criteria;
        u32 *flow_group_in;
        /* The egress acl table contains 2 rules:
         * 1)Allow traffic with vlan_tag=vst_vlan_id
         * 2)Drop all other traffic.
         */
        int table_size = 2;
        int err = 0;

        if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) ||
            !IS_ERR_OR_NULL(vport->egress.acl))
                return;

        esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
                  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));

        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
        if (!root_ns) {
                esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
                return;
        }

        flow_group_in = mlx5_vzalloc(inlen);
        if (!flow_group_in)
                return;

        acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
        if (IS_ERR(acl)) {
                err = PTR_ERR(acl);
                esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
                         vport->vport, err);
                goto out;
        }

        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

        vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
        if (IS_ERR(vlan_grp)) {
                err = PTR_ERR(vlan_grp);
                esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
                         vport->vport, err);
                goto out;
        }

        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
        drop_grp = mlx5_create_flow_group(acl, flow_group_in);
        if (IS_ERR(drop_grp)) {
                err = PTR_ERR(drop_grp);
                esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
                         vport->vport, err);
                goto out;
        }

        vport->egress.acl = acl;
        vport->egress.drop_grp = drop_grp;
        vport->egress.allowed_vlans_grp = vlan_grp;
out:
        kvfree(flow_group_in);
        if (err && !IS_ERR_OR_NULL(vlan_grp))
                mlx5_destroy_flow_group(vlan_grp);
        if (err && !IS_ERR_OR_NULL(acl))
                mlx5_destroy_flow_table(acl);
}

static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
                                           struct mlx5_vport *vport)
{
        if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
                mlx5_del_flow_rule(vport->egress.allowed_vlan);

        if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
                mlx5_del_flow_rule(vport->egress.drop_rule);

        vport->egress.allowed_vlan = NULL;
        vport->egress.drop_rule = NULL;
}

static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
                                         struct mlx5_vport *vport)
{
        if (IS_ERR_OR_NULL(vport->egress.acl))
                return;

        esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);

        esw_vport_cleanup_egress_rules(esw, vport);
        mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
        mlx5_destroy_flow_group(vport->egress.drop_grp);
        mlx5_destroy_flow_table(vport->egress.acl);
        vport->egress.allowed_vlans_grp = NULL;
        vport->egress.drop_grp = NULL;
        vport->egress.acl = NULL;
}

static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
                                         struct mlx5_vport *vport)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_table *acl;
        struct mlx5_flow_group *g;
        void *match_criteria;
        u32 *flow_group_in;
        /* The ingress acl table contains 4 groups
         * (2 active rules at the same time -
         *      1 allow rule from one of the first 3 groups.
         *      1 drop rule from the last group):
         * 1)Allow untagged traffic with smac=original mac.
         * 2)Allow untagged traffic.
         * 3)Allow traffic with smac=original mac.
         * 4)Drop all other traffic.
         */
        int table_size = 4;
        int err = 0;

        if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) ||
            !IS_ERR_OR_NULL(vport->ingress.acl))
                return;

        esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
                  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));

        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
        if (!root_ns) {
                esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
                return;
        }

        flow_group_in = mlx5_vzalloc(inlen);
        if (!flow_group_in)
                return;

        acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
        if (IS_ERR(acl)) {
                err = PTR_ERR(acl);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
                         vport->vport, err);
                goto out;
        }
        vport->ingress.acl = acl;

        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

        g = mlx5_create_flow_group(acl, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
                         vport->vport, err);
                goto out;
        }
        vport->ingress.allow_untagged_spoofchk_grp = g;

        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);

        g = mlx5_create_flow_group(acl, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
                         vport->vport, err);
                goto out;
        }
        vport->ingress.allow_untagged_only_grp = g;

        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);

        g = mlx5_create_flow_group(acl, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
                         vport->vport, err);
                goto out;
        }
        vport->ingress.allow_spoofchk_only_grp = g;

        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);

        g = mlx5_create_flow_group(acl, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
                         vport->vport, err);
                goto out;
        }
        vport->ingress.drop_grp = g;

out:
        if (err) {
                if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
                        mlx5_destroy_flow_group(
                                        vport->ingress.allow_spoofchk_only_grp);
                if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
                        mlx5_destroy_flow_group(
                                        vport->ingress.allow_untagged_only_grp);
                if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
                        mlx5_destroy_flow_group(
                                vport->ingress.allow_untagged_spoofchk_grp);
                if (!IS_ERR_OR_NULL(vport->ingress.acl))
                        mlx5_destroy_flow_table(vport->ingress.acl);
        }

        kvfree(flow_group_in);
}

static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
                                            struct mlx5_vport *vport)
{
        if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
                mlx5_del_flow_rule(vport->ingress.drop_rule);

        if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
                mlx5_del_flow_rule(vport->ingress.allow_rule);

        vport->ingress.drop_rule = NULL;
        vport->ingress.allow_rule = NULL;
}

static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
                                          struct mlx5_vport *vport)
{
        if (IS_ERR_OR_NULL(vport->ingress.acl))
                return;

        esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

        esw_vport_cleanup_ingress_rules(esw, vport);
        mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
        mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
        mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
        mlx5_destroy_flow_group(vport->ingress.drop_grp);
        mlx5_destroy_flow_table(vport->ingress.acl);
        vport->ingress.acl = NULL;
        vport->ingress.drop_grp = NULL;
        vport->ingress.allow_spoofchk_only_grp = NULL;
        vport->ingress.allow_untagged_only_grp = NULL;
        vport->ingress.allow_untagged_spoofchk_grp = NULL;
}

static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
                                    struct mlx5_vport *vport)
{
        struct mlx5_flow_spec *spec;
        int err = 0;
        u8 *smac_v;

        if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
                mlx5_core_warn(esw->dev,
                               "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
                               vport->vport);
                return -EPERM;
        }

        esw_vport_cleanup_ingress_rules(esw, vport);

        if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
                esw_vport_disable_ingress_acl(esw, vport);
                return 0;
        }

        esw_vport_enable_ingress_acl(esw, vport);

        esw_debug(esw->dev,
                  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
                  vport->vport, vport->info.vlan, vport->info.qos);

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec) {
                err = -ENOMEM;
                esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
                         vport->vport, err);
                goto out;
        }

        if (vport->info.vlan || vport->info.qos)
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);

        if (vport->info.spoofchk) {
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
                smac_v = MLX5_ADDR_OF(fte_match_param,
                                      spec->match_value,
                                      outer_headers.smac_47_16);
                ether_addr_copy(smac_v, vport->info.mac);
        }

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        vport->ingress.allow_rule =
                mlx5_add_flow_rule(vport->ingress.acl, spec,
                                   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
                                   0, NULL);
        if (IS_ERR(vport->ingress.allow_rule)) {
                err = PTR_ERR(vport->ingress.allow_rule);
                esw_warn(esw->dev,
                         "vport[%d] configure ingress allow rule, err(%d)\n",
                         vport->vport, err);
                vport->ingress.allow_rule = NULL;
                goto out;
        }

        memset(spec, 0, sizeof(*spec));
        vport->ingress.drop_rule =
                mlx5_add_flow_rule(vport->ingress.acl, spec,
                                   MLX5_FLOW_CONTEXT_ACTION_DROP,
                                   0, NULL);
        if (IS_ERR(vport->ingress.drop_rule)) {
                err = PTR_ERR(vport->ingress.drop_rule);
                esw_warn(esw->dev,
                         "vport[%d] configure ingress drop rule, err(%d)\n",
                         vport->vport, err);
                vport->ingress.drop_rule = NULL;
                goto out;
        }

out:
        if (err)
                esw_vport_cleanup_ingress_rules(esw, vport);
        kvfree(spec);
        return err;
}
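
/* Depending on the configuration, the single allow rule above lands in a
 * different ACL group: vlan_tag + smac when both VST and spoofchk are on,
 * vlan_tag alone for plain VST, smac alone for plain spoofchk; the
 * wildcard drop rule then discards everything that did not hit the allow
 * rule.
 */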

static int esw_vport_egress_config(struct mlx5_eswitch *esw,
                                   struct mlx5_vport *vport)
{
        struct mlx5_flow_spec *spec;
        int err = 0;

        esw_vport_cleanup_egress_rules(esw, vport);

        if (!vport->info.vlan && !vport->info.qos) {
                esw_vport_disable_egress_acl(esw, vport);
                return 0;
        }

        esw_vport_enable_egress_acl(esw, vport);

        esw_debug(esw->dev,
                  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
                  vport->vport, vport->info.vlan, vport->info.qos);

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec) {
                err = -ENOMEM;
                esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
                         vport->vport, err);
                goto out;
        }

        /* Allowed vlan rule */
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        vport->egress.allowed_vlan =
                mlx5_add_flow_rule(vport->egress.acl, spec,
                                   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
                                   0, NULL);
        if (IS_ERR(vport->egress.allowed_vlan)) {
                err = PTR_ERR(vport->egress.allowed_vlan);
                esw_warn(esw->dev,
                         "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
                         vport->vport, err);
                vport->egress.allowed_vlan = NULL;
                goto out;
        }

        /* Drop others rule (star rule) */
        memset(spec, 0, sizeof(*spec));
        vport->egress.drop_rule =
                mlx5_add_flow_rule(vport->egress.acl, spec,
                                   MLX5_FLOW_CONTEXT_ACTION_DROP,
                                   0, NULL);
        if (IS_ERR(vport->egress.drop_rule)) {
                err = PTR_ERR(vport->egress.drop_rule);
                esw_warn(esw->dev,
                         "vport[%d] configure egress drop rule failed, err(%d)\n",
                         vport->vport, err);
                vport->egress.drop_rule = NULL;
        }
out:
        kvfree(spec);
        return err;
}

static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
{
        ((u8 *)node_guid)[7] = mac[0];
        ((u8 *)node_guid)[6] = mac[1];
        ((u8 *)node_guid)[5] = mac[2];
        ((u8 *)node_guid)[4] = 0xff;
        ((u8 *)node_guid)[3] = 0xfe;
        ((u8 *)node_guid)[2] = mac[3];
        ((u8 *)node_guid)[1] = mac[4];
        ((u8 *)node_guid)[0] = mac[5];
}
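
/* Byte layout sketch: a MAC of 00:11:22:33:44:55 yields the EUI-64 style
 * node GUID 00:11:22:ff:fe:33:44:55 (OUI, then ff:fe, then the NIC
 * specific bytes) - the same MAC-to-EUI-64 expansion used for IPv6
 * interface identifiers, here without the universal/local bit flip.
 */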

static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
                                 struct mlx5_vport *vport)
{
        int vport_num = vport->vport;

        if (!vport_num)
                return;

        mlx5_modify_vport_admin_state(esw->dev,
                                      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
                                      vport_num,
                                      vport->info.link_state);
        mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac);
        mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, vport->info.node_guid);
        modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
                               (vport->info.vlan || vport->info.qos));

        /* Only legacy mode needs ACLs */
        if (esw->mode == SRIOV_LEGACY) {
                esw_vport_ingress_config(esw, vport);
                esw_vport_egress_config(esw, vport);
        }
}

static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
                             int enable_events)
{
        struct mlx5_vport *vport = &esw->vports[vport_num];

        mutex_lock(&esw->state_lock);
        WARN_ON(vport->enabled);

        esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

        /* Restore old vport configuration */
        esw_apply_vport_conf(esw, vport);

        /* Sync with current vport context */
        vport->enabled_events = enable_events;
        vport->enabled = true;

        /* only PF is trusted by default */
        if (!vport_num)
                vport->info.trusted = true;

        esw_vport_change_handle_locked(vport);

        esw->enabled_vports++;
        esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
        mutex_unlock(&esw->state_lock);
}

static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
{
        struct mlx5_vport *vport = &esw->vports[vport_num];

        if (!vport->enabled)
                return;

        esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
        /* Mark this vport as disabled to discard new events */
        vport->enabled = false;

        synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));
        /* Wait for current already scheduled events to complete */
        flush_workqueue(esw->work_queue);
        /* Disable events from this vport */
        arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
        mutex_lock(&esw->state_lock);
        /* We don't assume VFs will cleanup after themselves.
         * Calling vport change handler while vport is disabled will cleanup
         * the vport resources.
         */
        esw_vport_change_handle_locked(vport);
        vport->enabled_events = 0;
        if (vport_num && esw->mode == SRIOV_LEGACY) {
                mlx5_modify_vport_admin_state(esw->dev,
                                              MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
                                              vport_num,
                                              MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
                esw_vport_disable_egress_acl(esw, vport);
                esw_vport_disable_ingress_acl(esw, vport);
        }
        esw->enabled_vports--;
        mutex_unlock(&esw->state_lock);
}

/* Public E-Switch API */
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
{
        int err;
        int i, enabled_events;

        if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
            MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return 0;

        if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
            !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
                esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
                return -EOPNOTSUPP;
        }

        if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
                esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");

        if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
                esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");

        esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
        esw->mode = mode;
        esw_disable_vport(esw, 0);

        if (mode == SRIOV_LEGACY)
                err = esw_create_legacy_fdb_table(esw, nvfs + 1);
        else
                err = esw_offloads_init(esw, nvfs + 1);
        if (err)
                goto abort;

        enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE;
        for (i = 0; i <= nvfs; i++)
                esw_enable_vport(esw, i, enabled_events);

        esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
                 esw->enabled_vports);
        return 0;

abort:
        esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
        esw->mode = SRIOV_NONE;
        return err;
}
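
/* Note the dance around vport 0: the PF vport is torn down first so it can
 * be re-enabled with mode-appropriate events (all SRIOV_VPORT_EVENTS in
 * legacy mode, UC_ADDR_CHANGE only for offloads), and the abort path
 * restores it to its non-SRIOV configuration.
 */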

void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
{
        struct esw_mc_addr *mc_promisc;
        int nvports;
        int i;

        if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
            MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return;

        esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
                 esw->enabled_vports, esw->mode);

        mc_promisc = esw->mc_promisc;
        nvports = esw->enabled_vports;

        for (i = 0; i < esw->total_vports; i++)
                esw_disable_vport(esw, i);

        if (mc_promisc && mc_promisc->uplink_rule)
                mlx5_del_flow_rule(mc_promisc->uplink_rule);

        if (esw->mode == SRIOV_LEGACY)
                esw_destroy_legacy_fdb_table(esw);
        else if (esw->mode == SRIOV_OFFLOADS)
                esw_offloads_cleanup(esw, nvports);

        esw->mode = SRIOV_NONE;
        /* VPORT 0 (PF) must be enabled back with non-sriov configuration */
        esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
}

void mlx5_eswitch_attach(struct mlx5_eswitch *esw)
{
        if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
            MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return;

        esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
        /* VF Vports will be enabled when SRIOV is enabled */
}

void mlx5_eswitch_detach(struct mlx5_eswitch *esw)
{
        if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
            MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return;

        esw_disable_vport(esw, 0);
}

int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
        int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
        int total_vports = MLX5_TOTAL_VPORTS(dev);
        struct esw_mc_addr *mc_promisc;
        struct mlx5_eswitch *esw;
        int vport_num;
        int err;

        if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
            MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return 0;

        esw_info(dev,
                 "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n",
                 total_vports, l2_table_size,
                 MLX5_MAX_UC_PER_VPORT(dev),
                 MLX5_MAX_MC_PER_VPORT(dev));

        esw = kzalloc(sizeof(*esw), GFP_KERNEL);
        if (!esw)
                return -ENOMEM;

        esw->dev = dev;

        esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size),
                                       sizeof(uintptr_t), GFP_KERNEL);
        if (!esw->l2_table.bitmap) {
                err = -ENOMEM;
                goto abort;
        }
        esw->l2_table.size = l2_table_size;

        mc_promisc = kzalloc(sizeof(*mc_promisc), GFP_KERNEL);
        if (!mc_promisc) {
                err = -ENOMEM;
                goto abort;
        }
        esw->mc_promisc = mc_promisc;

        esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
        if (!esw->work_queue) {
                err = -ENOMEM;
                goto abort;
        }

        esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
                              GFP_KERNEL);
        if (!esw->vports) {
                err = -ENOMEM;
                goto abort;
        }

        esw->offloads.vport_reps =
                kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep),
                        GFP_KERNEL);
        if (!esw->offloads.vport_reps) {
                err = -ENOMEM;
                goto abort;
        }

        mutex_init(&esw->state_lock);

        for (vport_num = 0; vport_num < total_vports; vport_num++) {
                struct mlx5_vport *vport = &esw->vports[vport_num];

                vport->vport = vport_num;
                vport->info.link_state = MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
                vport->dev = dev;
                INIT_WORK(&vport->vport_change_handler,
                          esw_vport_change_handler);
        }

        esw->total_vports = total_vports;
        esw->enabled_vports = 0;
        esw->mode = SRIOV_NONE;

        dev->priv.eswitch = esw;
        return 0;
abort:
        if (esw->work_queue)
                destroy_workqueue(esw->work_queue);
        kfree(esw->l2_table.bitmap);
        kfree(esw->vports);
        kfree(esw->offloads.vport_reps);
        kfree(esw);
        return err;
}

void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
        if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
            MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return;

        esw_info(esw->dev, "cleanup\n");

        esw->dev->priv.eswitch = NULL;
        destroy_workqueue(esw->work_queue);
        kfree(esw->l2_table.bitmap);
        kfree(esw->mc_promisc);
        kfree(esw->offloads.vport_reps);
        kfree(esw->vports);
        kfree(esw);
}

void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
{
        struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
        u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
        struct mlx5_vport *vport;

        if (!esw) {
                pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n",
                        vport_num);
                return;
        }

        vport = &esw->vports[vport_num];
        if (vport->enabled)
                queue_work(esw->work_queue, &vport->vport_change_handler);
}

/* Vport Administration */
#define ESW_ALLOWED(esw) \
        (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)

int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
                               int vport, u8 mac[ETH_ALEN])
{
        struct mlx5_vport *evport;
        u64 node_guid;
        int err = 0;

        if (!ESW_ALLOWED(esw))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport))
                return -EINVAL;

        mutex_lock(&esw->state_lock);
        evport = &esw->vports[vport];

        if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
                mlx5_core_warn(esw->dev,
                               "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
                               vport);
                err = -EPERM;
                goto unlock;
        }

        err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
        if (err) {
                mlx5_core_warn(esw->dev,
                               "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
                               vport, err);
                goto unlock;
        }

        node_guid_gen_from_mac(&node_guid, mac);
        err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
        if (err)
                mlx5_core_warn(esw->dev,
                               "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
                               vport, err);
        ether_addr_copy(evport->info.mac, mac);
        evport->info.node_guid = node_guid;
        if (evport->enabled && esw->mode == SRIOV_LEGACY)
                err = esw_vport_ingress_config(esw, evport);

unlock:
        mutex_unlock(&esw->state_lock);
        return err;
}

int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
                                 int vport, int link_state)
{
        struct mlx5_vport *evport;
        int err = 0;

        if (!ESW_ALLOWED(esw))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport))
                return -EINVAL;

        mutex_lock(&esw->state_lock);
        evport = &esw->vports[vport];

        err = mlx5_modify_vport_admin_state(esw->dev,
                                            MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
                                            vport, link_state);
        if (err) {
                mlx5_core_warn(esw->dev,
                               "Failed to set vport %d link state, err = %d",
                               vport, err);
                goto unlock;
        }

        evport->info.link_state = link_state;

unlock:
        mutex_unlock(&esw->state_lock);
        return err;
}

int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
                                  int vport, struct ifla_vf_info *ivi)
{
        struct mlx5_vport *evport;

        if (!ESW_ALLOWED(esw))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport))
                return -EINVAL;

        evport = &esw->vports[vport];

        memset(ivi, 0, sizeof(*ivi));
        ivi->vf = vport - 1;

        mutex_lock(&esw->state_lock);
        ether_addr_copy(ivi->mac, evport->info.mac);
        ivi->linkstate = evport->info.link_state;
        ivi->vlan = evport->info.vlan;
        ivi->qos = evport->info.qos;
        ivi->spoofchk = evport->info.spoofchk;
        ivi->trusted = evport->info.trusted;
        mutex_unlock(&esw->state_lock);

        return 0;
}
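
/* vport numbering is PF-relative: vport 0 is the PF itself and VF n maps
 * to vport n + 1, hence the ivi->vf = vport - 1 translation above and the
 * "if (vport)" other_vport selectors in the firmware commands.
 */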

int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
                                int vport, u16 vlan, u8 qos)
{
        struct mlx5_vport *evport;
        int err = 0;
        int set = 0;

        if (!ESW_ALLOWED(esw))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
                return -EINVAL;

        if (vlan || qos)
                set = 1;

        mutex_lock(&esw->state_lock);
        evport = &esw->vports[vport];

        err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
        if (err)
                goto unlock;

        evport->info.vlan = vlan;
        evport->info.qos = qos;
        if (evport->enabled && esw->mode == SRIOV_LEGACY) {
                err = esw_vport_ingress_config(esw, evport);
                if (err)
                        goto unlock;
                err = esw_vport_egress_config(esw, evport);
        }

unlock:
        mutex_unlock(&esw->state_lock);
        return err;
}

int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
                                    int vport, bool spoofchk)
{
        struct mlx5_vport *evport;
        bool pschk;
        int err = 0;

        if (!ESW_ALLOWED(esw))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport))
                return -EINVAL;

        mutex_lock(&esw->state_lock);
        evport = &esw->vports[vport];
        pschk = evport->info.spoofchk;
        evport->info.spoofchk = spoofchk;
        if (evport->enabled && esw->mode == SRIOV_LEGACY)
                err = esw_vport_ingress_config(esw, evport);
        if (err)
                evport->info.spoofchk = pschk;
        mutex_unlock(&esw->state_lock);

        return err;
}

int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
                                 int vport, bool setting)
{
        struct mlx5_vport *evport;

        if (!ESW_ALLOWED(esw))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport))
                return -EINVAL;

        mutex_lock(&esw->state_lock);
        evport = &esw->vports[vport];
        evport->info.trusted = setting;
        if (evport->enabled)
                esw_vport_change_handle_locked(evport);
        mutex_unlock(&esw->state_lock);

        return 0;
}

int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
                                 int vport,
                                 struct ifla_vf_stats *vf_stats)
{
        int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
        int err = 0;
        u32 *out;

        if (!ESW_ALLOWED(esw))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport))
                return -EINVAL;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        MLX5_SET(query_vport_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
        MLX5_SET(query_vport_counter_in, in, vport_number, vport);
        if (vport)
                MLX5_SET(query_vport_counter_in, in, other_vport, 1);

        memset(out, 0, outlen);
        err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
        if (err)
                goto free_out;

#define MLX5_GET_CTR(p, x) \
        MLX5_GET64(query_vport_counter_out, p, x)

        memset(vf_stats, 0, sizeof(*vf_stats));
        vf_stats->rx_packets =
                MLX5_GET_CTR(out, received_eth_unicast.packets) +
                MLX5_GET_CTR(out, received_eth_multicast.packets) +
                MLX5_GET_CTR(out, received_eth_broadcast.packets);

        vf_stats->rx_bytes =
                MLX5_GET_CTR(out, received_eth_unicast.octets) +
                MLX5_GET_CTR(out, received_eth_multicast.octets) +
                MLX5_GET_CTR(out, received_eth_broadcast.octets);

        vf_stats->tx_packets =
                MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
                MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
                MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);

        vf_stats->tx_bytes =
                MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
                MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
                MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

        vf_stats->multicast =
                MLX5_GET_CTR(out, received_eth_multicast.packets);

        vf_stats->broadcast =
                MLX5_GET_CTR(out, received_eth_broadcast.packets);

free_out:
        kvfree(out);
        return err;
}