2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <linux/list.h>
52 #include <linux/dcbnl.h>
53 #include <net/switchdev.h>
54 #include <generated/utsrelease.h>
63 static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
64 static const char mlxsw_sp_driver_version[] = "1.0";
70 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
73 * Packet control type.
74 * 0 - Ethernet control (e.g. EMADs, LACP)
77 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
80 * Packet protocol type. Must be set to 1 (Ethernet).
82 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
84 /* tx_hdr_rx_is_router
85 * Packet is sent from the router. Valid for data packets only.
87 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
90 * Indicates if the 'fid' field is valid and should be used for
91 * forwarding lookup. Valid for data packets only.
93 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
96 * Switch partition ID. Must be set to 0.
98 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
100 /* tx_hdr_control_tclass
101 * Indicates if the packet should use the control TClass and not one
102 * of the data TClasses.
104 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
107 * Egress TClass to be used on the egress device on the egress port.
109 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
112 * Destination local port for unicast packets.
113 * Destination multicast ID for multicast packets.
115 * Control packets are directed to a specific egress port, while data
116 * packets are transmitted through the CPU port (0) into the switch partition,
117 * where forwarding rules are applied.
119 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
122 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
123 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
124 * Valid for data packets only.
126 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
130 * 6 - Control packets
132 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
134 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
135 const struct mlxsw_tx_info *tx_info)
137 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
139 memset(txhdr, 0, MLXSW_TXHDR_LEN);
141 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
142 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
143 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
144 mlxsw_tx_hdr_swid_set(txhdr, 0);
145 mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
146 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
147 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
150 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
152 char spad_pl[MLXSW_REG_SPAD_LEN];
155 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
158 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
162 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
165 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
166 char paos_pl[MLXSW_REG_PAOS_LEN];
168 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
169 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
170 MLXSW_PORT_ADMIN_STATUS_DOWN);
171 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
174 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
177 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
178 char paos_pl[MLXSW_REG_PAOS_LEN];
182 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
183 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
186 oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
187 *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
191 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
194 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
195 char ppad_pl[MLXSW_REG_PPAD_LEN];
197 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
198 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
199 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
202 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
204 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
205 unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
207 ether_addr_copy(addr, mlxsw_sp->base_mac);
208 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
209 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
212 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
213 u16 vid, enum mlxsw_reg_spms_state state)
215 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
219 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
222 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
223 mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
224 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
229 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
231 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
232 char pmtu_pl[MLXSW_REG_PMTU_LEN];
236 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
237 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
238 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
241 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
246 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
247 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
250 static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
253 char pspa_pl[MLXSW_REG_PSPA_LEN];
255 mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
256 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
259 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
261 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
263 return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
267 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
270 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
271 char svpe_pl[MLXSW_REG_SVPE_LEN];
273 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
274 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
277 int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
278 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
281 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
282 char svfa_pl[MLXSW_REG_SVFA_LEN];
284 mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
286 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
289 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
290 u16 vid, bool learn_enable)
292 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
296 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
299 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
301 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
307 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
309 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
310 char sspr_pl[MLXSW_REG_SSPR_LEN];
312 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
313 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
316 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
317 u8 local_port, u8 *p_module,
318 u8 *p_width, u8 *p_lane)
320 char pmlp_pl[MLXSW_REG_PMLP_LEN];
323 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
324 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
327 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
328 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
329 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
333 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
334 u8 module, u8 width, u8 lane)
336 char pmlp_pl[MLXSW_REG_PMLP_LEN];
339 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
340 mlxsw_reg_pmlp_width_set(pmlp_pl, width);
341 for (i = 0; i < width; i++) {
342 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
343 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
346 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
349 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
351 char pmlp_pl[MLXSW_REG_PMLP_LEN];
353 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
354 mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
355 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
358 static int mlxsw_sp_port_open(struct net_device *dev)
360 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
363 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
366 netif_start_queue(dev);
370 static int mlxsw_sp_port_stop(struct net_device *dev)
372 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
374 netif_stop_queue(dev);
375 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
378 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
379 struct net_device *dev)
381 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
382 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
383 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
384 const struct mlxsw_tx_info tx_info = {
385 .local_port = mlxsw_sp_port->local_port,
391 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
392 return NETDEV_TX_BUSY;
394 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
395 struct sk_buff *skb_orig = skb;
397 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
399 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
400 dev_kfree_skb_any(skb_orig);
405 if (eth_skb_pad(skb)) {
406 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
410 mlxsw_sp_txhdr_construct(skb, &tx_info);
411 /* TX header is consumed by HW on the way so we shouldn't count its
412 * bytes as being sent.
414 len = skb->len - MLXSW_TXHDR_LEN;
416 /* Due to a race we might fail here because of a full queue. In that
417 * unlikely case we simply drop the packet.
419 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
422 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
423 u64_stats_update_begin(&pcpu_stats->syncp);
424 pcpu_stats->tx_packets++;
425 pcpu_stats->tx_bytes += len;
426 u64_stats_update_end(&pcpu_stats->syncp);
428 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
429 dev_kfree_skb_any(skb);
434 static void mlxsw_sp_set_rx_mode(struct net_device *dev)
438 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
440 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
441 struct sockaddr *addr = p;
444 if (!is_valid_ether_addr(addr->sa_data))
445 return -EADDRNOTAVAIL;
447 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
450 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
454 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
455 bool pause_en, bool pfc_en, u16 delay)
457 u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
459 delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
460 MLXSW_SP_PAUSE_DELAY;
462 if (pause_en || pfc_en)
463 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
464 pg_size + delay, pg_size);
466 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
469 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
470 u8 *prio_tc, bool pause_en,
471 struct ieee_pfc *my_pfc)
473 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
474 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
475 u16 delay = !!my_pfc ? my_pfc->delay : 0;
476 char pbmc_pl[MLXSW_REG_PBMC_LEN];
479 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
480 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
484 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
485 bool configure = false;
488 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
489 if (prio_tc[j] == i) {
490 pfc = pfc_en & BIT(j);
498 mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
501 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
504 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
505 int mtu, bool pause_en)
507 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
508 bool dcb_en = !!mlxsw_sp_port->dcb.ets;
509 struct ieee_pfc *my_pfc;
512 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
513 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
515 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
519 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
521 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
522 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
525 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
528 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
530 goto err_port_mtu_set;
535 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
539 static struct rtnl_link_stats64 *
540 mlxsw_sp_port_get_stats64(struct net_device *dev,
541 struct rtnl_link_stats64 *stats)
543 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
544 struct mlxsw_sp_port_pcpu_stats *p;
545 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
550 for_each_possible_cpu(i) {
551 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
553 start = u64_stats_fetch_begin_irq(&p->syncp);
554 rx_packets = p->rx_packets;
555 rx_bytes = p->rx_bytes;
556 tx_packets = p->tx_packets;
557 tx_bytes = p->tx_bytes;
558 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
560 stats->rx_packets += rx_packets;
561 stats->rx_bytes += rx_bytes;
562 stats->tx_packets += tx_packets;
563 stats->tx_bytes += tx_bytes;
564 /* tx_dropped is u32, updated without syncp protection. */
565 tx_dropped += p->tx_dropped;
567 stats->tx_dropped = tx_dropped;
571 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
572 u16 vid_end, bool is_member, bool untagged)
574 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
578 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
582 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
583 vid_end, is_member, untagged);
584 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
589 static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
591 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
592 u16 vid, last_visited_vid;
595 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
596 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
599 last_visited_vid = vid;
600 goto err_port_vid_to_fid_set;
604 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
606 last_visited_vid = VLAN_N_VID;
607 goto err_port_vid_to_fid_set;
612 err_port_vid_to_fid_set:
613 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
614 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
619 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
621 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
625 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
629 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
630 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
639 static struct mlxsw_sp_vfid *
640 mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
642 struct mlxsw_sp_vfid *vfid;
644 list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
645 if (vfid->vid == vid)
652 static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
654 return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
655 MLXSW_SP_VFID_PORT_MAX);
658 static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
660 u16 fid = mlxsw_sp_vfid_to_fid(vfid);
661 char sfmr_pl[MLXSW_REG_SFMR_LEN];
663 mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
664 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
667 static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
669 u16 fid = mlxsw_sp_vfid_to_fid(vfid);
670 char sfmr_pl[MLXSW_REG_SFMR_LEN];
672 mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
673 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
676 static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
679 struct device *dev = mlxsw_sp->bus_info->dev;
680 struct mlxsw_sp_vfid *vfid;
684 n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
685 if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
686 dev_err(dev, "No available vFIDs\n");
687 return ERR_PTR(-ERANGE);
690 err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
692 dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
696 vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
698 goto err_allocate_vfid;
703 list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
704 set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);
709 __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
710 return ERR_PTR(-ENOMEM);
713 static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
714 struct mlxsw_sp_vfid *vfid)
716 clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
717 list_del(&vfid->list);
719 __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
724 static struct mlxsw_sp_port *
725 mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
726 struct mlxsw_sp_vfid *vfid)
728 struct mlxsw_sp_port *mlxsw_sp_vport;
730 mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
734 /* dev will be set correctly after the VLAN device is linked
735 * with the real device. In case of bridge SELF invocation, dev
738 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
739 mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
740 mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
741 mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
742 mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
743 mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
744 mlxsw_sp_vport->vport.vfid = vfid;
745 mlxsw_sp_vport->vport.vid = vfid->vid;
747 list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
749 return mlxsw_sp_vport;
752 static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
754 list_del(&mlxsw_sp_vport->vport.list);
755 kfree(mlxsw_sp_vport);
758 int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
761 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
762 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
763 struct mlxsw_sp_port *mlxsw_sp_vport;
764 struct mlxsw_sp_vfid *vfid;
767 /* VLAN 0 is added to HW filter when device goes up, but it is
768 * reserved in our case, so simply return.
773 if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
774 netdev_warn(dev, "VID=%d already configured\n", vid);
778 vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
780 vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
782 netdev_err(dev, "Failed to create vFID for VID=%d\n",
784 return PTR_ERR(vfid);
788 mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
789 if (!mlxsw_sp_vport) {
790 netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
792 goto err_port_vport_create;
795 if (!vfid->nr_vports) {
796 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
799 netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
801 goto err_vport_flood_set;
805 /* When adding the first VLAN interface on a bridged port we need to
806 * transition all the active 802.1Q bridge VLANs to use explicit
807 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
809 if (list_is_singular(&mlxsw_sp_port->vports_list)) {
810 err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
812 netdev_err(dev, "Failed to set to Virtual mode\n");
813 goto err_port_vp_mode_trans;
817 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
818 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
820 mlxsw_sp_vfid_to_fid(vfid->vfid),
823 netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
825 goto err_port_vid_to_fid_set;
828 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
830 netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
831 goto err_port_vid_learning_set;
834 err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
836 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
838 goto err_port_add_vid;
841 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
842 MLXSW_REG_SPMS_STATE_FORWARDING);
844 netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
845 goto err_port_stp_state_set;
852 err_port_stp_state_set:
853 mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
855 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
856 err_port_vid_learning_set:
857 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
858 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
859 mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
860 err_port_vid_to_fid_set:
861 if (list_is_singular(&mlxsw_sp_port->vports_list))
862 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
863 err_port_vp_mode_trans:
864 if (!vfid->nr_vports)
865 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
868 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
869 err_port_vport_create:
870 if (!vfid->nr_vports)
871 mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
875 int mlxsw_sp_port_kill_vid(struct net_device *dev,
876 __be16 __always_unused proto, u16 vid)
878 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
879 struct mlxsw_sp_port *mlxsw_sp_vport;
880 struct mlxsw_sp_vfid *vfid;
883 /* VLAN 0 is removed from HW filter when device goes down, but
884 * it is reserved in our case, so simply return.
889 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
890 if (!mlxsw_sp_vport) {
891 netdev_warn(dev, "VID=%d does not exist\n", vid);
895 vfid = mlxsw_sp_vport->vport.vfid;
897 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
898 MLXSW_REG_SPMS_STATE_DISCARDING);
900 netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
904 err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
906 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
911 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
913 netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
917 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
918 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
920 mlxsw_sp_vfid_to_fid(vfid->vfid),
923 netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
928 /* When removing the last VLAN interface on a bridged port we need to
929 * transition all active 802.1Q bridge VLANs to use VID to FID
930 * mappings and set port's mode to VLAN mode.
932 if (list_is_singular(&mlxsw_sp_port->vports_list)) {
933 err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
935 netdev_err(dev, "Failed to set to VLAN mode\n");
941 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
943 /* Destroy the vFID if no vPorts are assigned to it anymore. */
944 if (!vfid->nr_vports)
945 mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);
950 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
953 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
954 u8 module = mlxsw_sp_port->mapping.module;
955 u8 width = mlxsw_sp_port->mapping.width;
956 u8 lane = mlxsw_sp_port->mapping.lane;
959 if (!mlxsw_sp_port->split)
960 err = snprintf(name, len, "p%d", module + 1);
962 err = snprintf(name, len, "p%ds%d", module + 1,
971 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
972 .ndo_open = mlxsw_sp_port_open,
973 .ndo_stop = mlxsw_sp_port_stop,
974 .ndo_start_xmit = mlxsw_sp_port_xmit,
975 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
976 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
977 .ndo_change_mtu = mlxsw_sp_port_change_mtu,
978 .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
979 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
980 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
981 .ndo_fdb_add = switchdev_port_fdb_add,
982 .ndo_fdb_del = switchdev_port_fdb_del,
983 .ndo_fdb_dump = switchdev_port_fdb_dump,
984 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
985 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
986 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
987 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
990 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
991 struct ethtool_drvinfo *drvinfo)
993 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
994 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
996 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
997 strlcpy(drvinfo->version, mlxsw_sp_driver_version,
998 sizeof(drvinfo->version));
999 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1001 mlxsw_sp->bus_info->fw_rev.major,
1002 mlxsw_sp->bus_info->fw_rev.minor,
1003 mlxsw_sp->bus_info->fw_rev.subminor);
1004 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1005 sizeof(drvinfo->bus_info));
1008 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1009 struct ethtool_pauseparam *pause)
1011 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1013 pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1014 pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1017 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1018 struct ethtool_pauseparam *pause)
1020 char pfcc_pl[MLXSW_REG_PFCC_LEN];
1022 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1023 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1024 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1026 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1030 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1031 struct ethtool_pauseparam *pause)
1033 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1034 bool pause_en = pause->tx_pause || pause->rx_pause;
1037 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1038 netdev_err(dev, "PFC already enabled on port\n");
1042 if (pause->autoneg) {
1043 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1047 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1049 netdev_err(dev, "Failed to configure port's headroom\n");
1053 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1055 netdev_err(dev, "Failed to set PAUSE parameters\n");
1056 goto err_port_pause_configure;
1059 mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1060 mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1064 err_port_pause_configure:
1065 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1066 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1070 struct mlxsw_sp_port_hw_stats {
1071 char str[ETH_GSTRING_LEN];
1072 u64 (*getter)(char *payload);
1075 static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
1077 .str = "a_frames_transmitted_ok",
1078 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
1081 .str = "a_frames_received_ok",
1082 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
1085 .str = "a_frame_check_sequence_errors",
1086 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
1089 .str = "a_alignment_errors",
1090 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
1093 .str = "a_octets_transmitted_ok",
1094 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
1097 .str = "a_octets_received_ok",
1098 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
1101 .str = "a_multicast_frames_xmitted_ok",
1102 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
1105 .str = "a_broadcast_frames_xmitted_ok",
1106 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
1109 .str = "a_multicast_frames_received_ok",
1110 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
1113 .str = "a_broadcast_frames_received_ok",
1114 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
1117 .str = "a_in_range_length_errors",
1118 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
1121 .str = "a_out_of_range_length_field",
1122 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
1125 .str = "a_frame_too_long_errors",
1126 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
1129 .str = "a_symbol_error_during_carrier",
1130 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
1133 .str = "a_mac_control_frames_transmitted",
1134 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
1137 .str = "a_mac_control_frames_received",
1138 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
1141 .str = "a_unsupported_opcodes_received",
1142 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
1145 .str = "a_pause_mac_ctrl_frames_received",
1146 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
1149 .str = "a_pause_mac_ctrl_frames_xmitted",
1150 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
1154 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1156 static void mlxsw_sp_port_get_strings(struct net_device *dev,
1157 u32 stringset, u8 *data)
1162 switch (stringset) {
1164 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1165 memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1167 p += ETH_GSTRING_LEN;
1173 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1174 enum ethtool_phys_id_state state)
1176 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1177 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1178 char mlcr_pl[MLXSW_REG_MLCR_LEN];
1182 case ETHTOOL_ID_ACTIVE:
1185 case ETHTOOL_ID_INACTIVE:
1192 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1193 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1196 static void mlxsw_sp_port_get_stats(struct net_device *dev,
1197 struct ethtool_stats *stats, u64 *data)
1199 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1200 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1201 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1205 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
1206 MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
1207 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1208 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
1209 data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
1212 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1216 return MLXSW_SP_PORT_HW_STATS_LEN;
1222 struct mlxsw_sp_port_link_mode {
1229 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
1231 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
1232 .supported = SUPPORTED_100baseT_Full,
1233 .advertised = ADVERTISED_100baseT_Full,
1237 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
1241 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
1242 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
1243 .supported = SUPPORTED_1000baseKX_Full,
1244 .advertised = ADVERTISED_1000baseKX_Full,
1248 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
1249 .supported = SUPPORTED_10000baseT_Full,
1250 .advertised = ADVERTISED_10000baseT_Full,
1254 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
1255 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
1256 .supported = SUPPORTED_10000baseKX4_Full,
1257 .advertised = ADVERTISED_10000baseKX4_Full,
1261 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1262 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1263 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1264 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
1265 .supported = SUPPORTED_10000baseKR_Full,
1266 .advertised = ADVERTISED_10000baseKR_Full,
1270 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
1271 .supported = SUPPORTED_20000baseKR2_Full,
1272 .advertised = ADVERTISED_20000baseKR2_Full,
1276 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
1277 .supported = SUPPORTED_40000baseCR4_Full,
1278 .advertised = ADVERTISED_40000baseCR4_Full,
1282 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
1283 .supported = SUPPORTED_40000baseKR4_Full,
1284 .advertised = ADVERTISED_40000baseKR4_Full,
1288 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
1289 .supported = SUPPORTED_40000baseSR4_Full,
1290 .advertised = ADVERTISED_40000baseSR4_Full,
1294 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
1295 .supported = SUPPORTED_40000baseLR4_Full,
1296 .advertised = ADVERTISED_40000baseLR4_Full,
1300 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
1301 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
1302 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
1306 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
1307 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
1308 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
1312 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1313 .supported = SUPPORTED_56000baseKR4_Full,
1314 .advertised = ADVERTISED_56000baseKR4_Full,
1318 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
1319 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1320 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1321 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
1326 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1328 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1330 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1331 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1332 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1333 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1334 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1335 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1336 return SUPPORTED_FIBRE;
1338 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1339 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1340 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1341 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1342 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1343 return SUPPORTED_Backplane;
1347 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1352 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1353 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1354 modes |= mlxsw_sp_port_link_mode[i].supported;
1359 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1364 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1365 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1366 modes |= mlxsw_sp_port_link_mode[i].advertised;
1371 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1372 struct ethtool_cmd *cmd)
1374 u32 speed = SPEED_UNKNOWN;
1375 u8 duplex = DUPLEX_UNKNOWN;
1381 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1382 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1383 speed = mlxsw_sp_port_link_mode[i].speed;
1384 duplex = DUPLEX_FULL;
1389 ethtool_cmd_speed_set(cmd, speed);
1390 cmd->duplex = duplex;
1393 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1395 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1396 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1397 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1398 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1401 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1402 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1403 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1406 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1407 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1408 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1409 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1415 static int mlxsw_sp_port_get_settings(struct net_device *dev,
1416 struct ethtool_cmd *cmd)
1418 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1419 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1420 char ptys_pl[MLXSW_REG_PTYS_LEN];
1422 u32 eth_proto_admin;
1426 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1427 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1429 netdev_err(dev, "Failed to get proto");
1432 mlxsw_reg_ptys_unpack(ptys_pl, ð_proto_cap,
1433 ð_proto_admin, ð_proto_oper);
1435 cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
1436 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
1437 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1438 cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
1439 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
1440 eth_proto_oper, cmd);
1442 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
1443 cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
1444 cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
1446 cmd->transceiver = XCVR_INTERNAL;
1450 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1455 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1456 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1457 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1462 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1467 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1468 if (speed == mlxsw_sp_port_link_mode[i].speed)
1469 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1474 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
1479 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1480 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
1481 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1486 static int mlxsw_sp_port_set_settings(struct net_device *dev,
1487 struct ethtool_cmd *cmd)
1489 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1490 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1491 char ptys_pl[MLXSW_REG_PTYS_LEN];
1495 u32 eth_proto_admin;
1499 speed = ethtool_cmd_speed(cmd);
1501 eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
1502 mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
1503 mlxsw_sp_to_ptys_speed(speed);
1505 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1506 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1508 netdev_err(dev, "Failed to get proto");
1511 mlxsw_reg_ptys_unpack(ptys_pl, ð_proto_cap, ð_proto_admin, NULL);
1513 eth_proto_new = eth_proto_new & eth_proto_cap;
1514 if (!eth_proto_new) {
1515 netdev_err(dev, "Not supported proto admin requested");
1518 if (eth_proto_new == eth_proto_admin)
1521 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
1522 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1524 netdev_err(dev, "Failed to set proto admin");
1528 err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
1530 netdev_err(dev, "Failed to get oper status");
1536 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1538 netdev_err(dev, "Failed to set admin status");
1542 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1544 netdev_err(dev, "Failed to set admin status");
1551 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
1552 .get_drvinfo = mlxsw_sp_port_get_drvinfo,
1553 .get_link = ethtool_op_get_link,
1554 .get_pauseparam = mlxsw_sp_port_get_pauseparam,
1555 .set_pauseparam = mlxsw_sp_port_set_pauseparam,
1556 .get_strings = mlxsw_sp_port_get_strings,
1557 .set_phys_id = mlxsw_sp_port_set_phys_id,
1558 .get_ethtool_stats = mlxsw_sp_port_get_stats,
1559 .get_sset_count = mlxsw_sp_port_get_sset_count,
1560 .get_settings = mlxsw_sp_port_get_settings,
1561 .set_settings = mlxsw_sp_port_set_settings,
1565 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
1567 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1568 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
1569 char ptys_pl[MLXSW_REG_PTYS_LEN];
1570 u32 eth_proto_admin;
1572 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
1573 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
1575 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1578 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1579 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1580 bool dwrr, u8 dwrr_weight)
1582 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1583 char qeec_pl[MLXSW_REG_QEEC_LEN];
1585 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1587 mlxsw_reg_qeec_de_set(qeec_pl, true);
1588 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1589 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1590 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1593 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1594 enum mlxsw_reg_qeec_hr hr, u8 index,
1595 u8 next_index, u32 maxrate)
1597 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1598 char qeec_pl[MLXSW_REG_QEEC_LEN];
1600 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1602 mlxsw_reg_qeec_mase_set(qeec_pl, true);
1603 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1604 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1607 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1608 u8 switch_prio, u8 tclass)
1610 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1611 char qtct_pl[MLXSW_REG_QTCT_LEN];
1613 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1615 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1618 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
1622 /* Setup the elements hierarcy, so that each TC is linked to
1623 * one subgroup, which are all member in the same group.
1625 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1626 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
1630 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1631 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1632 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
1637 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1638 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1639 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
1645 /* Make sure the max shaper is disabled in all hierarcies that
1648 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1649 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
1650 MLXSW_REG_QEEC_MAS_DIS);
1653 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1654 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1655 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
1657 MLXSW_REG_QEEC_MAS_DIS);
1661 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1662 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1663 MLXSW_REG_QEEC_HIERARCY_TC,
1665 MLXSW_REG_QEEC_MAS_DIS);
1670 /* Map all priorities to traffic class 0. */
1671 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1672 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
1680 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1681 bool split, u8 module, u8 width, u8 lane)
1683 struct mlxsw_sp_port *mlxsw_sp_port;
1684 struct net_device *dev;
1688 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1691 mlxsw_sp_port = netdev_priv(dev);
1692 mlxsw_sp_port->dev = dev;
1693 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1694 mlxsw_sp_port->local_port = local_port;
1695 mlxsw_sp_port->split = split;
1696 mlxsw_sp_port->mapping.module = module;
1697 mlxsw_sp_port->mapping.width = width;
1698 mlxsw_sp_port->mapping.lane = lane;
1699 bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
1700 mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
1701 if (!mlxsw_sp_port->active_vlans) {
1703 goto err_port_active_vlans_alloc;
1705 mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
1706 if (!mlxsw_sp_port->untagged_vlans) {
1708 goto err_port_untagged_vlans_alloc;
1710 INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
1712 mlxsw_sp_port->pcpu_stats =
1713 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1714 if (!mlxsw_sp_port->pcpu_stats) {
1716 goto err_alloc_stats;
1719 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1720 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1722 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1724 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1725 mlxsw_sp_port->local_port);
1726 goto err_dev_addr_init;
1729 netif_carrier_off(dev);
1731 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1732 NETIF_F_HW_VLAN_CTAG_FILTER;
1734 /* Each packet needs to have a Tx header (metadata) on top all other
1737 dev->hard_header_len += MLXSW_TXHDR_LEN;
1739 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1741 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1742 mlxsw_sp_port->local_port);
1743 goto err_port_system_port_mapping_set;
1746 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1748 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1749 mlxsw_sp_port->local_port);
1750 goto err_port_swid_set;
1753 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
1755 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1756 mlxsw_sp_port->local_port);
1757 goto err_port_speed_by_width_set;
1760 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1762 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1763 mlxsw_sp_port->local_port);
1764 goto err_port_mtu_set;
1767 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1769 goto err_port_admin_status_set;
1771 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1773 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1774 mlxsw_sp_port->local_port);
1775 goto err_port_buffers_init;
1778 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1780 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1781 mlxsw_sp_port->local_port);
1782 goto err_port_ets_init;
1785 /* ETS and buffers must be initialized before DCB. */
1786 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1788 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1789 mlxsw_sp_port->local_port);
1790 goto err_port_dcb_init;
1793 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
1794 err = register_netdev(dev);
1796 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1797 mlxsw_sp_port->local_port);
1798 goto err_register_netdev;
1801 err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
1802 mlxsw_sp_port->local_port, dev,
1803 mlxsw_sp_port->split, module);
1805 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1806 mlxsw_sp_port->local_port);
1807 goto err_core_port_init;
1810 err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
1812 goto err_port_vlan_init;
1814 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1818 mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1820 unregister_netdev(dev);
1821 err_register_netdev:
1824 err_port_buffers_init:
1825 err_port_admin_status_set:
1827 err_port_speed_by_width_set:
1829 err_port_system_port_mapping_set:
1831 free_percpu(mlxsw_sp_port->pcpu_stats);
1833 kfree(mlxsw_sp_port->untagged_vlans);
1834 err_port_untagged_vlans_alloc:
1835 kfree(mlxsw_sp_port->active_vlans);
1836 err_port_active_vlans_alloc:
1841 static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
1843 struct net_device *dev = mlxsw_sp_port->dev;
1844 struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;
1846 list_for_each_entry_safe(mlxsw_sp_vport, tmp,
1847 &mlxsw_sp_port->vports_list, vport.list) {
1848 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
1850 /* vPorts created for VLAN devices should already be gone
1851 * by now, since we unregistered the port netdev.
1853 WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
1854 mlxsw_sp_port_kill_vid(dev, 0, vid);
1858 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1860 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
1864 mlxsw_sp->ports[local_port] = NULL;
1865 mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1866 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
1867 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
1868 mlxsw_sp_port_vports_fini(mlxsw_sp_port);
1869 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
1870 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
1871 mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
1872 free_percpu(mlxsw_sp_port->pcpu_stats);
1873 kfree(mlxsw_sp_port->untagged_vlans);
1874 kfree(mlxsw_sp_port->active_vlans);
1875 free_netdev(mlxsw_sp_port->dev);
1878 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1882 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1883 mlxsw_sp_port_remove(mlxsw_sp, i);
1884 kfree(mlxsw_sp->ports);
1887 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1889 u8 module, width, lane;
1894 alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1895 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1896 if (!mlxsw_sp->ports)
1899 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1900 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
1903 goto err_port_module_info_get;
1906 mlxsw_sp->port_to_module[i] = module;
1907 err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
1910 goto err_port_create;
1915 err_port_module_info_get:
1916 for (i--; i >= 1; i--)
1917 mlxsw_sp_port_remove(mlxsw_sp, i);
1918 kfree(mlxsw_sp->ports);
1922 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1924 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
1926 return local_port - offset;
1929 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
1930 u8 module, unsigned int count)
1932 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
1935 for (i = 0; i < count; i++) {
1936 err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
1939 goto err_port_module_map;
1942 for (i = 0; i < count; i++) {
1943 err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
1945 goto err_port_swid_set;
1948 for (i = 0; i < count; i++) {
1949 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
1950 module, width, i * width);
1952 goto err_port_create;
1958 for (i--; i >= 0; i--)
1959 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
1962 for (i--; i >= 0; i--)
1963 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
1964 MLXSW_PORT_SWID_DISABLED_PORT);
1966 err_port_module_map:
1967 for (i--; i >= 0; i--)
1968 mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
1972 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
1973 u8 base_port, unsigned int count)
1975 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
1978 /* Split by four means we need to re-create two ports, otherwise
1983 for (i = 0; i < count; i++) {
1984 local_port = base_port + i * 2;
1985 module = mlxsw_sp->port_to_module[local_port];
1987 mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
1991 for (i = 0; i < count; i++)
1992 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
1994 for (i = 0; i < count; i++) {
1995 local_port = base_port + i * 2;
1996 module = mlxsw_sp->port_to_module[local_port];
1998 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
2003 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
2006 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2007 struct mlxsw_sp_port *mlxsw_sp_port;
2008 u8 module, cur_width, base_port;
2012 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2013 if (!mlxsw_sp_port) {
2014 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2019 module = mlxsw_sp_port->mapping.module;
2020 cur_width = mlxsw_sp_port->mapping.width;
2022 if (count != 2 && count != 4) {
2023 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
2027 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
2028 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
2032 /* Make sure we have enough slave (even) ports for the split. */
2034 base_port = local_port;
2035 if (mlxsw_sp->ports[base_port + 1]) {
2036 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2040 base_port = mlxsw_sp_cluster_base_port_get(local_port);
2041 if (mlxsw_sp->ports[base_port + 1] ||
2042 mlxsw_sp->ports[base_port + 3]) {
2043 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2048 for (i = 0; i < count; i++)
2049 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2051 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
2053 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2054 goto err_port_split_create;
2059 err_port_split_create:
2060 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2064 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
2066 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2067 struct mlxsw_sp_port *mlxsw_sp_port;
2068 u8 cur_width, base_port;
2072 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2073 if (!mlxsw_sp_port) {
2074 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2079 if (!mlxsw_sp_port->split) {
2080 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
2084 cur_width = mlxsw_sp_port->mapping.width;
2085 count = cur_width == 1 ? 4 : 2;
2087 base_port = mlxsw_sp_cluster_base_port_get(local_port);
2089 /* Determine which ports to remove. */
2090 if (count == 2 && local_port >= base_port + 2)
2091 base_port = base_port + 2;
2093 for (i = 0; i < count; i++)
2094 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2096 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2101 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2102 char *pude_pl, void *priv)
2104 struct mlxsw_sp *mlxsw_sp = priv;
2105 struct mlxsw_sp_port *mlxsw_sp_port;
2106 enum mlxsw_reg_pude_oper_status status;
2109 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2110 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2111 if (!mlxsw_sp_port) {
2112 dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
2117 status = mlxsw_reg_pude_oper_status_get(pude_pl);
2118 if (status == MLXSW_PORT_OPER_STATUS_UP) {
2119 netdev_info(mlxsw_sp_port->dev, "link up\n");
2120 netif_carrier_on(mlxsw_sp_port->dev);
2122 netdev_info(mlxsw_sp_port->dev, "link down\n");
2123 netif_carrier_off(mlxsw_sp_port->dev);
2127 static struct mlxsw_event_listener mlxsw_sp_pude_event = {
2128 .func = mlxsw_sp_pude_event_func,
2129 .trap_id = MLXSW_TRAP_ID_PUDE,
2132 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
2133 enum mlxsw_event_trap_id trap_id)
2135 struct mlxsw_event_listener *el;
2136 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2140 case MLXSW_TRAP_ID_PUDE:
2141 el = &mlxsw_sp_pude_event;
2144 err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
2148 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
2149 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2151 goto err_event_trap_set;
2156 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2160 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
2161 enum mlxsw_event_trap_id trap_id)
2163 struct mlxsw_event_listener *el;
2166 case MLXSW_TRAP_ID_PUDE:
2167 el = &mlxsw_sp_pude_event;
2170 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2173 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
2176 struct mlxsw_sp *mlxsw_sp = priv;
2177 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2178 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2180 if (unlikely(!mlxsw_sp_port)) {
2181 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2186 skb->dev = mlxsw_sp_port->dev;
2188 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2189 u64_stats_update_begin(&pcpu_stats->syncp);
2190 pcpu_stats->rx_packets++;
2191 pcpu_stats->rx_bytes += skb->len;
2192 u64_stats_update_end(&pcpu_stats->syncp);
2194 skb->protocol = eth_type_trans(skb, skb->dev);
2195 netif_receive_skb(skb);
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};

static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
}

static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
				 enum mlxsw_reg_sfgc_type type,
				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
{
	enum mlxsw_flood_table_type table_type;
	enum mlxsw_sp_flood_table flood_table;
	char sfgc_pl[MLXSW_REG_SFGC_LEN];

	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
	else
		flood_table = MLXSW_SP_FLOOD_TABLE_BM;

	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
			    flood_table);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
}

static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}

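/* Note (sketch): the SLCR mask above seeds the LAG egress hash with L2
 * (MACs, EtherType, VLAN), L3 (IP addresses) and L4 (ports, protocol)
 * fields, so member selection stays stable per flow. Member ports and
 * their tx state are still managed from userspace, e.g. (assumption,
 * standard iproute2):
 *
 *	ip link add name bond0 type bond mode 802.3ad
 *	ip link set dev swp1 master bond0
 */
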
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

err_switchdev_init:
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
	return err;
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
}

static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels = 1,
	.max_vepa_channels = 0,
	.used_max_lag = 1,
	.max_lag = MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag = 1,
	.max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_max_pgt = 1,
	.max_pgt = 0,
	.used_max_system_port = 1,
	.max_system_port = 64,
	.used_max_vlan_groups = 1,
	.max_vlan_groups = 127,
	.used_max_regions = 1,
	.max_regions = 400,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_offset_flood_tables = 2,
	.fid_offset_flood_table_size = VLAN_N_VID - 1,
	.max_fid_flood_tables = 2,
	.fid_flood_table_size = MLXSW_SP_VFID_MAX,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

static struct mlxsw_driver mlxsw_sp_driver = {
	.kind = MLXSW_DEVICE_KIND_SPECTRUM,
	.owner = THIS_MODULE,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp_config_profile,
};

static int
mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
	mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
	mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, last_err = 0;
	u16 vid;

	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
		if (err)
			last_err = err;
	}

	return last_err;
}

static int
__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, last_err = 0;
	u16 vid;

	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
		if (err)
			last_err = err;
	}

	return last_err;
}

static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!list_empty(&mlxsw_sp_port->vports_list))
		if (mlxsw_sp_port->lagged)
			return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
		else
			return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
	else
		if (mlxsw_sp_port->lagged)
			return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
		else
			return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
}

static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);

	if (mlxsw_sp_vport->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
							     fid);
	else
		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
}

static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}

static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool flush_fdb)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
		netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	return mlxsw_sp_port_add_vid(dev, 0, 1);
}

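/* Usage sketch (assumption, standard iproute2): join/leave above are
 * driven by bridge enslavement notifications handled further below:
 *
 *	ip link add name br0 type bridge
 *	ip link set dev swp1 master br0		// -> mlxsw_sp_port_bridge_join()
 *	ip link set dev swp1 nomaster		// -> mlxsw_sp_port_bridge_leave()
 */
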
static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0)
		mlxsw_sp->master_bridge.dev = NULL;
}

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	int i;

	for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;
	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev,
				       bool flush_fdb);

static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_upper *lag;
	u16 lag_id = mlxsw_sp_port->lag_id;
	int err;

	if (!mlxsw_sp_port->lagged)
		return 0;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
	if (err)
		return err;

	/* In case we leave a LAG device that has bridges built on top,
	 * then their teardown sequence is never issued and we need to
	 * invoke the necessary cleanup routines ourselves.
	 */
	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *br_dev;

		if (!mlxsw_sp_vport->bridged)
			continue;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
	}

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
		mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
	}

	if (lag->ref_count == 1) {
		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
		if (err)
			return err;
	}

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;
	return 0;
}

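/* Example of the case the comment above guards against (sketch): with
 * bond0.100 enslaved to br0, deleting bond0 tears down its uppers
 * without a per-port leave sequence ever reaching us, so lag_leave
 * walks vports_list and calls mlxsw_sp_vport_bridge_leave() itself
 * before unmapping the port from the LAG.
 */
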
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		WARN_ON(!mlxsw_sp_vport);
		return -EINVAL;
	}

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		WARN_ON(!mlxsw_sp_vport);
		return -EINVAL;
	}

	/* When removing a VLAN device while still bridged we should first
	 * remove it from the bridge, as we receive the bridge's notification
	 * when the vPort is already gone.
	 */
	if (mlxsw_sp_vport->bridged) {
		struct net_device *br_dev;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
	}

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;

	return 0;
}

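/* Usage sketch (assumption, standard iproute2): link/unlink above track
 * the lifetime of 8021q uppers:
 *
 *	ip link add link swp1 name swp1.100 type vlan id 100
 *	ip link del dev swp1.100
 *
 * The vPort for VID 100 is created when the VLAN filter is added;
 * linking only re-points its dev at the VLAN netdevice so messages and
 * notifications are attributed to the right interface.
 */
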
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master || !info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return NOTIFY_BAD;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to link VLAN device\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
								upper_dev);
				if (err) {
					netdev_err(dev, "Failed to unlink VLAN device\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				if (err) {
					netdev_err(dev, "Failed to join bridge\n");
					return NOTIFY_BAD;
				}
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
			} else {
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
								 true);
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave bridge\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
				if (err) {
					netdev_err(dev, "Failed to join link aggregation\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave link aggregation\n");
					return NOTIFY_BAD;
				}
			}
		}
		break;
	}

	return NOTIFY_DONE;
}

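/* Note (sketch): NETDEV_PRECHANGEUPPER is the veto point; returning
 * NOTIFY_BAD there makes the core abort the enslavement before any
 * state changes, so a second bridge master or a non-hash LAG is
 * rejected cleanly. NETDEV_CHANGEUPPER then only reflects topology
 * changes that were already accepted.
 */
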
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret == NOTIFY_BAD)
				return ret;
		}
	}

	return NOTIFY_DONE;
}

static struct mlxsw_sp_vfid *
mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
		      const struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
		if (vfid->br_dev == br_dev)
			return vfid;
	}

	return NULL;
}

static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
{
	return vfid - MLXSW_SP_VFID_PORT_MAX;
}

static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
{
	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
}

static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
				   MLXSW_SP_VFID_BR_MAX);
}

static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
	if (n_vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->br_dev = br_dev;

	list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
	set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);

	return vfid;

err_allocate_vfid:
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_vfid *vfid)
{
	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);

	clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}

static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev,
				       bool flush_fdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid, *new_vfid;
	int err;

	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		WARN_ON(!vfid);
		return -EINVAL;
	}

	/* We need a vFID to go back to after leaving the bridge's vFID. */
	new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!new_vfid) {
		new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(new_vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(new_vfid);
		}
	}

	/* Invalidate existing {Port, VID} to vFID mapping and create a new
	 * one for the new vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(new_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   new_vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning\n");
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
				       false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		goto err_vport_flood_set;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
		netdev_err(dev, "Failed to flush FDB\n");

	/* Switch between the vFIDs and destroy the old one if needed. */
	new_vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = new_vfid;
	vfid->nr_vports--;
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;

	return 0;

err_port_stp_state_set:
err_vport_flood_set:
err_port_vid_learning_set:
err_port_vid_to_fid_validate:
err_port_vid_to_fid_invalidate:
	/* Rollback vFID only if new. */
	if (!new_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
	return err;
}

static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid;
	int err;

	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create bridge vFID\n");
			return PTR_ERR(vfid);
		}
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
	if (err) {
		netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
			   vfid->vfid);
		goto err_port_flood_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	/* We need to invalidate existing {Port, VID} to vFID mapping and
	 * create a new one for the bridge's vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(old_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   old_vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	/* Switch between the vFIDs and destroy the old one if needed. */
	vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = vfid;
	old_vfid->nr_vports--;
	if (!old_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_to_fid_validate:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
err_port_vid_to_fid_invalidate:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
err_port_flood_set:
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
	return err;
}

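/* Background (sketch, consistent with the helpers above): vFIDs below
 * MLXSW_SP_VFID_PORT_MAX back stand-alone VLAN uppers, one per
 * {Port, VID}, while the range above it backs bridged VLAN uppers, one
 * per bridge device. Joining a bridge thus means remapping the
 * {Port, VID} pair from the port's own vFID to the bridge's shared
 * vFID, so that all member vPorts flood and learn in a single FID.
 */
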
static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
			return false;
	}

	return true;
}

static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master || !info->linking)
			break;
		if (!netif_is_bridge_master(upper_dev))
			return NOTIFY_BAD;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			if (!mlxsw_sp_vport) {
				WARN_ON(!mlxsw_sp_vport);
				return NOTIFY_BAD;
			}
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
			if (err) {
				netdev_err(dev, "Failed to join bridge\n");
				return NOTIFY_BAD;
			}
		} else {
			/* We ignore bridge's unlinking notifications if vPort
			 * is gone, since we already left the bridge when the
			 * VLAN device was unlinked from the real device.
			 */
			if (!mlxsw_sp_vport)
				return NOTIFY_DONE;
			err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
							  upper_dev, true);
			if (err) {
				netdev_err(dev, "Failed to leave bridge\n");
				return NOTIFY_BAD;
			}
		}
		break;
	}

	return NOTIFY_DONE;
}

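/* Example of the constraint checked above (sketch): swp1.10 and swp1.20
 * may not both be enslaved to the same bridge, since each enslavement
 * maps the port into the bridge's vFID; the PRECHANGEUPPER veto rejects
 * the second one before it takes effect.
 */
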
static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret == NOTIFY_BAD)
				return ret;
		}
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_netdevice_port_event(dev, event, ptr);

	if (netif_is_lag_master(dev))
		return mlxsw_sp_netdevice_lag_event(dev, event, ptr);

	if (is_vlan_dev(dev))
		return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return NOTIFY_DONE;
}

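/* Dispatch sketch: one notifier covers all netdevice events and fans
 * out by device type: front-panel port, LAG master (iterated per lower
 * port) or VLAN device (resolved to the real device, which may itself
 * be a port or a LAG). Events on unrelated netdevices fall through to
 * NOTIFY_DONE.
 */
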
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);