mlxsw: spectrum: Enable L3 interfaces on top of bridge devices
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <linux/list.h>
52 #include <linux/notifier.h>
53 #include <linux/dcbnl.h>
54 #include <linux/inetdevice.h>
55 #include <net/switchdev.h>
56 #include <generated/utsrelease.h>
57
58 #include "spectrum.h"
59 #include "core.h"
60 #include "reg.h"
61 #include "port.h"
62 #include "trap.h"
63 #include "txheader.h"
64
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* The MLXSW_ITEM32() invocations below define accessors for the fields of
 * the Tx header that is prepended to every packet handed to the device.
 * Offsets and widths describe the hardware wire format and must not change.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
135
/* Prepend and fill the hardware Tx header on an skb about to be transmitted.
 * The caller must guarantee at least MLXSW_TXHDR_LEN bytes of headroom.
 * All packets sent through this path are marked as control packets and are
 * directed to the egress port given in tx_info->local_port.
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	/* Zero first so that every field not explicitly set below is 0. */
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
151
152 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
153 {
154         char spad_pl[MLXSW_REG_SPAD_LEN];
155         int err;
156
157         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
158         if (err)
159                 return err;
160         mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
161         return 0;
162 }
163
164 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
165                                           bool is_up)
166 {
167         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
168         char paos_pl[MLXSW_REG_PAOS_LEN];
169
170         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
171                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
172                             MLXSW_PORT_ADMIN_STATUS_DOWN);
173         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
174 }
175
176 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
177                                          bool *p_is_up)
178 {
179         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
180         char paos_pl[MLXSW_REG_PAOS_LEN];
181         u8 oper_status;
182         int err;
183
184         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
185         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
186         if (err)
187                 return err;
188         oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
189         *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
190         return 0;
191 }
192
193 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
194                                       unsigned char *addr)
195 {
196         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
197         char ppad_pl[MLXSW_REG_PPAD_LEN];
198
199         mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
200         mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
201         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
202 }
203
/* Derive a port MAC from the switch base MAC by adding the local port
 * number to the last byte, then program it into both the netdev and the
 * hardware.
 * NOTE(review): the addition can wrap within the last byte for large
 * local_port values relative to the base MAC — presumably the base MAC is
 * allocated so this cannot collide; confirm against device provisioning.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
213
214 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
215 {
216         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
217         char pmtu_pl[MLXSW_REG_PMTU_LEN];
218         int max_mtu;
219         int err;
220
221         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
222         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
223         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
224         if (err)
225                 return err;
226         max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
227
228         if (mtu > max_mtu)
229                 return -EINVAL;
230
231         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
232         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
233 }
234
235 static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
236                                     u8 swid)
237 {
238         char pspa_pl[MLXSW_REG_PSPA_LEN];
239
240         mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
241         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
242 }
243
244 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
245 {
246         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
247
248         return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
249                                         swid);
250 }
251
252 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
253                                      bool enable)
254 {
255         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
256         char svpe_pl[MLXSW_REG_SVPE_LEN];
257
258         mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
259         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
260 }
261
/* Create or remove a {Port, VID} to FID mapping via the SVFA register.
 * @mt: mapping table type to operate on.
 * @valid: true to install the mapping, false to remove it.
 * Exported (non-static) for use by other parts of the driver.
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
273
274 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
275                                           u16 vid, bool learn_enable)
276 {
277         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
278         char *spvmlr_pl;
279         int err;
280
281         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
282         if (!spvmlr_pl)
283                 return -ENOMEM;
284         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
285                               learn_enable);
286         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
287         kfree(spvmlr_pl);
288         return err;
289 }
290
291 static int
292 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
293 {
294         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
295         char sspr_pl[MLXSW_REG_SSPR_LEN];
296
297         mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
298         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
299 }
300
301 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
302                                          u8 local_port, u8 *p_module,
303                                          u8 *p_width, u8 *p_lane)
304 {
305         char pmlp_pl[MLXSW_REG_PMLP_LEN];
306         int err;
307
308         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
309         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
310         if (err)
311                 return err;
312         *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
313         *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
314         *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
315         return 0;
316 }
317
318 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
319                                     u8 module, u8 width, u8 lane)
320 {
321         char pmlp_pl[MLXSW_REG_PMLP_LEN];
322         int i;
323
324         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
325         mlxsw_reg_pmlp_width_set(pmlp_pl, width);
326         for (i = 0; i < width; i++) {
327                 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
328                 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
329         }
330
331         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
332 }
333
334 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
335 {
336         char pmlp_pl[MLXSW_REG_PMLP_LEN];
337
338         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
339         mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
340         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
341 }
342
343 static int mlxsw_sp_port_open(struct net_device *dev)
344 {
345         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
346         int err;
347
348         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
349         if (err)
350                 return err;
351         netif_start_queue(dev);
352         return 0;
353 }
354
355 static int mlxsw_sp_port_stop(struct net_device *dev)
356 {
357         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
358
359         netif_stop_queue(dev);
360         return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
361 }
362
/* ndo_start_xmit: transmit an skb through the switch ASIC.
 * Prepends the hardware Tx header, hands the skb to the core transmit
 * path and updates per-CPU statistics. Always returns NETDEV_TX_OK
 * (dropping on error) except when the transmit path is busy, in which
 * case NETDEV_TX_BUSY asks the stack to requeue.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* Ensure there is room for the Tx header; reallocate if needed.
	 * On allocation failure the original skb is still ours to free.
	 */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	/* eth_skb_pad() frees the skb on failure, so just count the drop. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		/* On success the core owns the skb; only update stats. */
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
418
/* ndo_set_rx_mode: intentionally empty. Providing the callback keeps the
 * stack's rx-mode updates harmless; no device action is taken here.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
422
423 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
424 {
425         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
426         struct sockaddr *addr = p;
427         int err;
428
429         if (!is_valid_ether_addr(addr->sa_data))
430                 return -EADDRNOTAVAIL;
431
432         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
433         if (err)
434                 return err;
435         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
436         return 0;
437 }
438
/* Pack one priority-group buffer entry into a PBMC payload.
 * The buffer is sized to two MTUs (in cells). When pause or PFC is
 * enabled the buffer is configured lossless with an extra delay
 * allowance; otherwise it is configured lossy.
 * @delay: PFC delay requirement; ignored unless pfc_en is set, in which
 *         case it is converted via mlxsw_sp_pfc_delay_get().
 */
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
{
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}
453
454 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
455                                  u8 *prio_tc, bool pause_en,
456                                  struct ieee_pfc *my_pfc)
457 {
458         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
459         u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
460         u16 delay = !!my_pfc ? my_pfc->delay : 0;
461         char pbmc_pl[MLXSW_REG_PBMC_LEN];
462         int i, j, err;
463
464         mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
465         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
466         if (err)
467                 return err;
468
469         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
470                 bool configure = false;
471                 bool pfc = false;
472
473                 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
474                         if (prio_tc[j] == i) {
475                                 pfc = pfc_en & BIT(j);
476                                 configure = true;
477                                 break;
478                         }
479                 }
480
481                 if (!configure)
482                         continue;
483                 mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
484         }
485
486         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
487 }
488
489 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
490                                       int mtu, bool pause_en)
491 {
492         u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
493         bool dcb_en = !!mlxsw_sp_port->dcb.ets;
494         struct ieee_pfc *my_pfc;
495         u8 *prio_tc;
496
497         prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
498         my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
499
500         return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
501                                             pause_en, my_pfc);
502 }
503
/* ndo_change_mtu: resize headroom buffers for the new MTU first, then set
 * the MTU itself. If the MTU write fails, the headroom is rolled back to
 * match the old MTU so hardware state stays consistent with dev->mtu.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	/* Best effort rollback; the return value is intentionally ignored. */
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
523
/* ndo_get_stats64: aggregate software-maintained per-CPU counters into
 * @stats. The u64 counters are read under the u64_stats seqcount retry
 * loop so 64-bit values are consistent on 32-bit architectures.
 */
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
555
556 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
557                            u16 vid_end, bool is_member, bool untagged)
558 {
559         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
560         char *spvm_pl;
561         int err;
562
563         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
564         if (!spvm_pl)
565                 return -ENOMEM;
566
567         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
568                             vid_end, is_member, untagged);
569         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
570         kfree(spvm_pl);
571         return err;
572 }
573
/* Transition a port to Virtual Port mode: install an explicit {Port, VID}
 * to FID mapping for every active VLAN, then enable VP mode.
 * On failure, mappings installed so far are removed; last_visited_vid
 * bounds the unwind loop (VLAN_N_VID means "all of them" when the final
 * mode switch itself failed).
 */
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			/* The failing VID itself was not installed. */
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
603
604 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
605 {
606         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
607         u16 vid;
608         int err;
609
610         err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
611         if (err)
612                 return err;
613
614         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
615                 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
616                                                    vid, vid);
617                 if (err)
618                         return err;
619         }
620
621         return 0;
622 }
623
/* Allocate a vPort for @vid on top of @mlxsw_sp_port and link it into the
 * port's vports_list. The vPort mirrors the parent's identity (local port,
 * LAG state) and starts in the forwarding STP state.
 * Returns the new vPort or NULL on allocation failure.
 */
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vid = vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}
649
650 static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
651 {
652         list_del(&mlxsw_sp_vport->vport.list);
653         kfree(mlxsw_sp_vport);
654 }
655
/* ndo_vlan_rx_add_vid: create a vPort for @vid on the port.
 * Sequence: create the vPort; on the first vPort, move the port to
 * Virtual mode; disable learning for the VID; add VLAN membership
 * (untagged only for VID 1). Each failure unwinds the steps already
 * taken, in reverse order. VID 0 is reserved and silently accepted.
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	bool untagged = vid == 1;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		return -ENOMEM;
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	return 0;

	/* Unwind in reverse order of the steps above. */
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}
717
/* ndo_vlan_rx_kill_vid: tear down the vPort of @vid on the port.
 * Sequence: remove VLAN membership; re-enable learning; drop the vPort's
 * FID reference (freeing FID resources on last reference); on the last
 * vPort, transition the port back to VLAN mode; destroy the vPort.
 * VID 0 is reserved and silently accepted. Failures mid-sequence return
 * early without undoing earlier steps. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}
774
775 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
776                                             size_t len)
777 {
778         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
779         u8 module = mlxsw_sp_port->mapping.module;
780         u8 width = mlxsw_sp_port->mapping.width;
781         u8 lane = mlxsw_sp_port->mapping.lane;
782         int err;
783
784         if (!mlxsw_sp_port->split)
785                 err = snprintf(name, len, "p%d", module + 1);
786         else
787                 err = snprintf(name, len, "p%ds%d", module + 1,
788                                lane / width);
789
790         if (err >= len)
791                 return -EINVAL;
792
793         return 0;
794 }
795
/* Netdev operations for Spectrum ports. FDB and bridge link operations are
 * delegated to the generic switchdev helpers.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};
814
815 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
816                                       struct ethtool_drvinfo *drvinfo)
817 {
818         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
819         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
820
821         strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
822         strlcpy(drvinfo->version, mlxsw_sp_driver_version,
823                 sizeof(drvinfo->version));
824         snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
825                  "%d.%d.%d",
826                  mlxsw_sp->bus_info->fw_rev.major,
827                  mlxsw_sp->bus_info->fw_rev.minor,
828                  mlxsw_sp->bus_info->fw_rev.subminor);
829         strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
830                 sizeof(drvinfo->bus_info));
831 }
832
833 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
834                                          struct ethtool_pauseparam *pause)
835 {
836         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
837
838         pause->rx_pause = mlxsw_sp_port->link.rx_pause;
839         pause->tx_pause = mlxsw_sp_port->link.tx_pause;
840 }
841
842 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
843                                    struct ethtool_pauseparam *pause)
844 {
845         char pfcc_pl[MLXSW_REG_PFCC_LEN];
846
847         mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
848         mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
849         mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
850
851         return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
852                                pfcc_pl);
853 }
854
/* ethtool -A handler. Global PAUSE is mutually exclusive with PFC, and
 * PAUSE autonegotiation is not supported. The port's headroom buffers are
 * resized before the MAC is reconfigured, so that PAUSE is never enabled
 * with undersized buffers; if the MAC write fails, the previous headroom
 * configuration is restored.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	/* Size headroom for the new PAUSE setting before touching the MAC. */
	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	/* Cache the applied configuration for get_pauseparam. */
	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	/* Roll headroom back to match the still-active PAUSE setting. */
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
894
/* One HW counter exposed via ethtool -S: the user-visible string and a
 * getter that extracts the 64-bit value from a PPCNT register payload.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};
899
/* IEEE 802.3 counter group read through PPCNT. The order of entries here
 * fixes both the string order (get_strings) and the value order
 * (get_ethtool_stats) reported to ethtool, so it must stay consistent.
 */
static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
980
981 static void mlxsw_sp_port_get_strings(struct net_device *dev,
982                                       u32 stringset, u8 *data)
983 {
984         u8 *p = data;
985         int i;
986
987         switch (stringset) {
988         case ETH_SS_STATS:
989                 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
990                         memcpy(p, mlxsw_sp_port_hw_stats[i].str,
991                                ETH_GSTRING_LEN);
992                         p += ETH_GSTRING_LEN;
993                 }
994                 break;
995         }
996 }
997
998 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
999                                      enum ethtool_phys_id_state state)
1000 {
1001         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1002         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1003         char mlcr_pl[MLXSW_REG_MLCR_LEN];
1004         bool active;
1005
1006         switch (state) {
1007         case ETHTOOL_ID_ACTIVE:
1008                 active = true;
1009                 break;
1010         case ETHTOOL_ID_INACTIVE:
1011                 active = false;
1012                 break;
1013         default:
1014                 return -EOPNOTSUPP;
1015         }
1016
1017         mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1018         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1019 }
1020
/* ethtool -S handler: read the IEEE 802.3 counter group with one PPCNT
 * query and extract each counter via its table getter. This ndo cannot
 * return an error, so on query failure all counters are reported as zero.
 */
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
}
1036
1037 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1038 {
1039         switch (sset) {
1040         case ETH_SS_STATS:
1041                 return MLXSW_SP_PORT_HW_STATS_LEN;
1042         default:
1043                 return -EOPNOTSUPP;
1044         }
1045 }
1046
/* Mapping between a PTYS protocol bitmask and the corresponding ethtool
 * SUPPORTED_*/ADVERTISED_* bits and link speed (in Mb/s).
 */
struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};
1053
/* PTYS protocol <-> ethtool link-mode translation table. Entries with no
 * .supported/.advertised bits are speeds the legacy 32-bit ethtool masks
 * cannot express (e.g. 25G/50G/100G); they still carry a .speed so speed
 * reporting and speed-based mask construction keep working.
 */
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1152
1153 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1154 {
1155         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1156                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1157                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1158                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1159                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1160                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1161                 return SUPPORTED_FIBRE;
1162
1163         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1164                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1165                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1166                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1167                               MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1168                 return SUPPORTED_Backplane;
1169         return 0;
1170 }
1171
1172 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1173 {
1174         u32 modes = 0;
1175         int i;
1176
1177         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1178                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1179                         modes |= mlxsw_sp_port_link_mode[i].supported;
1180         }
1181         return modes;
1182 }
1183
1184 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1185 {
1186         u32 modes = 0;
1187         int i;
1188
1189         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1190                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1191                         modes |= mlxsw_sp_port_link_mode[i].advertised;
1192         }
1193         return modes;
1194 }
1195
1196 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1197                                             struct ethtool_cmd *cmd)
1198 {
1199         u32 speed = SPEED_UNKNOWN;
1200         u8 duplex = DUPLEX_UNKNOWN;
1201         int i;
1202
1203         if (!carrier_ok)
1204                 goto out;
1205
1206         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1207                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1208                         speed = mlxsw_sp_port_link_mode[i].speed;
1209                         duplex = DUPLEX_FULL;
1210                         break;
1211                 }
1212         }
1213 out:
1214         ethtool_cmd_speed_set(cmd, speed);
1215         cmd->duplex = duplex;
1216 }
1217
1218 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1219 {
1220         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1221                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1222                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1223                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1224                 return PORT_FIBRE;
1225
1226         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1227                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1228                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1229                 return PORT_DA;
1230
1231         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1232                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1233                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1234                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1235                 return PORT_NONE;
1236
1237         return PORT_OTHER;
1238 }
1239
/* ethtool get_settings: query PTYS for the port's capability, admin and
 * operational protocol masks and translate them into the ethtool_cmd
 * bitmaps. Returns 0 on success or a negative errno if the register
 * query fails.
 */
static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	/* With the link down the oper mask may be empty; fall back to the
	 * capability mask so connector type and lp_advertising stay sane.
	 */
	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}
1274
1275 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1276 {
1277         u32 ptys_proto = 0;
1278         int i;
1279
1280         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1281                 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1282                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1283         }
1284         return ptys_proto;
1285 }
1286
1287 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1288 {
1289         u32 ptys_proto = 0;
1290         int i;
1291
1292         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1293                 if (speed == mlxsw_sp_port_link_mode[i].speed)
1294                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1295         }
1296         return ptys_proto;
1297 }
1298
1299 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
1300 {
1301         u32 ptys_proto = 0;
1302         int i;
1303
1304         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1305                 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
1306                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1307         }
1308         return ptys_proto;
1309 }
1310
/* ethtool set_settings: compute the requested admin protocol mask (from
 * the advertised modes with autoneg, otherwise from the forced speed),
 * validate it against the port's capabilities, write it via PTYS, and if
 * the link is operationally up, bounce the port's admin state so the new
 * configuration takes effect.
 */
static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	/* Clamp the request to what the port can actually do. */
	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	/* Nothing to do if the admin mask is already as requested. */
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	/* Toggle the port down and back up to apply the new admin mask. */
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}
1375
/* ethtool operations for a Spectrum front-panel port. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
1388
1389 static int
1390 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
1391 {
1392         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1393         u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
1394         char ptys_pl[MLXSW_REG_PTYS_LEN];
1395         u32 eth_proto_admin;
1396
1397         eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
1398         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
1399                             eth_proto_admin);
1400         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1401 }
1402
1403 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1404                           enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1405                           bool dwrr, u8 dwrr_weight)
1406 {
1407         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1408         char qeec_pl[MLXSW_REG_QEEC_LEN];
1409
1410         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1411                             next_index);
1412         mlxsw_reg_qeec_de_set(qeec_pl, true);
1413         mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1414         mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1415         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1416 }
1417
1418 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1419                                   enum mlxsw_reg_qeec_hr hr, u8 index,
1420                                   u8 next_index, u32 maxrate)
1421 {
1422         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1423         char qeec_pl[MLXSW_REG_QEEC_LEN];
1424
1425         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1426                             next_index);
1427         mlxsw_reg_qeec_mase_set(qeec_pl, true);
1428         mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1429         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1430 }
1431
1432 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1433                               u8 switch_prio, u8 tclass)
1434 {
1435         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1436         char qtct_pl[MLXSW_REG_QTCT_LEN];
1437
1438         mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1439                             tclass);
1440         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1441 }
1442
/* Initialize the port's ETS scheduling hierarchy to a flat default:
 * every TC hangs off its own subgroup, all subgroups share one group,
 * max shapers are disabled everywhere, and all priorities map to TC 0.
 * Returns 0 on success or the first register-write error.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1504
1505 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1506                                 bool split, u8 module, u8 width, u8 lane)
1507 {
1508         struct mlxsw_sp_port *mlxsw_sp_port;
1509         struct net_device *dev;
1510         size_t bytes;
1511         int err;
1512
1513         dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1514         if (!dev)
1515                 return -ENOMEM;
1516         mlxsw_sp_port = netdev_priv(dev);
1517         mlxsw_sp_port->dev = dev;
1518         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1519         mlxsw_sp_port->local_port = local_port;
1520         mlxsw_sp_port->split = split;
1521         mlxsw_sp_port->mapping.module = module;
1522         mlxsw_sp_port->mapping.width = width;
1523         mlxsw_sp_port->mapping.lane = lane;
1524         bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
1525         mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
1526         if (!mlxsw_sp_port->active_vlans) {
1527                 err = -ENOMEM;
1528                 goto err_port_active_vlans_alloc;
1529         }
1530         mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
1531         if (!mlxsw_sp_port->untagged_vlans) {
1532                 err = -ENOMEM;
1533                 goto err_port_untagged_vlans_alloc;
1534         }
1535         INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
1536
1537         mlxsw_sp_port->pcpu_stats =
1538                 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1539         if (!mlxsw_sp_port->pcpu_stats) {
1540                 err = -ENOMEM;
1541                 goto err_alloc_stats;
1542         }
1543
1544         dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1545         dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1546
1547         err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1548         if (err) {
1549                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1550                         mlxsw_sp_port->local_port);
1551                 goto err_dev_addr_init;
1552         }
1553
1554         netif_carrier_off(dev);
1555
1556         dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1557                          NETIF_F_HW_VLAN_CTAG_FILTER;
1558
1559         /* Each packet needs to have a Tx header (metadata) on top all other
1560          * headers.
1561          */
1562         dev->hard_header_len += MLXSW_TXHDR_LEN;
1563
1564         err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1565         if (err) {
1566                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1567                         mlxsw_sp_port->local_port);
1568                 goto err_port_system_port_mapping_set;
1569         }
1570
1571         err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1572         if (err) {
1573                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1574                         mlxsw_sp_port->local_port);
1575                 goto err_port_swid_set;
1576         }
1577
1578         err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
1579         if (err) {
1580                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1581                         mlxsw_sp_port->local_port);
1582                 goto err_port_speed_by_width_set;
1583         }
1584
1585         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1586         if (err) {
1587                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1588                         mlxsw_sp_port->local_port);
1589                 goto err_port_mtu_set;
1590         }
1591
1592         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1593         if (err)
1594                 goto err_port_admin_status_set;
1595
1596         err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1597         if (err) {
1598                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1599                         mlxsw_sp_port->local_port);
1600                 goto err_port_buffers_init;
1601         }
1602
1603         err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1604         if (err) {
1605                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1606                         mlxsw_sp_port->local_port);
1607                 goto err_port_ets_init;
1608         }
1609
1610         /* ETS and buffers must be initialized before DCB. */
1611         err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1612         if (err) {
1613                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1614                         mlxsw_sp_port->local_port);
1615                 goto err_port_dcb_init;
1616         }
1617
1618         mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
1619         err = register_netdev(dev);
1620         if (err) {
1621                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1622                         mlxsw_sp_port->local_port);
1623                 goto err_register_netdev;
1624         }
1625
1626         err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
1627                                    mlxsw_sp_port->local_port, dev,
1628                                    mlxsw_sp_port->split, module);
1629         if (err) {
1630                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1631                         mlxsw_sp_port->local_port);
1632                 goto err_core_port_init;
1633         }
1634
1635         err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
1636         if (err)
1637                 goto err_port_vlan_init;
1638
1639         mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1640         return 0;
1641
1642 err_port_vlan_init:
1643         mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1644 err_core_port_init:
1645         unregister_netdev(dev);
1646 err_register_netdev:
1647 err_port_dcb_init:
1648 err_port_ets_init:
1649 err_port_buffers_init:
1650 err_port_admin_status_set:
1651 err_port_mtu_set:
1652 err_port_speed_by_width_set:
1653 err_port_swid_set:
1654 err_port_system_port_mapping_set:
1655 err_dev_addr_init:
1656         free_percpu(mlxsw_sp_port->pcpu_stats);
1657 err_alloc_stats:
1658         kfree(mlxsw_sp_port->untagged_vlans);
1659 err_port_untagged_vlans_alloc:
1660         kfree(mlxsw_sp_port->active_vlans);
1661 err_port_active_vlans_alloc:
1662         free_netdev(dev);
1663         return err;
1664 }
1665
/* Tear down a single port, undoing mlxsw_sp_port_create() in reverse
 * order. Safe to call for a local port that was never created (the
 * ports[] entry is NULL).
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	/* Clear the entry first so event and RX handlers that look up
	 * ports[local_port] no longer find this port.
	 */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	/* Drop VLAN 1 membership set up during port initialization. */
	mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	/* All vPorts (VLAN uppers) should be gone by the time the port
	 * itself is destroyed.
	 */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
	free_netdev(mlxsw_sp_port->dev);
}
1686
1687 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1688 {
1689         int i;
1690
1691         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1692                 mlxsw_sp_port_remove(mlxsw_sp, i);
1693         kfree(mlxsw_sp->ports);
1694 }
1695
/* Allocate the port array and create a netdev for every local port
 * that has a module mapped to it.
 *
 * Returns 0 on success or a negative errno. On failure, all ports
 * created so far are removed and the array is freed.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	/* Local port numbers are 1-based, so slot 0 stays unused. */
	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		/* Zero width means no module is mapped to this local port. */
		if (!width)
			continue;
		/* Remember the module so the port can be re-created later
		 * (e.g. after an unsplit).
		 */
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
					   lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	/* Unwind only the ports already created (i is the failing index). */
	for (i--; i >= 1; i--)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}
1730
1731 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1732 {
1733         u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
1734
1735         return local_port - offset;
1736 }
1737
/* Create @count split ports starting at @base_port, all backed by
 * @module, each getting an equal share of the module's lanes.
 *
 * Each configuration step (module mapping, SWID, port creation) is
 * applied to all ports before the next step begins. On error the
 * completed steps are rolled back for every port - note the "i = count"
 * resets that widen each subsequent cleanup loop to the full range.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	/* Remove the ports created before the failing one... */
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count; /* ...then undo the earlier steps for ALL ports */
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}
1780
/* Re-create the original full-width port(s) after a split is undone.
 * @count is the number of ports that existed while split.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	/* Re-created ports occupy every other local port. First map each
	 * one back to its module at full width...
	 */
	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	/* ...then move them back to SWID 0... */
	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	/* ...and finally re-create their netdevs as non-split ports. */
	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}
1811
/* devlink port-split handler: split @local_port into @count (2 or 4)
 * ports.
 *
 * The port must currently occupy the full module width, and the local
 * ports that the new split ports will use must be free. The existing
 * port(s) are removed and split ports created in their place; on
 * failure the original unsplit configuration is restored.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	/* Only a port still at full width can be split (further). */
	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	/* Remove the existing port(s) before creating the split ones. */
	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Restore the original, unsplit port(s). */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}
1872
/* devlink port-unsplit handler: undo a previous split of @local_port.
 *
 * The original split count is inferred from the current port width
 * (width 1 implies a split by four, otherwise by two). All ports that
 * belong to the split are removed and the full-width port(s)
 * re-created.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
1909
1910 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
1911                                      char *pude_pl, void *priv)
1912 {
1913         struct mlxsw_sp *mlxsw_sp = priv;
1914         struct mlxsw_sp_port *mlxsw_sp_port;
1915         enum mlxsw_reg_pude_oper_status status;
1916         u8 local_port;
1917
1918         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
1919         mlxsw_sp_port = mlxsw_sp->ports[local_port];
1920         if (!mlxsw_sp_port)
1921                 return;
1922
1923         status = mlxsw_reg_pude_oper_status_get(pude_pl);
1924         if (status == MLXSW_PORT_OPER_STATUS_UP) {
1925                 netdev_info(mlxsw_sp_port->dev, "link up\n");
1926                 netif_carrier_on(mlxsw_sp_port->dev);
1927         } else {
1928                 netdev_info(mlxsw_sp_port->dev, "link down\n");
1929                 netif_carrier_off(mlxsw_sp_port->dev);
1930         }
1931 }
1932
/* Event listener entry for PUDE (port up/down event) traps; hooked up
 * by mlxsw_sp_event_register().
 */
static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
1937
1938 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
1939                                    enum mlxsw_event_trap_id trap_id)
1940 {
1941         struct mlxsw_event_listener *el;
1942         char hpkt_pl[MLXSW_REG_HPKT_LEN];
1943         int err;
1944
1945         switch (trap_id) {
1946         case MLXSW_TRAP_ID_PUDE:
1947                 el = &mlxsw_sp_pude_event;
1948                 break;
1949         }
1950         err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
1951         if (err)
1952                 return err;
1953
1954         mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
1955         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1956         if (err)
1957                 goto err_event_trap_set;
1958
1959         return 0;
1960
1961 err_event_trap_set:
1962         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1963         return err;
1964 }
1965
1966 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
1967                                       enum mlxsw_event_trap_id trap_id)
1968 {
1969         struct mlxsw_event_listener *el;
1970
1971         switch (trap_id) {
1972         case MLXSW_TRAP_ID_PUDE:
1973                 el = &mlxsw_sp_pude_event;
1974                 break;
1975         }
1976         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1977 }
1978
/* RX handler shared by all trapped packet types: account the packet in
 * the ingress port's per-CPU stats and inject it into the network stack
 * on that port's netdev.
 */
static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
2003
/* Table of packet traps handled by the driver. Every entry uses the
 * same RX handler and matches on any ingress port; only the trap ID
 * differs. mlxsw_sp_traps_init() registers each entry and sets its
 * action to trap-to-CPU.
 */
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	/* IGMP traps */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
	/* ARP and L3/router traps */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_ARPBC,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_ARPUC,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IP2ME,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RTR_INGRESS0,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4,
	},
};
2102
/* Set up packet traps: configure the RX and control trap groups, then
 * register an RX listener and a trap-to-CPU policy for every entry in
 * mlxsw_sp_rx_listener[].
 *
 * On failure, every trap configured so far is reset to discard and its
 * listener unregistered.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	/* Listener i was registered but its trap action failed; drop it
	 * before unwinding the fully-configured entries below.
	 */
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}
2151
2152 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2153 {
2154         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2155         int i;
2156
2157         for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2158                 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
2159                                     mlxsw_sp_rx_listener[i].trap_id);
2160                 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2161
2162                 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2163                                                   &mlxsw_sp_rx_listener[i],
2164                                                   mlxsw_sp);
2165         }
2166 }
2167
2168 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2169                                  enum mlxsw_reg_sfgc_type type,
2170                                  enum mlxsw_reg_sfgc_bridge_type bridge_type)
2171 {
2172         enum mlxsw_flood_table_type table_type;
2173         enum mlxsw_sp_flood_table flood_table;
2174         char sfgc_pl[MLXSW_REG_SFGC_LEN];
2175
2176         if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
2177                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
2178         else
2179                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2180
2181         if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2182                 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2183         else
2184                 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
2185
2186         mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2187                             flood_table);
2188         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2189 }
2190
/* Program flood-group entries for every non-reserved packet type, once
 * for VFID-based bridges and once for 802.1Q FID-based bridges.
 *
 * Returns 0 on success or the first error from the register writes.
 */
static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}
2212
2213 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2214 {
2215         char slcr_pl[MLXSW_REG_SLCR_LEN];
2216
2217         mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2218                                      MLXSW_REG_SLCR_LAG_HASH_DMAC |
2219                                      MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2220                                      MLXSW_REG_SLCR_LAG_HASH_VLANID |
2221                                      MLXSW_REG_SLCR_LAG_HASH_SIP |
2222                                      MLXSW_REG_SLCR_LAG_HASH_DIP |
2223                                      MLXSW_REG_SLCR_LAG_HASH_SPORT |
2224                                      MLXSW_REG_SLCR_LAG_HASH_DPORT |
2225                                      MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
2226         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2227 }
2228
/* Main driver init, called by the mlxsw core once the bus is up.
 *
 * Initializes the subsystems in dependency order (events, traps,
 * flooding, buffers, LAG, switchdev, router) and creates the ports
 * last. On failure the completed steps are unwound in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->fids);
	INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* Ports are created last so every subsystem they rely on is
	 * already up.
	 */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	return err;
}
2311
/* Driver teardown: reverse of mlxsw_sp_init(). The trailing WARN_ONs
 * flag FIDs, vFIDs or RIFs that survived port removal, which would
 * indicate a reference leak.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int i;

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
	WARN_ON(!list_empty(&mlxsw_sp->fids));
	for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
		WARN_ON_ONCE(mlxsw_sp->rifs[i]);
}
2328
/* Device configuration profile passed to the mlxsw core at init time.
 * NOTE(review): each value appears to take effect only when its
 * matching used_* flag is set - confirm against the mlxsw core's
 * profile handling.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	/* Two FID-offset and two FID flood tables, matching the UC/BM
	 * table split used in __mlxsw_sp_flood_init().
	 */
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 2,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2364
/* Spectrum driver registration with the mlxsw core: lifecycle hooks
 * (init/fini), devlink port split/unsplit, shared-buffer (sb_*) ops,
 * and the Tx header construction used on every transmitted packet.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner				= THIS_MODULE,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};
2387
/* Return true if @dev is a Spectrum physical port netdev, identified
 * by its netdev_ops vector belonging to this driver.
 */
static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
2392
/* Find the first Spectrum port underneath @dev: @dev itself, or any
 * device in its full lower hierarchy (LAG, bridge, VLAN stacking).
 * Returns NULL if no such port exists. Caller must hold RTNL (the
 * non-RCU lower-dev walk requires it).
 */
static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
		if (mlxsw_sp_port_dev_check(lower_dev))
			return netdev_priv(lower_dev);
	}
	return NULL;
}
2407
2408 static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
2409 {
2410         struct mlxsw_sp_port *mlxsw_sp_port;
2411
2412         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
2413         return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
2414 }
2415
/* RCU variant of mlxsw_sp_port_dev_lower_find(): same lookup, but the
 * lower-device walk is safe under rcu_read_lock() instead of RTNL.
 */
static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) {
		if (mlxsw_sp_port_dev_check(lower_dev))
			return netdev_priv(lower_dev);
	}
	return NULL;
}
2430
/* Find the Spectrum port under @dev and take a reference on its netdev.
 * The dev_hold() happens inside the RCU read-side section so the port
 * cannot disappear between lookup and refcount bump. Balance with
 * mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}
2442
/* Release the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
2447
2448 static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
2449                                        unsigned long event)
2450 {
2451         switch (event) {
2452         case NETDEV_UP:
2453                 if (!r)
2454                         return true;
2455                 r->ref_count++;
2456                 return false;
2457         case NETDEV_DOWN:
2458                 if (r && --r->ref_count == 0)
2459                         return true;
2460                 /* It is possible we already removed the RIF ourselves
2461                  * if it was assigned to a netdev that is now a bridge
2462                  * or LAG slave.
2463                  */
2464                 return false;
2465         }
2466
2467         return false;
2468 }
2469
2470 static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
2471 {
2472         int i;
2473
2474         for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
2475                 if (!mlxsw_sp->rifs[i])
2476                         return i;
2477
2478         return MLXSW_SP_RIF_MAX;
2479 }
2480
2481 static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
2482                                            bool *p_lagged, u16 *p_system_port)
2483 {
2484         u8 local_port = mlxsw_sp_vport->local_port;
2485
2486         *p_lagged = mlxsw_sp_vport->lagged;
2487         *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
2488 }
2489
2490 static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
2491                                     struct net_device *l3_dev, u16 rif,
2492                                     bool create)
2493 {
2494         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2495         bool lagged = mlxsw_sp_vport->lagged;
2496         char ritr_pl[MLXSW_REG_RITR_LEN];
2497         u16 system_port;
2498
2499         mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
2500                             l3_dev->mtu, l3_dev->dev_addr);
2501
2502         mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
2503         mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
2504                                   mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
2505
2506         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2507 }
2508
2509 static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
2510
2511 static struct mlxsw_sp_fid *
2512 mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
2513 {
2514         struct mlxsw_sp_fid *f;
2515
2516         f = kzalloc(sizeof(*f), GFP_KERNEL);
2517         if (!f)
2518                 return NULL;
2519
2520         f->leave = mlxsw_sp_vport_rif_sp_leave;
2521         f->ref_count = 0;
2522         f->dev = l3_dev;
2523         f->fid = fid;
2524
2525         return f;
2526 }
2527
2528 static struct mlxsw_sp_rif *
2529 mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
2530 {
2531         struct mlxsw_sp_rif *r;
2532
2533         r = kzalloc(sizeof(*r), GFP_KERNEL);
2534         if (!r)
2535                 return NULL;
2536
2537         ether_addr_copy(r->addr, l3_dev->dev_addr);
2538         r->mtu = l3_dev->mtu;
2539         r->ref_count = 1;
2540         r->dev = l3_dev;
2541         r->rif = rif;
2542         r->f = f;
2543
2544         return r;
2545 }
2546
/* Create a Sub-port RIF for @mlxsw_sp_vport on top of @l3_dev: program
 * the RITR interface, install the FDB entry that steers the netdev's
 * MAC to the router, then allocate the rFID and RIF objects and publish
 * the RIF in the table. Error unwinding is in reverse order of setup.
 * Returns the new RIF or an ERR_PTR() on failure.
 */
static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
			     struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_fid *f;
	struct mlxsw_sp_rif *r;
	u16 fid, rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_RIF_MAX)
		return ERR_PTR(-ERANGE);

	err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
	if (err)
		return ERR_PTR(err);

	/* A Sub-port RIF's FID is derived directly from the RIF index. */
	fid = mlxsw_sp_rif_sp_to_fid(rif);
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
	if (err)
		goto err_rif_fdb_op;

	f = mlxsw_sp_rfid_alloc(fid, l3_dev);
	if (!f) {
		err = -ENOMEM;
		goto err_rfid_alloc;
	}

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	return r;

err_rif_alloc:
	kfree(f);
err_rfid_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
	return ERR_PTR(err);
}
2595
/* Tear down a Sub-port RIF: unpublish it, free the software objects,
 * then undo the hardware state (FDB entry, RITR interface) in reverse
 * order of mlxsw_sp_vport_rif_sp_create().
 */
static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	/* Copy out what the hardware teardown needs before freeing. */
	u16 fid = f->fid;
	u16 rif = r->rif;

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	kfree(f);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);

	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
}
2616
/* Attach @mlxsw_sp_vport to the RIF associated with @l3_dev, creating
 * the RIF on first use. The vPort is bound to the RIF's rFID and the
 * FID's reference count is bumped. Returns 0 or a negative errno.
 */
static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_rif *r;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!r) {
		r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
		if (IS_ERR(r))
			return PTR_ERR(r);
	}

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
	r->f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);

	return 0;
}
2637
/* Detach the vPort from its rFID (the inverse of
 * mlxsw_sp_vport_rif_sp_join()); when the last user leaves, the backing
 * RIF is destroyed as well.
 */
static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
}
2648
2649 static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
2650                                          struct net_device *port_dev,
2651                                          unsigned long event, u16 vid)
2652 {
2653         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
2654         struct mlxsw_sp_port *mlxsw_sp_vport;
2655
2656         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2657         if (WARN_ON(!mlxsw_sp_vport))
2658                 return -EINVAL;
2659
2660         switch (event) {
2661         case NETDEV_UP:
2662                 return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
2663         case NETDEV_DOWN:
2664                 mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
2665                 break;
2666         }
2667
2668         return 0;
2669 }
2670
/* Inetaddr event on a bare Spectrum port: addresses are configured on
 * the port's implicit VLAN 1 vPort, but only while the port is not
 * enslaved to a bridge or a LAG (those cases are handled via the
 * master device instead).
 */
static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
					unsigned long event)
{
	if (netif_is_bridge_port(port_dev))
		return 0;
	if (netif_is_lag_port(port_dev))
		return 0;

	return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
}
2679
/* Propagate an inetaddr event on @l3_dev to the vPort with VLAN @vid on
 * every Spectrum port enslaved to @lag_dev. Stops at the first error.
 */
static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
							    event, vid);
			if (err)
				return err;
		}
	}

	return 0;
}
2699
/* Inetaddr event on a LAG device: forward to all member ports on the
 * implicit VLAN 1, unless the LAG itself is bridged (then the bridge
 * device is the L3 interface and handles the event).
 */
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	return netif_is_bridge_port(lag_dev) ? 0 :
	       __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}
2708
2709 static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
2710                                                     struct net_device *l3_dev)
2711 {
2712         u16 fid;
2713
2714         if (is_vlan_dev(l3_dev))
2715                 fid = vlan_dev_vlan_id(l3_dev);
2716         else if (mlxsw_sp->master_bridge.dev == l3_dev)
2717                 fid = 1;
2718         else
2719                 return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
2720
2721         return mlxsw_sp_fid_find(mlxsw_sp, fid);
2722 }
2723
2724 static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
2725 {
2726         if (mlxsw_sp_fid_is_vfid(fid))
2727                 return MLXSW_REG_RITR_FID_IF;
2728         else
2729                 return MLXSW_REG_RITR_VLAN_IF;
2730 }
2731
/* Create or destroy (@create) the bridge RIF @rif for @fid via the RITR
 * register, with MTU and MAC inherited from @l3_dev. The interface type
 * (FID vs. VLAN) is derived from the FID. Returns 0 or negative errno.
 */
static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  u16 fid, u16 rif,
				  bool create)
{
	enum mlxsw_reg_ritr_if_type rif_type;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_type = mlxsw_sp_rif_type_get(fid);
	mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
			    l3_dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
2747
/* Create a RIF for a bridge (or VLAN-on-bridge) device @l3_dev backed
 * by FID @f: program the RITR interface, install the router FDB entry
 * for the netdev's MAC, allocate the RIF object and publish it.
 * Error unwinding is in reverse order. Returns 0 or a negative errno.
 */
static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *l3_dev,
				      struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *r;
	u16 rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_RIF_MAX)
		return -ERANGE;

	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	netdev_dbg(l3_dev, "RIF=%d created\n", rif);

	return 0;

err_rif_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
	return err;
}
2787
/* Tear down a bridge RIF: unpublish it, free the software object, then
 * undo the hardware state (FDB entry, RITR interface) in reverse order
 * of mlxsw_sp_rif_bridge_create(). The FID itself is not freed here —
 * it is owned by the bridge/VLAN code.
 */
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *r)
{
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 rif = r->rif;

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);

	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);

	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
}
2806
2807 static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
2808                                           struct net_device *br_dev,
2809                                           unsigned long event)
2810 {
2811         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
2812         struct mlxsw_sp_fid *f;
2813
2814         /* FID can either be an actual FID if the L3 device is the
2815          * VLAN-aware bridge or a VLAN device on top. Otherwise, the
2816          * L3 device is a VLAN-unaware bridge and we get a vFID.
2817          */
2818         f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
2819         if (WARN_ON(!f))
2820                 return -EINVAL;
2821
2822         switch (event) {
2823         case NETDEV_UP:
2824                 return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
2825         case NETDEV_DOWN:
2826                 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
2827                 break;
2828         }
2829
2830         return 0;
2831 }
2832
/* Inetaddr event on a VLAN device: dispatch according to the real
 * device underneath — a Spectrum port, a LAG, or the tracked master
 * bridge. Events on VLANs over anything else are ignored.
 */
static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_bridge_master(real_dev) &&
		 mlxsw_sp->master_bridge.dev == real_dev)
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
						      event);

	return 0;
}
2853
/* IPv4 address notifier: create/update/destroy router interfaces when
 * addresses are added to or removed from netdevs stacked over Spectrum
 * ports. mlxsw_sp_rif_should_config() filters events that only need a
 * RIF refcount adjustment. Non-Spectrum devices pass through with 0.
 */
static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(r, event))
		goto out;

	/* Dispatch by netdev type; each handler derives the right vPort
	 * or FID for the L3 device.
	 */
	if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_inetaddr_port_event(dev, event);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_inetaddr_lag_event(dev, event);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_inetaddr_vlan_event(dev, event);

out:
	return notifier_from_errno(err);
}
2883
/* Update an existing RIF's MAC and MTU: read the current RITR record,
 * patch the two fields, and write it back with the create opcode (which
 * also edits an existing interface). Returns 0 or a negative errno.
 */
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
2900
/* Sync a RIF after its netdev changed MAC or MTU: remove the old router
 * FDB entry, edit the RIF, install the new FDB entry, then update the
 * cached values. On failure each completed step is rolled back so
 * hardware and the cached r->addr/r->mtu stay consistent.
 * Returns 0 (including for non-RIF devices) or a negative errno.
 */
static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!r)
		return 0;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	ether_addr_copy(r->addr, dev->dev_addr);
	r->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", r->rif);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
	return err;
}
2940
2941 static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
2942                                          u16 fid)
2943 {
2944         if (mlxsw_sp_fid_is_vfid(fid))
2945                 return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
2946         else
2947                 return test_bit(fid, lag_port->active_vlans);
2948 }
2949
2950 static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
2951                                            u16 fid)
2952 {
2953         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2954         u8 local_port = mlxsw_sp_port->local_port;
2955         u16 lag_id = mlxsw_sp_port->lag_id;
2956         int i, count = 0;
2957
2958         if (!mlxsw_sp_port->lagged)
2959                 return true;
2960
2961         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2962                 struct mlxsw_sp_port *lag_port;
2963
2964                 lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
2965                 if (!lag_port || lag_port->local_port == local_port)
2966                         continue;
2967                 if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
2968                         count++;
2969         }
2970
2971         return !count;
2972 }
2973
/* Flush all FDB entries learned on this (non-LAG) port for @fid via the
 * SFDF register. Returns 0 or a negative errno.
 */
static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
		   mlxsw_sp_port->local_port, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
2991
/* Flush all FDB entries learned on the port's LAG for @fid via the SFDF
 * register. Returns 0 or a negative errno.
 */
static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
		   mlxsw_sp_port->lag_id, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
3008
3009 int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
3010 {
3011         if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
3012                 return 0;
3013
3014         if (mlxsw_sp_port->lagged)
3015                 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
3016                                                              fid);
3017         else
3018                 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
3019 }
3020
/* Release all FIDs after the master bridge is gone. Each remaining FID
 * is expected to hold exactly one reference at this point; anything
 * else indicates a refcount imbalance and is flagged loudly.
 */
static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *f, *tmp;

	list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
		if (--f->ref_count == 0)
			mlxsw_sp_fid_destroy(mlxsw_sp, f);
		else
			WARN_ON_ONCE(1);
}
3031
3032 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
3033                                          struct net_device *br_dev)
3034 {
3035         return !mlxsw_sp->master_bridge.dev ||
3036                mlxsw_sp->master_bridge.dev == br_dev;
3037 }
3038
/* Track @br_dev as the device's master bridge and take a reference
 * (one per enslaved port). Balanced by mlxsw_sp_master_bridge_dec().
 */
static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}
3045
/* Drop one reference on the master bridge; when the last port leaves,
 * forget the bridge and reclaim any FIDs still tied to it.
 */
static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0) {
		mlxsw_sp->master_bridge.dev = NULL;
		/* It's possible upper VLAN devices are still holding
		 * references to underlying FIDs. Drop the reference
		 * and release the resources if it was the last one.
		 * If it wasn't, then something bad happened.
		 */
		mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
	}
}
3058
/* Enslave the port to bridge @br_dev: remove the implicit VLAN 1
 * interface, account the bridge reference and enable bridge-related
 * port flags (learning, sync to software FDB, unknown-unicast flood).
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *br_dev)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}
3083
/* Release the port from its bridge: restore PVID 1, drop the bridge
 * reference, clear bridge-related flags and re-create the implicit
 * VLAN 1 interface for standalone operation.
 */
static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	mlxsw_sp_port_add_vid(dev, 0, 1);
}
3102
/* Create HW LAG @lag_id via the SLDR register. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
3110
/* Destroy HW LAG @lag_id via the SLDR register. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
3118
/* Add the port to LAG @lag_id's collector at slot @port_index (SLCOR). */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
3129
/* Remove the port from LAG @lag_id's collector (SLCOR). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
3140
/* Enable traffic collection/distribution for the port in LAG @lag_id. */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
3151
/* Disable traffic collection/distribution for the port in LAG @lag_id. */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
3162
3163 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3164                                   struct net_device *lag_dev,
3165                                   u16 *p_lag_id)
3166 {
3167         struct mlxsw_sp_upper *lag;
3168         int free_lag_id = -1;
3169         int i;
3170
3171         for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
3172                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3173                 if (lag->ref_count) {
3174                         if (lag->dev == lag_dev) {
3175                                 *p_lag_id = i;
3176                                 return 0;
3177                         }
3178                 } else if (free_lag_id < 0) {
3179                         free_lag_id = i;
3180                 }
3181         }
3182         if (free_lag_id < 0)
3183                 return -EBUSY;
3184         *p_lag_id = free_lag_id;
3185         return 0;
3186 }
3187
3188 static bool
3189 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3190                           struct net_device *lag_dev,
3191                           struct netdev_lag_upper_info *lag_upper_info)
3192 {
3193         u16 lag_id;
3194
3195         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3196                 return false;
3197         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3198                 return false;
3199         return true;
3200 }
3201
3202 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3203                                        u16 lag_id, u8 *p_port_index)
3204 {
3205         int i;
3206
3207         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
3208                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3209                         *p_port_index = i;
3210                         return 0;
3211                 }
3212         }
3213         return -EBUSY;
3214 }
3215
/* Mark the port's PVID (VLAN 1) vPort as a member of LAG @lag_id,
 * detaching it from its FID first since any RIF configured on the
 * standalone port is no longer valid once the port is aggregated.
 */
static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* If vPort is assigned a RIF, then leave it since it's no
	 * longer valid.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lag_id = lag_id;
	mlxsw_sp_vport->lagged = 1;
}
3237
/* Undo mlxsw_sp_port_pvid_vport_lag_join(): detach the PVID vPort
 * (VID 1) from its FID, if any, and clear its LAG-member flag.
 */
static void
mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* Detach from the FID; its leave() callback cleans up any state
	 * that was keyed on the LAG membership.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lagged = 0;
}
3254
3255 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3256                                   struct net_device *lag_dev)
3257 {
3258         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3259         struct mlxsw_sp_upper *lag;
3260         u16 lag_id;
3261         u8 port_index;
3262         int err;
3263
3264         err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
3265         if (err)
3266                 return err;
3267         lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3268         if (!lag->ref_count) {
3269                 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
3270                 if (err)
3271                         return err;
3272                 lag->dev = lag_dev;
3273         }
3274
3275         err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
3276         if (err)
3277                 return err;
3278         err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
3279         if (err)
3280                 goto err_col_port_add;
3281         err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
3282         if (err)
3283                 goto err_col_port_enable;
3284
3285         mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
3286                                    mlxsw_sp_port->local_port);
3287         mlxsw_sp_port->lag_id = lag_id;
3288         mlxsw_sp_port->lagged = 1;
3289         lag->ref_count++;
3290
3291         mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);
3292
3293         return 0;
3294
3295 err_col_port_enable:
3296         mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
3297 err_col_port_add:
3298         if (!lag->ref_count)
3299                 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3300         return err;
3301 }
3302
/* Release a port from the LAG it is currently a member of. Mirrors
 * mlxsw_sp_port_lag_join(): the collector port is disabled and removed,
 * bridge state is flushed, and the hardware LAG entry is destroyed when
 * the last member leaves.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* A port leaving the LAG also leaves any bridge the LAG is in. */
	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	/* Last member: destroy the hardware LAG entry before the port's
	 * mapping and flags are cleared below.
	 */
	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}
3333
3334 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3335                                       u16 lag_id)
3336 {
3337         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3338         char sldr_pl[MLXSW_REG_SLDR_LEN];
3339
3340         mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3341                                          mlxsw_sp_port->local_port);
3342         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3343 }
3344
3345 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3346                                          u16 lag_id)
3347 {
3348         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3349         char sldr_pl[MLXSW_REG_SLDR_LEN];
3350
3351         mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3352                                             mlxsw_sp_port->local_port);
3353         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3354 }
3355
3356 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
3357                                        bool lag_tx_enabled)
3358 {
3359         if (lag_tx_enabled)
3360                 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
3361                                                   mlxsw_sp_port->lag_id);
3362         else
3363                 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3364                                                      mlxsw_sp_port->lag_id);
3365 }
3366
3367 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
3368                                      struct netdev_lag_lower_state_info *info)
3369 {
3370         return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
3371 }
3372
3373 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
3374                                    struct net_device *vlan_dev)
3375 {
3376         struct mlxsw_sp_port *mlxsw_sp_vport;
3377         u16 vid = vlan_dev_vlan_id(vlan_dev);
3378
3379         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3380         if (WARN_ON(!mlxsw_sp_vport))
3381                 return -EINVAL;
3382
3383         mlxsw_sp_vport->dev = vlan_dev;
3384
3385         return 0;
3386 }
3387
3388 static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
3389                                       struct net_device *vlan_dev)
3390 {
3391         struct mlxsw_sp_port *mlxsw_sp_vport;
3392         u16 vid = vlan_dev_vlan_id(vlan_dev);
3393
3394         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3395         if (WARN_ON(!mlxsw_sp_vport))
3396                 return;
3397
3398         mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
3399 }
3400
/* Handle PRECHANGEUPPER/CHANGEUPPER notifications for a physical port.
 *
 * PRECHANGEUPPER validates the requested topology and can veto it;
 * CHANGEUPPER reflects an already-accepted change into the device.
 * Returns 0 or a negative errno (vetoes the change on PRECHANGEUPPER).
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG and bridge uppers are offloadable. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		/* A port with VLAN uppers cannot be enslaved to a LAG. */
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		/* A LAG member may only carry VLAN uppers of the LAG. */
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							  upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else {
			/* Rejected at PRECHANGEUPPER; should not happen. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
3468
3469 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
3470                                                unsigned long event, void *ptr)
3471 {
3472         struct netdev_notifier_changelowerstate_info *info;
3473         struct mlxsw_sp_port *mlxsw_sp_port;
3474         int err;
3475
3476         mlxsw_sp_port = netdev_priv(dev);
3477         info = ptr;
3478
3479         switch (event) {
3480         case NETDEV_CHANGELOWERSTATE:
3481                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
3482                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
3483                                                         info->lower_state_info);
3484                         if (err)
3485                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
3486                 }
3487                 break;
3488         }
3489
3490         return 0;
3491 }
3492
3493 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
3494                                          unsigned long event, void *ptr)
3495 {
3496         switch (event) {
3497         case NETDEV_PRECHANGEUPPER:
3498         case NETDEV_CHANGEUPPER:
3499                 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
3500         case NETDEV_CHANGELOWERSTATE:
3501                 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
3502         }
3503
3504         return 0;
3505 }
3506
3507 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
3508                                         unsigned long event, void *ptr)
3509 {
3510         struct net_device *dev;
3511         struct list_head *iter;
3512         int ret;
3513
3514         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3515                 if (mlxsw_sp_port_dev_check(dev)) {
3516                         ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
3517                         if (ret)
3518                                 return ret;
3519                 }
3520         }
3521
3522         return 0;
3523 }
3524
3525 static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
3526                                             struct net_device *vlan_dev)
3527 {
3528         u16 fid = vlan_dev_vlan_id(vlan_dev);
3529         struct mlxsw_sp_fid *f;
3530
3531         f = mlxsw_sp_fid_find(mlxsw_sp, fid);
3532         if (!f) {
3533                 f = mlxsw_sp_fid_create(mlxsw_sp, fid);
3534                 if (IS_ERR(f))
3535                         return PTR_ERR(f);
3536         }
3537
3538         f->ref_count++;
3539
3540         return 0;
3541 }
3542
/* Drop the reference taken in mlxsw_sp_master_bridge_vlan_link().
 * An associated router interface, if any, is destroyed first; the FID
 * itself is destroyed when the last reference goes away.
 */
static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (f && f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
	if (f && --f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp, f);
}
3555
/* Handle CHANGEUPPER notifications on the master bridge device itself:
 * link/unlink of VLAN uppers maps to FID reference management so L3
 * interfaces can be configured on top of bridge VLANs.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	/* Only the bridge acting as this device's master bridge is of
	 * interest; anything else is not offloaded.
	 */
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;
	if (br_dev != mlxsw_sp->master_bridge.dev)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev))
			break;
		if (info->linking) {
			err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
							       upper_dev);
			if (err)
				return err;
		} else {
			mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
		}
		break;
	}

	return 0;
}
3590
3591 static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
3592 {
3593         return find_first_zero_bit(mlxsw_sp->vfids.mapped,
3594                                    MLXSW_SP_VFID_MAX);
3595 }
3596
3597 static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
3598 {
3599         char sfmr_pl[MLXSW_REG_SFMR_LEN];
3600
3601         mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
3602         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
3603 }
3604
3605 static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
3606
3607 static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
3608                                                  struct net_device *br_dev)
3609 {
3610         struct device *dev = mlxsw_sp->bus_info->dev;
3611         struct mlxsw_sp_fid *f;
3612         u16 vfid, fid;
3613         int err;
3614
3615         vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
3616         if (vfid == MLXSW_SP_VFID_MAX) {
3617                 dev_err(dev, "No available vFIDs\n");
3618                 return ERR_PTR(-ERANGE);
3619         }
3620
3621         fid = mlxsw_sp_vfid_to_fid(vfid);
3622         err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
3623         if (err) {
3624                 dev_err(dev, "Failed to create FID=%d\n", fid);
3625                 return ERR_PTR(err);
3626         }
3627
3628         f = kzalloc(sizeof(*f), GFP_KERNEL);
3629         if (!f)
3630                 goto err_allocate_vfid;
3631
3632         f->leave = mlxsw_sp_vport_vfid_leave;
3633         f->fid = fid;
3634         f->dev = br_dev;
3635
3636         list_add(&f->list, &mlxsw_sp->vfids.list);
3637         set_bit(vfid, mlxsw_sp->vfids.mapped);
3638
3639         return f;
3640
3641 err_allocate_vfid:
3642         mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
3643         return ERR_PTR(-ENOMEM);
3644 }
3645
/* Release a vFID: unlink it from driver tracking, destroy any router
 * interface still using it, free the structure and finally remove the
 * FID from hardware.
 */
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	/* Save the FID number; 'f' is freed before the hardware op. */
	u16 fid = f->fid;

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}
3662
3663 static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
3664                                   bool valid)
3665 {
3666         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
3667         u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3668
3669         return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
3670                                             vid);
3671 }
3672
/* Attach a vPort to the vFID of the VLAN-unaware bridge 'br_dev',
 * creating the vFID on first use. Enables flooding for the FID and maps
 * the vPort's {port, VID} to it. On failure the steps are unwound and a
 * vFID created by this call is destroyed again.
 */
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	/* ref_count is still zero if the vFID was created above. */
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}
3708
/* Detach a vPort from its vFID: unmap the {port, VID} pair, disable
 * flooding, flush learned FDB entries and drop the FID reference,
 * destroying the vFID when the last user leaves.
 */
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}
3725
/* Enslave a vPort (VLAN interface) to the VLAN-unaware bridge 'br_dev':
 * leave any FID it is currently attached to, join the bridge's vFID and
 * enable learning on the VID. Sets the vPort's bridge-related flags.
 */
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	/* A vPort already attached to a FID (e.g. via a RIF) must leave
	 * it first; a FID without a leave() callback is unexpected.
	 */
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}
3760
3761 static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
3762 {
3763         u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3764
3765         mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3766
3767         mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
3768
3769         mlxsw_sp_vport->learning = 0;
3770         mlxsw_sp_vport->learning_sync = 0;
3771         mlxsw_sp_vport->uc_flood = 0;
3772         mlxsw_sp_vport->bridged = 0;
3773 }
3774
3775 static bool
3776 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
3777                                   const struct net_device *br_dev)
3778 {
3779         struct mlxsw_sp_port *mlxsw_sp_vport;
3780
3781         list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
3782                             vport.list) {
3783                 struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
3784
3785                 if (dev && dev == br_dev)
3786                         return false;
3787         }
3788
3789         return true;
3790 }
3791
/* Handle PRECHANGEUPPER/CHANGEUPPER for a VLAN interface on top of a
 * port: only bridge uppers are allowed, and joining maps to attaching
 * the corresponding vPort to the bridge's vFID.
 */
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	/* May be NULL, e.g. when the VLAN was configured before the
	 * port was probed; handled per-event below.
	 */
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			/* Nothing to undo if the vPort is gone. */
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
	}

	return err;
}
3834
3835 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
3836                                               unsigned long event, void *ptr,
3837                                               u16 vid)
3838 {
3839         struct net_device *dev;
3840         struct list_head *iter;
3841         int ret;
3842
3843         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3844                 if (mlxsw_sp_port_dev_check(dev)) {
3845                         ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
3846                                                              vid);
3847                         if (ret)
3848                                 return ret;
3849                 }
3850         }
3851
3852         return 0;
3853 }
3854
3855 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
3856                                          unsigned long event, void *ptr)
3857 {
3858         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3859         u16 vid = vlan_dev_vlan_id(vlan_dev);
3860
3861         if (mlxsw_sp_port_dev_check(real_dev))
3862                 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
3863                                                       vid);
3864         else if (netif_is_lag_master(real_dev))
3865                 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
3866                                                           vid);
3867
3868         return 0;
3869 }
3870
/* Top-level netdevice notifier: dispatch by event type first (router
 * port events), then by the kind of device the event is for.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
3890
/* Notifier for netdevice topology/state events handled by the driver. */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
3894
/* Notifier for IPv4 address events (router interface management). */
static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10, /* Must be called before FIB notifier block */
};
3899
3900 static int __init mlxsw_sp_module_init(void)
3901 {
3902         int err;
3903
3904         register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3905         register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
3906         err = mlxsw_core_driver_register(&mlxsw_sp_driver);
3907         if (err)
3908                 goto err_core_driver_register;
3909         return 0;
3910
3911 err_core_driver_register:
3912         unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3913         return err;
3914 }
3915
/* Module exit: tear down in reverse order of mlxsw_sp_module_init(). */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
3922
3923 module_init(mlxsw_sp_module_init);
3924 module_exit(mlxsw_sp_module_exit);
3925
3926 MODULE_LICENSE("Dual BSD/GPL");
3927 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
3928 MODULE_DESCRIPTION("Mellanox Spectrum driver");
3929 MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);