Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <linux/list.h>
52 #include <linux/notifier.h>
53 #include <linux/dcbnl.h>
54 #include <net/switchdev.h>
55 #include <generated/utsrelease.h>
56
57 #include "spectrum.h"
58 #include "core.h"
59 #include "reg.h"
60 #include "port.h"
61 #include "trap.h"
62 #include "txheader.h"
63
/* Driver identification strings reported to the mlxsw core on registration. */
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* The items below describe the fields of the Tx header that is prepended to
 * every packet handed to the device; see mlxsw_sp_txhdr_construct().
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
134
/* Prepend and fill the Tx header on @skb. All packets transmitted by the
 * driver itself are Ethernet control packets directed at a specific egress
 * port (tx_info->local_port), using the control TClass. The caller must
 * ensure @skb has at least MLXSW_TXHDR_LEN bytes of headroom.
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	/* Zero the header first; fields not set below must be 0. */
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
150
/* Query the device's base MAC address (SPAD register) into
 * mlxsw_sp->base_mac; per-port addresses are derived from it in
 * mlxsw_sp_port_dev_addr_init(). Returns 0 or a negative errno.
 */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
162
163 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
164                                           bool is_up)
165 {
166         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
167         char paos_pl[MLXSW_REG_PAOS_LEN];
168
169         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
170                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
171                             MLXSW_PORT_ADMIN_STATUS_DOWN);
172         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
173 }
174
175 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
176                                          bool *p_is_up)
177 {
178         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
179         char paos_pl[MLXSW_REG_PAOS_LEN];
180         u8 oper_status;
181         int err;
182
183         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
184         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
185         if (err)
186                 return err;
187         oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
188         *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
189         return 0;
190 }
191
/* Program the port's hardware MAC address via the PPAD register.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}
202
/* Derive the port's MAC address from the switch base MAC by adding the
 * local port number to the last address byte, then program it into both
 * the netdev and the hardware.
 * NOTE(review): the addition does not carry into higher bytes; presumably
 * the base MAC and port count guarantee no wrap — confirm against the
 * device's address allocation scheme.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
212
/* Set the spanning tree state of @vid on the port (SPMS register).
 * The SPMS payload is too large for the stack, hence the heap allocation.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
229
/* Program the port MTU (PMTU register). The hardware MTU accounts for the
 * Tx header and the Ethernet header, so they are added to the requested
 * value before checking it against the port's maximum and writing it.
 * Returns 0, -EINVAL if the resulting MTU exceeds the port maximum, or a
 * negative errno from the register access.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	/* Query with MTU 0 to read the port's maximum supported MTU. */
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
250
/* Assign @local_port to switch partition @swid (PSPA register).
 * Returns 0 or a negative errno.
 */
static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
259
/* Convenience wrapper: set the switch partition ID for an existing port. */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}
267
/* Enable or disable Virtual Port mode on the port (SVPE register).
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
277
/* Install (@valid == true) or remove (@valid == false) a VID to FID
 * mapping of type @mt on the port (SVFA register).
 * Returns 0 or a negative errno.
 */
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
289
/* Enable or disable MAC learning for @vid on the port (SPVMLR register).
 * The SPVMLR payload is too large for the stack, hence the heap allocation.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	/* Single-VID range: begin == end == vid. */
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}
306
/* Configure the system port to local port mapping for this port
 * (SSPR register). Returns 0 or a negative errno.
 */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
316
/* Query the module, width and first Tx lane of @local_port from the
 * PMLP register. Module and lane are taken from lane index 0.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
333
/* Map @local_port to @width consecutive lanes of @module, starting at
 * @lane (PMLP register). Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
349
/* Unmap @local_port from its module by writing a zero width to PMLP.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
358
359 static int mlxsw_sp_port_open(struct net_device *dev)
360 {
361         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
362         int err;
363
364         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
365         if (err)
366                 return err;
367         netif_start_queue(dev);
368         return 0;
369 }
370
371 static int mlxsw_sp_port_stop(struct net_device *dev)
372 {
373         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
374
375         netif_stop_queue(dev);
376         return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
377 }
378
379 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
380                                       struct net_device *dev)
381 {
382         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
383         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
384         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
385         const struct mlxsw_tx_info tx_info = {
386                 .local_port = mlxsw_sp_port->local_port,
387                 .is_emad = false,
388         };
389         u64 len;
390         int err;
391
392         if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
393                 return NETDEV_TX_BUSY;
394
395         if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
396                 struct sk_buff *skb_orig = skb;
397
398                 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
399                 if (!skb) {
400                         this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
401                         dev_kfree_skb_any(skb_orig);
402                         return NETDEV_TX_OK;
403                 }
404         }
405
406         if (eth_skb_pad(skb)) {
407                 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
408                 return NETDEV_TX_OK;
409         }
410
411         mlxsw_sp_txhdr_construct(skb, &tx_info);
412         /* TX header is consumed by HW on the way so we shouldn't count its
413          * bytes as being sent.
414          */
415         len = skb->len - MLXSW_TXHDR_LEN;
416
417         /* Due to a race we might fail here because of a full queue. In that
418          * unlikely case we simply drop the packet.
419          */
420         err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
421
422         if (!err) {
423                 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
424                 u64_stats_update_begin(&pcpu_stats->syncp);
425                 pcpu_stats->tx_packets++;
426                 pcpu_stats->tx_bytes += len;
427                 u64_stats_update_end(&pcpu_stats->syncp);
428         } else {
429                 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
430                 dev_kfree_skb_any(skb);
431         }
432         return NETDEV_TX_OK;
433 }
434
/* ndo_set_rx_mode: intentionally empty — no action is taken here.
 * NOTE(review): presumably Rx filtering is handled elsewhere (e.g. by the
 * device/bridge offload); confirm before extending.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
438
/* ndo_set_mac_address: validate the new address, program it into the
 * hardware first, and only then update the netdev copy, so the two never
 * disagree on failure. Returns 0, -EADDRNOTAVAIL for an invalid address,
 * or a negative errno from the register write.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}
454
/* Pack the configuration of one PG (headroom) buffer into @pbmc_pl.
 * The buffer size is two MTUs worth of cells; when pause or PFC is
 * enabled the buffer is lossless and is enlarged by the propagation
 * delay allowance (PFC-specific delay if PFC is enabled, otherwise the
 * fixed pause delay).
 */
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
{
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}
469
/* Configure the port's headroom (PG) buffers for @mtu.
 * @prio_tc: priority to PG buffer mapping (IEEE_8021QAZ_MAX_TCS entries).
 * @pause_en: global pause enabled on the port.
 * @my_pfc: PFC configuration, or NULL when PFC is not in use.
 *
 * Only PG buffers that have at least one priority mapped to them are
 * repacked; the rest retain the values read back from the device.
 * Returns 0 or a negative errno.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	/* Read the current configuration so untouched buffers are written
	 * back unmodified.
	 */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		/* Buffer 'i' is configured only if some priority maps to
		 * it; PFC applies if enabled for that priority.
		 */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
504
505 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
506                                       int mtu, bool pause_en)
507 {
508         u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
509         bool dcb_en = !!mlxsw_sp_port->dcb.ets;
510         struct ieee_pfc *my_pfc;
511         u8 *prio_tc;
512
513         prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
514         my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
515
516         return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
517                                             pause_en, my_pfc);
518 }
519
/* ndo_change_mtu: resize the headroom buffers for the new MTU first, then
 * program the port MTU. If the MTU write fails, restore the headroom for
 * the old MTU so hardware state stays consistent.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	/* Roll back the headroom to match the still-current MTU. */
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
539
/* ndo_get_stats64: sum the per-CPU Rx/Tx counters into @stats.
 * The 64-bit packet/byte counters are read inside a u64_stats seqcount
 * retry loop so the snapshot is consistent on 32-bit hosts. tx_dropped
 * is a plain u32 updated without the seqcount, so it is summed outside
 * the protected section.
 */
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
571
/* Add or remove the port to/from VLANs [@vid_begin, @vid_end] (SPVM
 * register), optionally as untagged. The SPVM payload is too large for
 * the stack, hence the heap allocation. Returns 0 or a negative errno.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}
589
/* Transition the port to Virtual mode: install an explicit {Port, VID} to
 * FID mapping for every active VLAN, then enable virtual-port mode.
 * On failure, remove the mappings installed so far; last_visited_vid
 * bounds the rollback loop (VLAN_N_VID means "all of them").
 */
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		/* All mappings were installed; undo every one of them. */
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
619
/* Transition the port back to VLAN mode: disable virtual-port mode and
 * remove the explicit {Port, VID} to FID mapping of every active VLAN.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}
639
/* Look up the vFID tracked for @vid on the per-ASIC port vFID list.
 * Returns the FID struct or NULL if none exists.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
{
	struct mlxsw_sp_fid *f;

	list_for_each_entry(f, &mlxsw_sp->port_vfids.list, list) {
		if (f->vid == vid)
			return f;
	}

	return NULL;
}
652
/* Find the first unused vFID index. Returns MLXSW_SP_VFID_PORT_MAX when
 * all vFIDs are in use.
 */
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
				   MLXSW_SP_VFID_PORT_MAX);
}
658
/* Create (@create == true) or destroy (@create == false) FID @fid in the
 * device (SFMR register). Returns 0 or a negative errno.
 */
static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
666
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

/* Allocate a free vFID for @vid, create the corresponding FID in the
 * device and track it on the per-ASIC list. Returns the new FID struct,
 * ERR_PTR(-ERANGE) when no vFID is free, or another ERR_PTR on failure.
 */
static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 u16 vid)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_PORT_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->vid = vid;

	list_add(&f->list, &mlxsw_sp->port_vfids.list);
	set_bit(vfid, mlxsw_sp->port_vfids.mapped);

	return f;

err_allocate_vfid:
	/* Roll back the FID created in the device. */
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}
707
/* Untrack @f, destroy its FID in the device and free it. Inverse of
 * mlxsw_sp_vfid_create().
 */
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);

	clear_bit(vfid, mlxsw_sp->port_vfids.mapped);
	list_del(&f->list);

	mlxsw_sp_vfid_op(mlxsw_sp, f->fid, false);

	kfree(f);
}
720
/* Allocate a vPort for {@mlxsw_sp_port, @vid}, inheriting the parent
 * port's identity and LAG state, and link it on the parent's vPort list.
 * Returns the new vPort or NULL on allocation failure.
 */
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vid = vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}
746
/* Unlink a vPort from its parent's list and free it. Inverse of
 * mlxsw_sp_port_vport_create().
 */
static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}
752
/* Install (@valid == true) or remove (@valid == false) the {Port, VID} to
 * FID mapping for the vPort's VID. Returns 0 or a negative errno.
 */
static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}
762
/* Join the vPort to the vFID tracked for its VID, creating the vFID and
 * enabling flooding on first use, mapping the vPort to the FID and taking
 * a reference. On failure the first-use setup is rolled back; ref_count
 * is still zero on those paths, which the unwind labels rely on.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, vid);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, vid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	/* First user of this vFID sets up flooding. */
	if (!f->ref_count) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
		if (err)
			goto err_vport_flood_set;
	}

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	return 0;

err_vport_fid_map:
	if (!f->ref_count)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}
799
/* Detach the vPort from its vFID: clear the vPort's FID pointer, remove
 * the {Port, VID} to FID mapping and drop the reference. The last user
 * tears down flooding and destroys the vFID.
 */
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	if (--f->ref_count == 0) {
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	}
}
813
/* .ndo_vlan_rx_add_vid handler: back VLAN @vid with a dedicated vPort.
 * Creates the vPort, joins it to a vFID, disables learning on it and puts
 * it in STP forwarding state. Error labels unwind the steps in reverse
 * order, so their sequence must mirror the setup sequence exactly.
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	/* Adding an already-configured VID is a no-op, not an error. */
	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		return -ENOMEM;
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		goto err_vport_vfid_join;
	}

	/* Learning is disabled on the vPort; re-enabled on removal. */
	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	/* Make the port a member of @vid, untagged on egress. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	/* Finally, allow traffic on the new vPort. */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	return 0;

err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
err_vport_vfid_join:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}
891
892 int mlxsw_sp_port_kill_vid(struct net_device *dev,
893                            __be16 __always_unused proto, u16 vid)
894 {
895         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
896         struct mlxsw_sp_port *mlxsw_sp_vport;
897         struct mlxsw_sp_fid *f;
898         int err;
899
900         /* VLAN 0 is removed from HW filter when device goes down, but
901          * it is reserved in our case, so simply return.
902          */
903         if (!vid)
904                 return 0;
905
906         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
907         if (!mlxsw_sp_vport) {
908                 netdev_warn(dev, "VID=%d does not exist\n", vid);
909                 return 0;
910         }
911
912         err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
913                                           MLXSW_REG_SPMS_STATE_DISCARDING);
914         if (err) {
915                 netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
916                 return err;
917         }
918
919         err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
920         if (err) {
921                 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
922                            vid);
923                 return err;
924         }
925
926         err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
927         if (err) {
928                 netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
929                 return err;
930         }
931
932         /* Drop FID reference. If this was the last reference the
933          * resources will be freed.
934          */
935         f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
936         if (f && !WARN_ON(!f->leave))
937                 f->leave(mlxsw_sp_vport);
938
939         /* When removing the last VLAN interface on a bridged port we need to
940          * transition all active 802.1Q bridge VLANs to use VID to FID
941          * mappings and set port's mode to VLAN mode.
942          */
943         if (list_is_singular(&mlxsw_sp_port->vports_list)) {
944                 err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
945                 if (err) {
946                         netdev_err(dev, "Failed to set to VLAN mode\n");
947                         return err;
948                 }
949         }
950
951         mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
952
953         return 0;
954 }
955
956 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
957                                             size_t len)
958 {
959         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
960         u8 module = mlxsw_sp_port->mapping.module;
961         u8 width = mlxsw_sp_port->mapping.width;
962         u8 lane = mlxsw_sp_port->mapping.lane;
963         int err;
964
965         if (!mlxsw_sp_port->split)
966                 err = snprintf(name, len, "p%d", module + 1);
967         else
968                 err = snprintf(name, len, "p%ds%d", module + 1,
969                                lane / width);
970
971         if (err >= len)
972                 return -EINVAL;
973
974         return 0;
975 }
976
/* Netdev operations for a Spectrum front-panel port. FDB and bridge
 * operations are delegated to the generic switchdev helpers.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};
995
996 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
997                                       struct ethtool_drvinfo *drvinfo)
998 {
999         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1000         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1001
1002         strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
1003         strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1004                 sizeof(drvinfo->version));
1005         snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1006                  "%d.%d.%d",
1007                  mlxsw_sp->bus_info->fw_rev.major,
1008                  mlxsw_sp->bus_info->fw_rev.minor,
1009                  mlxsw_sp->bus_info->fw_rev.subminor);
1010         strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1011                 sizeof(drvinfo->bus_info));
1012 }
1013
1014 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1015                                          struct ethtool_pauseparam *pause)
1016 {
1017         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1018
1019         pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1020         pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1021 }
1022
1023 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1024                                    struct ethtool_pauseparam *pause)
1025 {
1026         char pfcc_pl[MLXSW_REG_PFCC_LEN];
1027
1028         mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1029         mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1030         mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1031
1032         return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1033                                pfcc_pl);
1034 }
1035
1036 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1037                                         struct ethtool_pauseparam *pause)
1038 {
1039         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1040         bool pause_en = pause->tx_pause || pause->rx_pause;
1041         int err;
1042
1043         if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1044                 netdev_err(dev, "PFC already enabled on port\n");
1045                 return -EINVAL;
1046         }
1047
1048         if (pause->autoneg) {
1049                 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1050                 return -EINVAL;
1051         }
1052
1053         err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1054         if (err) {
1055                 netdev_err(dev, "Failed to configure port's headroom\n");
1056                 return err;
1057         }
1058
1059         err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1060         if (err) {
1061                 netdev_err(dev, "Failed to set PAUSE parameters\n");
1062                 goto err_port_pause_configure;
1063         }
1064
1065         mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1066         mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1067
1068         return 0;
1069
1070 err_port_pause_configure:
1071         pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1072         mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1073         return err;
1074 }
1075
/* One ethtool statistics entry: its display name and the accessor that
 * extracts the counter from a PPCNT register payload.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];	/* name shown by `ethtool -S` */
	u64 (*getter)(char *payload);	/* extracts value from ppcnt_pl */
};
1080
/* IEEE 802.3 counter group exposed via `ethtool -S`. The order here
 * defines both the string order (get_strings) and the value order
 * (get_ethtool_stats), so entries must stay in sync by position.
 */
static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1161
1162 static void mlxsw_sp_port_get_strings(struct net_device *dev,
1163                                       u32 stringset, u8 *data)
1164 {
1165         u8 *p = data;
1166         int i;
1167
1168         switch (stringset) {
1169         case ETH_SS_STATS:
1170                 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1171                         memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1172                                ETH_GSTRING_LEN);
1173                         p += ETH_GSTRING_LEN;
1174                 }
1175                 break;
1176         }
1177 }
1178
1179 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1180                                      enum ethtool_phys_id_state state)
1181 {
1182         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1183         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1184         char mlcr_pl[MLXSW_REG_MLCR_LEN];
1185         bool active;
1186
1187         switch (state) {
1188         case ETHTOOL_ID_ACTIVE:
1189                 active = true;
1190                 break;
1191         case ETHTOOL_ID_INACTIVE:
1192                 active = false;
1193                 break;
1194         default:
1195                 return -EOPNOTSUPP;
1196         }
1197
1198         mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1199         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1200 }
1201
1202 static void mlxsw_sp_port_get_stats(struct net_device *dev,
1203                                     struct ethtool_stats *stats, u64 *data)
1204 {
1205         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1206         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1207         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1208         int i;
1209         int err;
1210
1211         mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
1212                              MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
1213         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1214         for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
1215                 data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
1216 }
1217
1218 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1219 {
1220         switch (sset) {
1221         case ETH_SS_STATS:
1222                 return MLXSW_SP_PORT_HW_STATS_LEN;
1223         default:
1224                 return -EOPNOTSUPP;
1225         }
1226 }
1227
/* Maps one (or several equivalent) PTYS protocol bits to the ethtool
 * SUPPORTED_*/ADVERTISED_* flags and link speed they represent. Entries
 * with no ethtool counterpart leave supported/advertised at zero.
 */
struct mlxsw_sp_port_link_mode {
	u32 mask;	/* PTYS eth_proto bit(s) */
	u32 supported;	/* ethtool SUPPORTED_* flag, if any */
	u32 advertised;	/* ethtool ADVERTISED_* flag, if any */
	u32 speed;	/* link speed in Mb/s */
};
1234
/* PTYS protocol bit <-> ethtool link mode translation table, ordered by
 * speed. Several entries (25G/50G/100G and 100BASE-TX) predate ethtool
 * link-mode definitions and therefore carry only a speed.
 */
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1333
1334 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1335 {
1336         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1337                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1338                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1339                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1340                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1341                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1342                 return SUPPORTED_FIBRE;
1343
1344         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1345                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1346                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1347                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1348                               MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1349                 return SUPPORTED_Backplane;
1350         return 0;
1351 }
1352
1353 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1354 {
1355         u32 modes = 0;
1356         int i;
1357
1358         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1359                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1360                         modes |= mlxsw_sp_port_link_mode[i].supported;
1361         }
1362         return modes;
1363 }
1364
1365 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1366 {
1367         u32 modes = 0;
1368         int i;
1369
1370         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1371                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1372                         modes |= mlxsw_sp_port_link_mode[i].advertised;
1373         }
1374         return modes;
1375 }
1376
1377 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1378                                             struct ethtool_cmd *cmd)
1379 {
1380         u32 speed = SPEED_UNKNOWN;
1381         u8 duplex = DUPLEX_UNKNOWN;
1382         int i;
1383
1384         if (!carrier_ok)
1385                 goto out;
1386
1387         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1388                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1389                         speed = mlxsw_sp_port_link_mode[i].speed;
1390                         duplex = DUPLEX_FULL;
1391                         break;
1392                 }
1393         }
1394 out:
1395         ethtool_cmd_speed_set(cmd, speed);
1396         cmd->duplex = duplex;
1397 }
1398
1399 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1400 {
1401         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1402                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1403                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1404                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1405                 return PORT_FIBRE;
1406
1407         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1408                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1409                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1410                 return PORT_DA;
1411
1412         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1413                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1414                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1415                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1416                 return PORT_NONE;
1417
1418         return PORT_OTHER;
1419 }
1420
1421 static int mlxsw_sp_port_get_settings(struct net_device *dev,
1422                                       struct ethtool_cmd *cmd)
1423 {
1424         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1425         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1426         char ptys_pl[MLXSW_REG_PTYS_LEN];
1427         u32 eth_proto_cap;
1428         u32 eth_proto_admin;
1429         u32 eth_proto_oper;
1430         int err;
1431
1432         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1433         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1434         if (err) {
1435                 netdev_err(dev, "Failed to get proto");
1436                 return err;
1437         }
1438         mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
1439                               &eth_proto_admin, &eth_proto_oper);
1440
1441         cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
1442                          mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
1443                          SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1444         cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
1445         mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
1446                                         eth_proto_oper, cmd);
1447
1448         eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
1449         cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
1450         cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
1451
1452         cmd->transceiver = XCVR_INTERNAL;
1453         return 0;
1454 }
1455
1456 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1457 {
1458         u32 ptys_proto = 0;
1459         int i;
1460
1461         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1462                 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1463                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1464         }
1465         return ptys_proto;
1466 }
1467
1468 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1469 {
1470         u32 ptys_proto = 0;
1471         int i;
1472
1473         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1474                 if (speed == mlxsw_sp_port_link_mode[i].speed)
1475                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1476         }
1477         return ptys_proto;
1478 }
1479
1480 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
1481 {
1482         u32 ptys_proto = 0;
1483         int i;
1484
1485         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1486                 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
1487                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1488         }
1489         return ptys_proto;
1490 }
1491
1492 static int mlxsw_sp_port_set_settings(struct net_device *dev,
1493                                       struct ethtool_cmd *cmd)
1494 {
1495         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1496         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1497         char ptys_pl[MLXSW_REG_PTYS_LEN];
1498         u32 speed;
1499         u32 eth_proto_new;
1500         u32 eth_proto_cap;
1501         u32 eth_proto_admin;
1502         bool is_up;
1503         int err;
1504
1505         speed = ethtool_cmd_speed(cmd);
1506
1507         eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
1508                 mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
1509                 mlxsw_sp_to_ptys_speed(speed);
1510
1511         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1512         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1513         if (err) {
1514                 netdev_err(dev, "Failed to get proto");
1515                 return err;
1516         }
1517         mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
1518
1519         eth_proto_new = eth_proto_new & eth_proto_cap;
1520         if (!eth_proto_new) {
1521                 netdev_err(dev, "Not supported proto admin requested");
1522                 return -EINVAL;
1523         }
1524         if (eth_proto_new == eth_proto_admin)
1525                 return 0;
1526
1527         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
1528         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1529         if (err) {
1530                 netdev_err(dev, "Failed to set proto admin");
1531                 return err;
1532         }
1533
1534         err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
1535         if (err) {
1536                 netdev_err(dev, "Failed to get oper status");
1537                 return err;
1538         }
1539         if (!is_up)
1540                 return 0;
1541
1542         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1543         if (err) {
1544                 netdev_err(dev, "Failed to set admin status");
1545                 return err;
1546         }
1547
1548         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1549         if (err) {
1550                 netdev_err(dev, "Failed to set admin status");
1551                 return err;
1552         }
1553
1554         return 0;
1555 }
1556
/* ethtool operations for a Spectrum front-panel port. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
1569
1570 static int
1571 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
1572 {
1573         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1574         u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
1575         char ptys_pl[MLXSW_REG_PTYS_LEN];
1576         u32 eth_proto_admin;
1577
1578         eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
1579         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
1580                             eth_proto_admin);
1581         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1582 }
1583
1584 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1585                           enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1586                           bool dwrr, u8 dwrr_weight)
1587 {
1588         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1589         char qeec_pl[MLXSW_REG_QEEC_LEN];
1590
1591         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1592                             next_index);
1593         mlxsw_reg_qeec_de_set(qeec_pl, true);
1594         mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1595         mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1596         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1597 }
1598
1599 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1600                                   enum mlxsw_reg_qeec_hr hr, u8 index,
1601                                   u8 next_index, u32 maxrate)
1602 {
1603         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1604         char qeec_pl[MLXSW_REG_QEEC_LEN];
1605
1606         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1607                             next_index);
1608         mlxsw_reg_qeec_mase_set(qeec_pl, true);
1609         mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1610         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1611 }
1612
1613 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1614                               u8 switch_prio, u8 tclass)
1615 {
1616         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1617         char qtct_pl[MLXSW_REG_QTCT_LEN];
1618
1619         mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1620                             tclass);
1621         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1622 }
1623
/* Initialize the port's egress scheduling (ETS): build the element
 * hierarchy, disable all max shapers and map every switch priority to
 * traffic class 0. Returns 0 or the first error encountered.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the element hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		/* TC i feeds subgroup i (next_index == i). */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1685
1686 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1687                                 bool split, u8 module, u8 width, u8 lane)
1688 {
1689         struct mlxsw_sp_port *mlxsw_sp_port;
1690         struct net_device *dev;
1691         size_t bytes;
1692         int err;
1693
1694         dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1695         if (!dev)
1696                 return -ENOMEM;
1697         mlxsw_sp_port = netdev_priv(dev);
1698         mlxsw_sp_port->dev = dev;
1699         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1700         mlxsw_sp_port->local_port = local_port;
1701         mlxsw_sp_port->split = split;
1702         mlxsw_sp_port->mapping.module = module;
1703         mlxsw_sp_port->mapping.width = width;
1704         mlxsw_sp_port->mapping.lane = lane;
1705         bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
1706         mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
1707         if (!mlxsw_sp_port->active_vlans) {
1708                 err = -ENOMEM;
1709                 goto err_port_active_vlans_alloc;
1710         }
1711         mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
1712         if (!mlxsw_sp_port->untagged_vlans) {
1713                 err = -ENOMEM;
1714                 goto err_port_untagged_vlans_alloc;
1715         }
1716         INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
1717
1718         mlxsw_sp_port->pcpu_stats =
1719                 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1720         if (!mlxsw_sp_port->pcpu_stats) {
1721                 err = -ENOMEM;
1722                 goto err_alloc_stats;
1723         }
1724
1725         dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1726         dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1727
1728         err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1729         if (err) {
1730                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1731                         mlxsw_sp_port->local_port);
1732                 goto err_dev_addr_init;
1733         }
1734
1735         netif_carrier_off(dev);
1736
1737         dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1738                          NETIF_F_HW_VLAN_CTAG_FILTER;
1739
1740         /* Each packet needs to have a Tx header (metadata) on top all other
1741          * headers.
1742          */
1743         dev->hard_header_len += MLXSW_TXHDR_LEN;
1744
1745         err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1746         if (err) {
1747                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1748                         mlxsw_sp_port->local_port);
1749                 goto err_port_system_port_mapping_set;
1750         }
1751
1752         err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1753         if (err) {
1754                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1755                         mlxsw_sp_port->local_port);
1756                 goto err_port_swid_set;
1757         }
1758
1759         err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
1760         if (err) {
1761                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1762                         mlxsw_sp_port->local_port);
1763                 goto err_port_speed_by_width_set;
1764         }
1765
1766         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1767         if (err) {
1768                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1769                         mlxsw_sp_port->local_port);
1770                 goto err_port_mtu_set;
1771         }
1772
1773         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1774         if (err)
1775                 goto err_port_admin_status_set;
1776
1777         err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1778         if (err) {
1779                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1780                         mlxsw_sp_port->local_port);
1781                 goto err_port_buffers_init;
1782         }
1783
1784         err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1785         if (err) {
1786                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1787                         mlxsw_sp_port->local_port);
1788                 goto err_port_ets_init;
1789         }
1790
1791         /* ETS and buffers must be initialized before DCB. */
1792         err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1793         if (err) {
1794                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1795                         mlxsw_sp_port->local_port);
1796                 goto err_port_dcb_init;
1797         }
1798
1799         mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
1800         err = register_netdev(dev);
1801         if (err) {
1802                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1803                         mlxsw_sp_port->local_port);
1804                 goto err_register_netdev;
1805         }
1806
1807         err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
1808                                    mlxsw_sp_port->local_port, dev,
1809                                    mlxsw_sp_port->split, module);
1810         if (err) {
1811                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1812                         mlxsw_sp_port->local_port);
1813                 goto err_core_port_init;
1814         }
1815
1816         err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
1817         if (err)
1818                 goto err_port_vlan_init;
1819
1820         mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1821         return 0;
1822
1823 err_port_vlan_init:
1824         mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1825 err_core_port_init:
1826         unregister_netdev(dev);
1827 err_register_netdev:
1828 err_port_dcb_init:
1829 err_port_ets_init:
1830 err_port_buffers_init:
1831 err_port_admin_status_set:
1832 err_port_mtu_set:
1833 err_port_speed_by_width_set:
1834 err_port_swid_set:
1835 err_port_system_port_mapping_set:
1836 err_dev_addr_init:
1837         free_percpu(mlxsw_sp_port->pcpu_stats);
1838 err_alloc_stats:
1839         kfree(mlxsw_sp_port->untagged_vlans);
1840 err_port_untagged_vlans_alloc:
1841         kfree(mlxsw_sp_port->active_vlans);
1842 err_port_active_vlans_alloc:
1843         free_netdev(dev);
1844         return err;
1845 }
1846
/* Destroy any vPorts still hanging off the port before the port itself
 * is freed.
 */
static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;

	/* NOTE(review): the _safe iterator suggests mlxsw_sp_port_kill_vid()
	 * removes the current entry from vports_list -- confirm in its
	 * definition.
	 */
	list_for_each_entry_safe(mlxsw_sp_vport, tmp,
				 &mlxsw_sp_port->vports_list, vport.list) {
		u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

		/* vPorts created for VLAN devices should already be gone
		 * by now, since we unregistered the port netdev.
		 */
		WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	}
}
1863
/* Tear down a port created by mlxsw_sp_port_create(), undoing its
 * steps in reverse order. No-op if the port was never created.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	/* Remove the port from the table before tearing it down. */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	/* Detach the port from its switch partition and module lanes. */
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	free_netdev(mlxsw_sp_port->dev);
}
1883
1884 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1885 {
1886         int i;
1887
1888         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1889                 mlxsw_sp_port_remove(mlxsw_sp, i);
1890         kfree(mlxsw_sp->ports);
1891 }
1892
1893 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1894 {
1895         u8 module, width, lane;
1896         size_t alloc_size;
1897         int i;
1898         int err;
1899
1900         alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1901         mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1902         if (!mlxsw_sp->ports)
1903                 return -ENOMEM;
1904
1905         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1906                 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
1907                                                     &width, &lane);
1908                 if (err)
1909                         goto err_port_module_info_get;
1910                 if (!width)
1911                         continue;
1912                 mlxsw_sp->port_to_module[i] = module;
1913                 err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
1914                                            lane);
1915                 if (err)
1916                         goto err_port_create;
1917         }
1918         return 0;
1919
1920 err_port_create:
1921 err_port_module_info_get:
1922         for (i--; i >= 1; i--)
1923                 mlxsw_sp_port_remove(mlxsw_sp, i);
1924         kfree(mlxsw_sp->ports);
1925         return err;
1926 }
1927
1928 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1929 {
1930         u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
1931
1932         return local_port - offset;
1933 }
1934
/* Create @count split ports starting at @base_port, all sharing
 * @module, with the module's lanes divided equally among them.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	/* Stage 1: map each split port to its share of the module lanes. */
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	/* Stage 2: enable the ports on SWID 0. */
	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	/* Stage 3: create the netdevs, marked as split ports. */
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

	/* Rollback: a failure in a stage first undoes that stage's
	 * entries 0..i-1, then resets i to count so the fully completed
	 * earlier stages are undone for all ports.
	 */
err_port_create:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}
1977
/* Re-create the original full-width ports after @count split ports
 * starting at @base_port were removed. Best effort: errors from the
 * individual steps are not propagated.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	/* Map each restored port back to all lanes of its module. */
	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	/* Re-enable the restored ports on SWID 0. */
	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	/* Finally re-create the netdevs as non-split ports. */
	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}
2008
/* Split @local_port into @count (2 or 4) ports. The port must be at
 * full width, and the neighboring (even) ports that will host the new
 * split ports must be free. On failure the original ports are
 * re-created.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	/* Only a full-width port has lanes to distribute to splits. */
	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	/* Remove the existing ports occupying the split range. */
	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Restore the ports that were removed above. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}
2069
/* Undo a previous split of @local_port: remove the split ports and
 * re-create the original full-width one(s).
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	/* A single-lane split port implies a split by four, a wider one a
	 * split by two.
	 */
	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
2106
2107 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2108                                      char *pude_pl, void *priv)
2109 {
2110         struct mlxsw_sp *mlxsw_sp = priv;
2111         struct mlxsw_sp_port *mlxsw_sp_port;
2112         enum mlxsw_reg_pude_oper_status status;
2113         u8 local_port;
2114
2115         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2116         mlxsw_sp_port = mlxsw_sp->ports[local_port];
2117         if (!mlxsw_sp_port) {
2118                 dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
2119                          local_port);
2120                 return;
2121         }
2122
2123         status = mlxsw_reg_pude_oper_status_get(pude_pl);
2124         if (status == MLXSW_PORT_OPER_STATUS_UP) {
2125                 netdev_info(mlxsw_sp_port->dev, "link up\n");
2126                 netif_carrier_on(mlxsw_sp_port->dev);
2127         } else {
2128                 netdev_info(mlxsw_sp_port->dev, "link down\n");
2129                 netif_carrier_off(mlxsw_sp_port->dev);
2130         }
2131 }
2132
/* Event listener for Port Up/Down Event (PUDE) traps. */
static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
2137
2138 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
2139                                    enum mlxsw_event_trap_id trap_id)
2140 {
2141         struct mlxsw_event_listener *el;
2142         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2143         int err;
2144
2145         switch (trap_id) {
2146         case MLXSW_TRAP_ID_PUDE:
2147                 el = &mlxsw_sp_pude_event;
2148                 break;
2149         }
2150         err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
2151         if (err)
2152                 return err;
2153
2154         mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
2155         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2156         if (err)
2157                 goto err_event_trap_set;
2158
2159         return 0;
2160
2161 err_event_trap_set:
2162         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2163         return err;
2164 }
2165
2166 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
2167                                       enum mlxsw_event_trap_id trap_id)
2168 {
2169         struct mlxsw_event_listener *el;
2170
2171         switch (trap_id) {
2172         case MLXSW_TRAP_ID_PUDE:
2173                 el = &mlxsw_sp_pude_event;
2174                 break;
2175         }
2176         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2177 }
2178
2179 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
2180                                       void *priv)
2181 {
2182         struct mlxsw_sp *mlxsw_sp = priv;
2183         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2184         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2185
2186         if (unlikely(!mlxsw_sp_port)) {
2187                 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2188                                      local_port);
2189                 return;
2190         }
2191
2192         skb->dev = mlxsw_sp_port->dev;
2193
2194         pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2195         u64_stats_update_begin(&pcpu_stats->syncp);
2196         pcpu_stats->rx_packets++;
2197         pcpu_stats->rx_bytes += skb->len;
2198         u64_stats_update_end(&pcpu_stats->syncp);
2199
2200         skb->protocol = eth_type_trans(skb, skb->dev);
2201         netif_receive_skb(skb);
2202 }
2203
/* Packet types trapped to the CPU on any port; all are delivered
 * through mlxsw_sp_rx_listener_func() to the ingress port's netdev.
 */
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};
2277
/* Configure the RX and control trap groups, then register every RX
 * listener and set its packet type to be trapped to the CPU. On
 * failure, already-configured entries are fully unwound.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

	/* Entry i was registered but its trap action failed: drop that
	 * listener first, then unwind entries 0..i-1 completely.
	 */
err_rx_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		/* Set the action back to forward before removing the
		 * listener for this trap ID.
		 */
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}
2326
2327 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2328 {
2329         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2330         int i;
2331
2332         for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2333                 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
2334                                     mlxsw_sp_rx_listener[i].trap_id);
2335                 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2336
2337                 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2338                                                   &mlxsw_sp_rx_listener[i],
2339                                                   mlxsw_sp);
2340         }
2341 }
2342
2343 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2344                                  enum mlxsw_reg_sfgc_type type,
2345                                  enum mlxsw_reg_sfgc_bridge_type bridge_type)
2346 {
2347         enum mlxsw_flood_table_type table_type;
2348         enum mlxsw_sp_flood_table flood_table;
2349         char sfgc_pl[MLXSW_REG_SFGC_LEN];
2350
2351         if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
2352                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
2353         else
2354                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2355
2356         if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2357                 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2358         else
2359                 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
2360
2361         mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2362                             flood_table);
2363         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2364 }
2365
2366 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
2367 {
2368         int type, err;
2369
2370         for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
2371                 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
2372                         continue;
2373
2374                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2375                                             MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
2376                 if (err)
2377                         return err;
2378
2379                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2380                                             MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
2381                 if (err)
2382                         return err;
2383         }
2384
2385         return 0;
2386 }
2387
2388 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2389 {
2390         char slcr_pl[MLXSW_REG_SLCR_LEN];
2391
2392         mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2393                                      MLXSW_REG_SLCR_LAG_HASH_DMAC |
2394                                      MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2395                                      MLXSW_REG_SLCR_LAG_HASH_VLANID |
2396                                      MLXSW_REG_SLCR_LAG_HASH_SIP |
2397                                      MLXSW_REG_SLCR_LAG_HASH_DIP |
2398                                      MLXSW_REG_SLCR_LAG_HASH_SPORT |
2399                                      MLXSW_REG_SLCR_LAG_HASH_DPORT |
2400                                      MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
2401         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2402 }
2403
/* Driver init callback (mlxsw_driver.init): bring up the Spectrum ASIC
 * instance. Initialization order matters; the error labels below unwind
 * only the steps that completed (note mlxsw_sp_lag_init() has no
 * corresponding fini, hence err_switchdev_init/err_lag_init share a path).
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	/* Empty lists for FID and vFID tracking; torn down in fini. */
	INIT_LIST_HEAD(&mlxsw_sp->fids);
	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	/* PUDE: port up/down events from the device. */
	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

err_switchdev_init:
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
	return err;
}
2479
/* Driver fini callback: tear down in reverse order of mlxsw_sp_init().
 * All FIDs should have been released by now; warn if any leaked.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->fids));
}
2491
/* Device configuration profile passed to the core at device init.
 * Only fields whose used_* flag is set are programmed into firmware;
 * the rest keep firmware defaults.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,	/* VEPA not used */
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	/* NOTE(review): 3 presumably selects mixed/controlled flood mode —
	 * confirm against the CONFIG_PROFILE register documentation.
	 */
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 2,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,	/* no InfiniBand multicast */
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2527
/* mlxsw core driver ops for the Spectrum ASIC: lifecycle, port split,
 * shared-buffer (sb_*) devlink callbacks and TX header construction.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner				= THIS_MODULE,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};
2550
2551 static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
2552                                          u16 fid)
2553 {
2554         if (mlxsw_sp_fid_is_vfid(fid))
2555                 return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
2556         else
2557                 return test_bit(fid, lag_port->active_vlans);
2558 }
2559
2560 static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
2561                                            u16 fid)
2562 {
2563         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2564         u8 local_port = mlxsw_sp_port->local_port;
2565         u16 lag_id = mlxsw_sp_port->lag_id;
2566         int i, count = 0;
2567
2568         if (!mlxsw_sp_port->lagged)
2569                 return true;
2570
2571         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2572                 struct mlxsw_sp_port *lag_port;
2573
2574                 lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
2575                 if (!lag_port || lag_port->local_port == local_port)
2576                         continue;
2577                 if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
2578                         count++;
2579         }
2580
2581         return !count;
2582 }
2583
/* Flush all FDB entries learned on (@mlxsw_sp_port, @fid) via the SFDF
 * register, using the per-port-and-FID flush type.
 */
static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
		   mlxsw_sp_port->local_port, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
2601
/* Flush all FDB entries learned on (@mlxsw_sp_port's LAG, @fid) via the
 * SFDF register, using the per-LAG-and-FID flush type. Caller must ensure
 * the port is lagged.
 */
static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
		   mlxsw_sp_port->lag_id, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
2618
2619 int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
2620 {
2621         if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
2622                 return 0;
2623
2624         if (mlxsw_sp_port->lagged)
2625                 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
2626                                                              fid);
2627         else
2628                 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
2629 }
2630
/* True if @dev is an mlxsw Spectrum port netdev (identified by its ops). */
static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
2635
2636 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
2637                                          struct net_device *br_dev)
2638 {
2639         return !mlxsw_sp->master_bridge.dev ||
2640                mlxsw_sp->master_bridge.dev == br_dev;
2641 }
2642
/* Record @br_dev as the device's master bridge and take a reference.
 * Callers must have validated it with mlxsw_sp_master_bridge_check().
 */
static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}
2649
/* Drop one reference on the master bridge; clear it when the last
 * bridged port leaves.
 */
static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0)
		mlxsw_sp->master_bridge.dev = NULL;
}
2655
/* Enslave the port to bridge @br_dev: remove the implicit PVID=1 VLAN
 * interface and enable learning/flooding so the bridge controls VLANs.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *br_dev)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}
2680
/* Release the port from its bridge: restore PVID 1, drop the master
 * bridge reference, disable bridge offload flags and re-create the
 * implicit VLAN 1 interface for standalone operation.
 */
static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	mlxsw_sp_port_add_vid(dev, 0, 1);
}
2699
/* Create LAG @lag_id in the device (SLDR register). */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2707
/* Destroy LAG @lag_id in the device (SLDR register). */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2715
/* Add the port to LAG @lag_id's collector at member slot @port_index
 * (SLCOR register). The port is added disabled; see
 * mlxsw_sp_lag_col_port_enable().
 */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2726
/* Remove the port from LAG @lag_id's collector (SLCOR register). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2737
/* Enable collection on the port within LAG @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2748
/* Disable collection on the port within LAG @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2759
2760 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2761                                   struct net_device *lag_dev,
2762                                   u16 *p_lag_id)
2763 {
2764         struct mlxsw_sp_upper *lag;
2765         int free_lag_id = -1;
2766         int i;
2767
2768         for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
2769                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
2770                 if (lag->ref_count) {
2771                         if (lag->dev == lag_dev) {
2772                                 *p_lag_id = i;
2773                                 return 0;
2774                         }
2775                 } else if (free_lag_id < 0) {
2776                         free_lag_id = i;
2777                 }
2778         }
2779         if (free_lag_id < 0)
2780                 return -EBUSY;
2781         *p_lag_id = free_lag_id;
2782         return 0;
2783 }
2784
2785 static bool
2786 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
2787                           struct net_device *lag_dev,
2788                           struct netdev_lag_upper_info *lag_upper_info)
2789 {
2790         u16 lag_id;
2791
2792         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
2793                 return false;
2794         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2795                 return false;
2796         return true;
2797 }
2798
2799 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2800                                        u16 lag_id, u8 *p_port_index)
2801 {
2802         int i;
2803
2804         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2805                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
2806                         *p_port_index = i;
2807                         return 0;
2808                 }
2809         }
2810         return -EBUSY;
2811 }
2812
/* Enslave the port to LAG device @lag_dev: create the device LAG on
 * first use, claim a member slot, add and enable the port's collector,
 * and record the LAG mapping in the core. On failure the goto labels
 * unwind only what was done (the LAG itself is destroyed only if this
 * port was its first would-be member).
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		/* First member: create the LAG in hardware. */
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;
	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
2857
/* Release the port from its LAG: disable and remove the collector,
 * leave any bridge the port joined through the LAG, destroy the LAG in
 * hardware when this was the last member, and clear the core mapping.
 * No-op if the port is not lagged.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	/* ref_count is decremented below, so 1 means last member. */
	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;
}
2886
/* Add the port to LAG @lag_id's distributor so egress traffic may be
 * hashed to it (SLDR register).
 */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2897
/* Remove the port from LAG @lag_id's distributor (SLDR register). */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2908
2909 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
2910                                        bool lag_tx_enabled)
2911 {
2912         if (lag_tx_enabled)
2913                 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
2914                                                   mlxsw_sp_port->lag_id);
2915         else
2916                 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
2917                                                      mlxsw_sp_port->lag_id);
2918 }
2919
/* Handle a LAG lower-state change for the port: currently only the
 * tx_enabled flag is mirrored into the device.
 */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
2925
/* Associate a VLAN upper device with the matching vPort so the vPort
 * reports events against @vlan_dev instead of the physical port. The
 * vPort is expected to exist already (created on VID addition).
 */
static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}
2940
/* Undo mlxsw_sp_port_vlan_link(): point the vPort back at the physical
 * port's netdev when the VLAN upper is unlinked.
 */
static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}
2953
/* Netdev notifier handler for upper-device changes on an mlxsw port.
 * PRECHANGEUPPER validates the proposed topology (single bridge, hashed
 * LAG, no VLAN uppers on LAG candidates) and can veto it; CHANGEUPPER
 * applies the join/leave to VLAN, bridge or LAG uppers.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG and bridge uppers are offloadable. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		/* A port with VLAN uppers cannot be enslaved to a LAG. */
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		/* VLAN uppers on a LAG port must sit on the LAG itself. */
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				 mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							   upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else {
			/* PRECHANGEUPPER should have rejected this upper. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
3021
3022 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
3023                                                unsigned long event, void *ptr)
3024 {
3025         struct netdev_notifier_changelowerstate_info *info;
3026         struct mlxsw_sp_port *mlxsw_sp_port;
3027         int err;
3028
3029         mlxsw_sp_port = netdev_priv(dev);
3030         info = ptr;
3031
3032         switch (event) {
3033         case NETDEV_CHANGELOWERSTATE:
3034                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
3035                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
3036                                                         info->lower_state_info);
3037                         if (err)
3038                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
3039                 }
3040                 break;
3041         }
3042
3043         return 0;
3044 }
3045
3046 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
3047                                          unsigned long event, void *ptr)
3048 {
3049         switch (event) {
3050         case NETDEV_PRECHANGEUPPER:
3051         case NETDEV_CHANGEUPPER:
3052                 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
3053         case NETDEV_CHANGELOWERSTATE:
3054                 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
3055         }
3056
3057         return 0;
3058 }
3059
3060 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
3061                                         unsigned long event, void *ptr)
3062 {
3063         struct net_device *dev;
3064         struct list_head *iter;
3065         int ret;
3066
3067         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3068                 if (mlxsw_sp_port_dev_check(dev)) {
3069                         ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
3070                         if (ret)
3071                                 return ret;
3072                 }
3073         }
3074
3075         return 0;
3076 }
3077
3078 static struct mlxsw_sp_fid *
3079 mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
3080                       const struct net_device *br_dev)
3081 {
3082         struct mlxsw_sp_fid *f;
3083
3084         list_for_each_entry(f, &mlxsw_sp->br_vfids.list, list) {
3085                 if (f->dev == br_dev)
3086                         return f;
3087         }
3088
3089         return NULL;
3090 }
3091
/* Convert a global vFID to a bridge-vFID index: bridge vFIDs occupy the
 * range above the per-port vFIDs.
 */
static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
{
	return vfid - MLXSW_SP_VFID_PORT_MAX;
}
3096
/* Inverse of mlxsw_sp_vfid_to_br_vfid(): bridge-vFID index to global vFID. */
static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
{
	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
}
3101
/* First free bridge-vFID index; returns MLXSW_SP_VFID_BR_MAX when the
 * bitmap is full (find_first_zero_bit convention).
 */
static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
				   MLXSW_SP_VFID_BR_MAX);
}
3107
3108 static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
3109
/* Allocate a bridge vFID bound to @br_dev: claim a free bridge-vFID
 * index, create the FID in hardware, and track it on the br_vfids list.
 * Returns the new FID struct or ERR_PTR(-ERANGE) when no vFID is free,
 * ERR_PTR(-ENOMEM) on allocation failure, or a hardware error.
 */
static struct mlxsw_sp_fid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
						    struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	/* A full bitmap maps to exactly MLXSW_SP_VFID_MAX. */
	vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_br_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->br_vfids.list);
	set_bit(mlxsw_sp_vfid_to_br_vfid(vfid), mlxsw_sp->br_vfids.mapped);

	return f;

err_allocate_vfid:
	/* Undo the hardware FID creation before reporting OOM. */
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}
3148
/* Free a bridge vFID: release its bitmap slot, unlink it from the
 * br_vfids list, destroy the FID in hardware and free the struct.
 * Caller must hold the last reference.
 */
static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid);

	clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
	list_del(&f->list);

	mlxsw_sp_vfid_op(mlxsw_sp, f->fid, false);

	kfree(f);
}
3162
/* Join the vPort to the bridge vFID of @br_dev, creating the vFID on
 * first use. Sets up flooding and the {Port, VID}->FID mapping, then
 * takes a reference. On error a freshly created (unreferenced) vFID is
 * destroyed again.
 */
static int mlxsw_sp_vport_br_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_br_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_br_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	/* Destroy the vFID only if we just created it (no users yet). */
	if (!f->ref_count)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}
3198
/* Detach a vPort from its bridge vFID, undoing
 * mlxsw_sp_vport_br_vfid_join(): unmap and disable flooding, flush the
 * FDB, clear the vPort's FID pointer and destroy the FID when the last
 * reference is dropped.
 */
static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	/* Flush FDB entries learned on this FID before dropping it. */
	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	/* Last user gone - release the FID itself. */
	if (--f->ref_count == 0)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}
3215
/* Enslave a vPort to a bridge device: move it from its default vFID to
 * the bridge's vFID and enable learning on its VID. Returns 0 or a
 * negative errno; on failure the vPort is restored to its default vFID.
 */
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	/* Leave the default vFID before joining the bridge's vFID. */
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_br_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		goto err_vport_br_vfid_join;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	/* Record the bridge-member state mirrored to the bridge layer. */
	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_br_vfid_leave(mlxsw_sp_vport);
err_vport_br_vfid_join:
	/* Roll back to the default vFID we left above. */
	mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);
	return err;
}
3250
/* Release a vPort from its bridge, undoing mlxsw_sp_vport_bridge_join():
 * disable learning, move back to the default vFID and force the VID into
 * forwarding state.
 */
static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_br_vfid_leave(mlxsw_sp_vport);

	/* Re-join the default vFID used by unbridged vPorts. */
	mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);

	/* No bridge means no STP; set the VID to forwarding. */
	mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
				    MLXSW_REG_SPMS_STATE_FORWARDING);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;
}
3269
3270 static bool
3271 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
3272                                   const struct net_device *br_dev)
3273 {
3274         struct mlxsw_sp_port *mlxsw_sp_vport;
3275
3276         list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
3277                             vport.list) {
3278                 struct net_device *dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
3279
3280                 if (dev && dev == br_dev)
3281                         return false;
3282         }
3283
3284         return true;
3285 }
3286
/* Handle CHANGEUPPER notifications for a VLAN upper of an mlxsw port:
 * validate the requested enslavement at PRECHANGEUPPER and perform the
 * bridge join/leave at CHANGEUPPER. Returns 0 or a negative errno.
 */
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	/* May be NULL if no vPort exists for this VID on the port. */
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only enslavement to a bridge master is supported. */
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			/* The vPort is expected to exist once linking is
			 * committed; warn if it does not.
			 */
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			/* Nothing to tear down if the vPort is gone. */
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
	}

	return err;
}
3329
3330 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
3331                                               unsigned long event, void *ptr,
3332                                               u16 vid)
3333 {
3334         struct net_device *dev;
3335         struct list_head *iter;
3336         int ret;
3337
3338         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3339                 if (mlxsw_sp_port_dev_check(dev)) {
3340                         ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
3341                                                              vid);
3342                         if (ret)
3343                                 return ret;
3344                 }
3345         }
3346
3347         return 0;
3348 }
3349
3350 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
3351                                          unsigned long event, void *ptr)
3352 {
3353         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3354         u16 vid = vlan_dev_vlan_id(vlan_dev);
3355
3356         if (mlxsw_sp_port_dev_check(real_dev))
3357                 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
3358                                                       vid);
3359         else if (netif_is_lag_master(real_dev))
3360                 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
3361                                                           vid);
3362
3363         return 0;
3364 }
3365
/* Top-level netdevice notifier callback: route the event to the handler
 * matching the device type (mlxsw port, LAG master or VLAN device) and
 * translate the errno into a notifier return value.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err;

	if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else
		err = 0;

	return notifier_from_errno(err);
}
3381
/* Notifier block registered in module init to receive netdevice events. */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
3385
3386 static int __init mlxsw_sp_module_init(void)
3387 {
3388         int err;
3389
3390         register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3391         err = mlxsw_core_driver_register(&mlxsw_sp_driver);
3392         if (err)
3393                 goto err_core_driver_register;
3394         return 0;
3395
3396 err_core_driver_register:
3397         unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3398         return err;
3399 }
3400
/* Module exit point: tear down in reverse order of mlxsw_sp_module_init(). */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
3406
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

/* Module metadata and device alias for automatic loading. */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);