1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/types.h>
39 #include <linux/netdevice.h>
40 #include <linux/etherdevice.h>
41 #include <linux/slab.h>
42 #include <linux/device.h>
43 #include <linux/skbuff.h>
44 #include <linux/if_vlan.h>
45 #include <linux/if_bridge.h>
46 #include <linux/workqueue.h>
47 #include <linux/jiffies.h>
48 #include <linux/rtnetlink.h>
49 #include <net/switchdev.h>
50
51 #include "spectrum.h"
52 #include "core.h"
53 #include "reg.h"
54
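/* Resolve the FID used for FDB operations: a vPort uses the FID it is
 * bound to, otherwise the VID itself serves as the FID, falling back to
 * the port's PVID when no VID is given.
 */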
55 static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
56                                         u16 vid)
57 {
58         struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
59         u16 fid = vid;
60
61         fid = f ? f->fid : fid;
62
63         if (!fid)
64                 fid = mlxsw_sp_port->pvid;
65
66         return fid;
67 }
68
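/* If the switchdev operation was originally issued on a VLAN upper of
 * the port, return the matching vPort; otherwise return the port itself.
 */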
69 static struct mlxsw_sp_port *
70 mlxsw_sp_port_orig_get(struct net_device *dev,
71                        struct mlxsw_sp_port *mlxsw_sp_port)
72 {
73         struct mlxsw_sp_port *mlxsw_sp_vport;
74         u16 vid;
75
76         if (!is_vlan_dev(dev))
77                 return mlxsw_sp_port;
78
79         vid = vlan_dev_vlan_id(dev);
80         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
81         WARN_ON(!mlxsw_sp_vport);
82
83         return mlxsw_sp_vport;
84 }
85
86 static int mlxsw_sp_port_attr_get(struct net_device *dev,
87                                   struct switchdev_attr *attr)
88 {
89         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
90         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
91
92         mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
93         if (!mlxsw_sp_port)
94                 return -EINVAL;
95
96         switch (attr->id) {
97         case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
98                 attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
99                 memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
100                        attr->u.ppid.id_len);
101                 break;
102         case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
103                 attr->u.brport_flags =
104                         (mlxsw_sp_port->learning ? BR_LEARNING : 0) |
105                         (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
106                         (mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
107                 break;
108         default:
109                 return -EOPNOTSUPP;
110         }
111
112         return 0;
113 }
114
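/* Translate the bridge STP state to an SPMS register state and apply it,
 * either to the single VID of a vPort or to all active VLANs of the port.
 */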
115 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
116                                        u8 state)
117 {
118         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
119         enum mlxsw_reg_spms_state spms_state;
120         char *spms_pl;
121         u16 vid;
122         int err;
123
124         switch (state) {
125         case BR_STATE_FORWARDING:
126                 spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
127                 break;
128         case BR_STATE_LEARNING:
129                 spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
130                 break;
131         case BR_STATE_LISTENING: /* fall-through */
132         case BR_STATE_DISABLED: /* fall-through */
133         case BR_STATE_BLOCKING:
134                 spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
135                 break;
136         default:
137                 BUG();
138         }
139
140         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
141         if (!spms_pl)
142                 return -ENOMEM;
143         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
144
145         if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
146                 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
147                 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
148         } else {
149                 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
150                         mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
151         }
152
153         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
154         kfree(spms_pl);
155         return err;
156 }
157
158 static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
159                                             struct switchdev_trans *trans,
160                                             u8 state)
161 {
162         if (switchdev_trans_ph_prepare(trans))
163                 return 0;
164
165         mlxsw_sp_port->stp_state = state;
166         return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
167 }
168
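/* Set membership in the MLXSW_SP_FLOOD_TABLE_UC and MLXSW_SP_FLOOD_TABLE_BM
 * flood tables for a range of (v)FIDs via the SFTR register. If the second
 * write fails, the first is reverted so the operation appears atomic to the
 * caller.
 */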
169 static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
170                                      u16 idx_begin, u16 idx_end, bool uc_set,
171                                      bool bm_set)
172 {
173         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
174         u16 local_port = mlxsw_sp_port->local_port;
175         enum mlxsw_flood_table_type table_type;
176         u16 range = idx_end - idx_begin + 1;
177         char *sftr_pl;
178         int err;
179
180         if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
181                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
182         else
183                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
184
185         sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
186         if (!sftr_pl)
187                 return -ENOMEM;
188
189         mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
190                             table_type, range, local_port, uc_set);
191         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
192         if (err)
193                 goto buffer_out;
194
195         mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
196                             table_type, range, local_port, bm_set);
197         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
198         if (err)
199                 goto err_flood_bm_set;
200
201         goto buffer_out;
202
203 err_flood_bm_set:
204         mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
205                             table_type, range, local_port, !uc_set);
206         mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
207 buffer_out:
208         kfree(sftr_pl);
209         return err;
210 }
211
212 static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
213                                       bool set)
214 {
215         struct net_device *dev = mlxsw_sp_port->dev;
216         u16 vid, last_visited_vid;
217         int err;
218
219         if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
220                 u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
221                 u16 vfid = mlxsw_sp_fid_to_vfid(fid);
222
223                 return  __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
224                                                   set, true);
225         }
226
227         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
228                 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
229                                                 true);
230                 if (err) {
231                         last_visited_vid = vid;
232                         goto err_port_flood_set;
233                 }
234         }
235
236         return 0;
237
238 err_port_flood_set:
239         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
240                 __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
241         netdev_err(dev, "Failed to configure unicast flooding\n");
242         return err;
243 }
244
245 int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
246                              bool set)
247 {
248         u16 vfid;
249
250         /* In case of vFIDs, index into the flooding table is relative to
251          * the start of the vFIDs range.
252          */
253         vfid = mlxsw_sp_fid_to_vfid(fid);
254         return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
255 }
256
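/* Bridge port flags are only accepted for bridged ports. A change in
 * BR_FLOOD re-programs the unicast flood tables before the software
 * state (uc_flood, learning, learning_sync) is updated.
 */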
257 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
258                                            struct switchdev_trans *trans,
259                                            unsigned long brport_flags)
260 {
261         unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
262         bool set;
263         int err;
264
265         if (!mlxsw_sp_port->bridged)
266                 return -EINVAL;
267
268         if (switchdev_trans_ph_prepare(trans))
269                 return 0;
270
271         if ((uc_flood ^ brport_flags) & BR_FLOOD) {
272                 set = !mlxsw_sp_port->uc_flood;
273                 err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
274                 if (err)
275                         return err;
276         }
277
278         mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
279         mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
280         mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
281
282         return 0;
283 }
284
285 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
286 {
287         char sfdat_pl[MLXSW_REG_SFDAT_LEN];
288         int err;
289
290         mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
291         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
292         if (err)
293                 return err;
294         mlxsw_sp->ageing_time = ageing_time;
295         return 0;
296 }
297
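/* The requested ageing time is validated against the device limits in the
 * prepare phase and only written to the SFDAT register in the commit phase.
 */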
298 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
299                                             struct switchdev_trans *trans,
300                                             unsigned long ageing_clock_t)
301 {
302         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
303         unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
304         u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
305
306         if (switchdev_trans_ph_prepare(trans)) {
307                 if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
308                     ageing_time > MLXSW_SP_MAX_AGEING_TIME)
309                         return -ERANGE;
310                 else
311                         return 0;
312         }
313
314         return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
315 }
316
317 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
318                                           struct switchdev_trans *trans,
319                                           struct net_device *orig_dev,
320                                           bool vlan_enabled)
321 {
322         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
323
324         /* SWITCHDEV_TRANS_PREPARE phase */
325         if (!vlan_enabled && mlxsw_sp->master_bridge.dev == orig_dev) {
326                 netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
327                 return -EINVAL;
328         }
329
330         return 0;
331 }
332
333 static int mlxsw_sp_port_attr_set(struct net_device *dev,
334                                   const struct switchdev_attr *attr,
335                                   struct switchdev_trans *trans)
336 {
337         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
338         int err = 0;
339
340         mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
341         if (!mlxsw_sp_port)
342                 return -EINVAL;
343
344         switch (attr->id) {
345         case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
346                 err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
347                                                        attr->u.stp_state);
348                 break;
349         case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
350                 err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
351                                                       attr->u.brport_flags);
352                 break;
353         case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
354                 err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
355                                                        attr->u.ageing_time);
356                 break;
357         case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
358                 err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
359                                                      attr->orig_dev,
360                                                      attr->u.vlan_filtering);
361                 break;
362         default:
363                 err = -EOPNOTSUPP;
364                 break;
365         }
366
367         return err;
368 }
369
370 static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
371 {
372         char sfmr_pl[MLXSW_REG_SFMR_LEN];
373
374         mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
375         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
376 }
377
378 static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
379 {
380         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
381         char svfa_pl[MLXSW_REG_SVFA_LEN];
382
383         mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
384         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
385 }
386
387 static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
388 {
389         struct mlxsw_sp_fid *f;
390
391         f = kzalloc(sizeof(*f), GFP_KERNEL);
392         if (!f)
393                 return NULL;
394
395         f->fid = fid;
396
397         return f;
398 }
399
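/* Create a FID in the device (SFMR), install the global VID-to-FID mapping
 * (SVFA) and track it in the driver's FID list. Errors unwind in reverse
 * order and are returned as an ERR_PTR.
 */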
400 struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
401 {
402         struct mlxsw_sp_fid *f;
403         int err;
404
405         err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
406         if (err)
407                 return ERR_PTR(err);
408
409         /* Although all the ports that are members of the FID might be
410          * using a {Port, VID} to FID mapping, we create a global
411          * VID-to-FID mapping. This allows a port to transition to VLAN
412          * mode, knowing the global mapping exists.
413          */
414         err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
415         if (err)
416                 goto err_fid_map;
417
418         f = mlxsw_sp_fid_alloc(fid);
419         if (!f) {
420                 err = -ENOMEM;
421                 goto err_allocate_fid;
422         }
423
424         list_add(&f->list, &mlxsw_sp->fids);
425
426         return f;
427
428 err_allocate_fid:
429         mlxsw_sp_fid_map(mlxsw_sp, fid, false);
430 err_fid_map:
431         mlxsw_sp_fid_op(mlxsw_sp, fid, false);
432         return ERR_PTR(err);
433 }
434
435 void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
436 {
437         u16 fid = f->fid;
438
439         list_del(&f->list);
440
441         if (f->r)
442                 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
443
444         kfree(f);
445
446         mlxsw_sp_fid_map(mlxsw_sp, fid, false);
447
448         mlxsw_sp_fid_op(mlxsw_sp, fid, false);
449 }
450
451 static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
452                                     u16 fid)
453 {
454         struct mlxsw_sp_fid *f;
455
456         if (test_bit(fid, mlxsw_sp_port->active_vlans))
457                 return 0;
458
459         f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
460         if (!f) {
461                 f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
462                 if (IS_ERR(f))
463                         return PTR_ERR(f);
464         }
465
466         f->ref_count++;
467
468         netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);
469
470         return 0;
471 }
472
473 static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
474                                       u16 fid)
475 {
476         struct mlxsw_sp_fid *f;
477
478         f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
479         if (WARN_ON(!f))
480                 return;
481
482         netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);
483
484         mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);
485
486         if (--f->ref_count == 0)
487                 mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
488 }
489
490 static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
491                                  bool valid)
492 {
493         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
494
495         /* If port doesn't have vPorts, then it can use the global
496          * VID-to-FID mapping.
497          */
498         if (list_empty(&mlxsw_sp_port->vports_list))
499                 return 0;
500
501         return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
502 }
503
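/* Join a contiguous range of FIDs: take a reference on each FID (creating
 * it if necessary), enable flooding for the range and install {Port, VID}
 * to FID mappings when the port has vPorts. Any failure unwinds the steps
 * already performed.
 */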
504 static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
505                                   u16 fid_begin, u16 fid_end)
506 {
507         int fid, err;
508
509         for (fid = fid_begin; fid <= fid_end; fid++) {
510                 err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
511                 if (err)
512                         goto err_port_fid_join;
513         }
514
515         err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
516                                         mlxsw_sp_port->uc_flood, true);
517         if (err)
518                 goto err_port_flood_set;
519
520         for (fid = fid_begin; fid <= fid_end; fid++) {
521                 err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
522                 if (err)
523                         goto err_port_fid_map;
524         }
525
526         return 0;
527
528 err_port_fid_map:
529         for (fid--; fid >= fid_begin; fid--)
530                 mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
531         __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
532                                   false);
533 err_port_flood_set:
534         fid = fid_end;
535 err_port_fid_join:
536         for (fid--; fid >= fid_begin; fid--)
537                 __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
538         return err;
539 }
540
541 static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
542                                     u16 fid_begin, u16 fid_end)
543 {
544         int fid;
545
546         for (fid = fid_begin; fid <= fid_end; fid++)
547                 mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
548
549         __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
550                                   false);
551
552         for (fid = fid_begin; fid <= fid_end; fid++)
553                 __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
554 }
555
556 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
557                                     u16 vid)
558 {
559         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
560         char spvid_pl[MLXSW_REG_SPVID_LEN];
561
562         mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
563         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
564 }
565
566 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
567                                             bool allow)
568 {
569         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
570         char spaft_pl[MLXSW_REG_SPAFT_LEN];
571
572         mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
573         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
574 }
575
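/* A PVID of 0 means untagged traffic is no longer allowed on the port.
 * Any other value is programmed via SPVID, and untagged traffic is
 * allowed if it was previously disallowed.
 */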
576 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
577 {
578         struct net_device *dev = mlxsw_sp_port->dev;
579         int err;
580
581         if (!vid) {
582                 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
583                 if (err) {
584                         netdev_err(dev, "Failed to disallow untagged traffic\n");
585                         return err;
586                 }
587         } else {
588                 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
589                 if (err) {
590                         netdev_err(dev, "Failed to set PVID\n");
591                         return err;
592                 }
593
594                 /* Only allow untagged traffic if not already allowed. */
595                 if (!mlxsw_sp_port->pvid) {
596                         err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
597                                                                true);
598                         if (err) {
599                                 netdev_err(dev, "Failed to allow untagged traffic\n");
600                                 goto err_port_allow_untagged_set;
601                         }
602                 }
603         }
604
605         mlxsw_sp_port->pvid = vid;
606         return 0;
607
608 err_port_allow_untagged_set:
609         __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
610         return err;
611 }
612
613 static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
614                                      u16 vid_begin, u16 vid_end, bool is_member,
615                                      bool untagged)
616 {
617         u16 vid, vid_e;
618         int err;
619
620         for (vid = vid_begin; vid <= vid_end;
621              vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
622                 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
623                             vid_end);
624
625                 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
626                                              is_member, untagged);
627                 if (err)
628                         return err;
629         }
630
631         return 0;
632 }
633
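/* Add a range of VLANs to a bridged port: join the corresponding FIDs,
 * program VLAN membership and untagged bits, update the PVID if requested
 * and re-apply the STP state for the new VLANs. Errors unwind all earlier
 * steps.
 */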
634 static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
635                                      u16 vid_begin, u16 vid_end,
636                                      bool flag_untagged, bool flag_pvid)
637 {
638         struct net_device *dev = mlxsw_sp_port->dev;
639         u16 vid, old_pvid;
640         int err;
641
642         if (!mlxsw_sp_port->bridged)
643                 return -EINVAL;
644
645         err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
646         if (err) {
647                 netdev_err(dev, "Failed to join FIDs\n");
648                 return err;
649         }
650
651         err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
652                                         true, flag_untagged);
653         if (err) {
654                 netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
655                            vid_end);
656                 goto err_port_vlans_set;
657         }
658
659         old_pvid = mlxsw_sp_port->pvid;
660         if (flag_pvid && old_pvid != vid_begin) {
661                 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
662                 if (err) {
663                         netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
664                         goto err_port_pvid_set;
665                 }
666         } else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
667                 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
668                 if (err) {
669                         netdev_err(dev, "Unable to del PVID\n");
670                         goto err_port_pvid_set;
671                 }
672         }
673
674         /* Change activity bits only if the HW operation succeeded */
675         for (vid = vid_begin; vid <= vid_end; vid++) {
676                 set_bit(vid, mlxsw_sp_port->active_vlans);
677                 if (flag_untagged)
678                         set_bit(vid, mlxsw_sp_port->untagged_vlans);
679                 else
680                         clear_bit(vid, mlxsw_sp_port->untagged_vlans);
681         }
682
683         /* STP state change must be done after we set active VLANs */
684         err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
685                                           mlxsw_sp_port->stp_state);
686         if (err) {
687                 netdev_err(dev, "Failed to set STP state\n");
688                 goto err_port_stp_state_set;
689         }
690
691         return 0;
692
693 err_port_stp_state_set:
694         for (vid = vid_begin; vid <= vid_end; vid++)
695                 clear_bit(vid, mlxsw_sp_port->active_vlans);
696         if (old_pvid != mlxsw_sp_port->pvid)
697                 mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
698 err_port_pvid_set:
699         __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
700                                   false);
701 err_port_vlans_set:
702         mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
703         return err;
704 }
705
706 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
707                                    const struct switchdev_obj_port_vlan *vlan,
708                                    struct switchdev_trans *trans)
709 {
710         bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
711         bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
712
713         if (switchdev_trans_ph_prepare(trans))
714                 return 0;
715
716         return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
717                                          vlan->vid_begin, vlan->vid_end,
718                                          flag_untagged, flag_pvid);
719 }
720
721 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
722 {
723         return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
724                          MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
725 }
726
727 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
728 {
729         return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
730                         MLXSW_REG_SFD_OP_WRITE_REMOVE;
731 }
732
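/* Add or remove a single unicast FDB record via the SFD register. The
 * record policy distinguishes dynamic (learned) entries from static ones.
 */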
733 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
734                                      const char *mac, u16 fid, bool adding,
735                                      enum mlxsw_reg_sfd_rec_action action,
736                                      bool dynamic)
737 {
738         char *sfd_pl;
739         int err;
740
741         sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
742         if (!sfd_pl)
743                 return -ENOMEM;
744
745         mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
746         mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
747                               mac, fid, action, local_port);
748         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
749         kfree(sfd_pl);
750
751         return err;
752 }
753
754 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
755                                    const char *mac, u16 fid, bool adding,
756                                    bool dynamic)
757 {
758         return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
759                                          MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
760 }
761
762 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
763                         bool adding)
764 {
765         return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
766                                          MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
767                                          false);
768 }
769
770 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
771                                        const char *mac, u16 fid, u16 lag_vid,
772                                        bool adding, bool dynamic)
773 {
774         char *sfd_pl;
775         int err;
776
777         sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
778         if (!sfd_pl)
779                 return -ENOMEM;
780
781         mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
782         mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
783                                   mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
784                                   lag_vid, lag_id);
785         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
786         kfree(sfd_pl);
787
788         return err;
789 }
790
791 static int
792 mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
793                              const struct switchdev_obj_port_fdb *fdb,
794                              struct switchdev_trans *trans)
795 {
796         u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
797         u16 lag_vid = 0;
798
799         if (switchdev_trans_ph_prepare(trans))
800                 return 0;
801
802         if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
803                 lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
805
806         if (!mlxsw_sp_port->lagged)
807                 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
808                                                mlxsw_sp_port->local_port,
809                                                fdb->addr, fid, true, false);
810         else
811                 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
812                                                    mlxsw_sp_port->lag_id,
813                                                    fdb->addr, fid, lag_vid,
814                                                    true, false);
815 }
816
817 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
818                                 u16 fid, u16 mid, bool adding)
819 {
820         char *sfd_pl;
821         int err;
822
823         sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
824         if (!sfd_pl)
825                 return -ENOMEM;
826
827         mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
828         mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
829                               MLXSW_REG_SFD_REC_ACTION_NOP, mid);
830         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
831         kfree(sfd_pl);
832         return err;
833 }
834
835 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
836                                   bool add, bool clear_all_ports)
837 {
838         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
839         char *smid_pl;
840         int err, i;
841
842         smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
843         if (!smid_pl)
844                 return -ENOMEM;
845
846         mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
847         if (clear_all_ports) {
848                 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
849                         if (mlxsw_sp->ports[i])
850                                 mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
851         }
852         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
853         kfree(smid_pl);
854         return err;
855 }
856
857 static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
858                                               const unsigned char *addr,
859                                               u16 vid)
860 {
861         struct mlxsw_sp_mid *mid;
862
863         list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
864                 if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)
865                         return mid;
866         }
867         return NULL;
868 }
869
870 static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
871                                                 const unsigned char *addr,
872                                                 u16 vid)
873 {
874         struct mlxsw_sp_mid *mid;
875         u16 mid_idx;
876
877         mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
878                                       MLXSW_SP_MID_MAX);
879         if (mid_idx == MLXSW_SP_MID_MAX)
880                 return NULL;
881
882         mid = kzalloc(sizeof(*mid), GFP_KERNEL);
883         if (!mid)
884                 return NULL;
885
886         set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
887         ether_addr_copy(mid->addr, addr);
888         mid->vid = vid;
889         mid->mid = mid_idx;
890         mid->ref_count = 0;
891         list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);
892
893         return mid;
894 }
895
896 static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
897                                  struct mlxsw_sp_mid *mid)
898 {
899         if (--mid->ref_count == 0) {
900                 list_del(&mid->list);
901                 clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
902                 kfree(mid);
903                 return 1;
904         }
905         return 0;
906 }
907
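/* Multicast groups (MIDs) are reference counted. Every port joining the
 * group is added to its SMID port list; the SFD multicast record pointing
 * at the MID is only written when the first port joins.
 */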
908 static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
909                                  const struct switchdev_obj_port_mdb *mdb,
910                                  struct switchdev_trans *trans)
911 {
912         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
913         struct net_device *dev = mlxsw_sp_port->dev;
914         struct mlxsw_sp_mid *mid;
915         u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
916         int err = 0;
917
918         if (switchdev_trans_ph_prepare(trans))
919                 return 0;
920
921         mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
922         if (!mid) {
923                 mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
924                 if (!mid) {
925                         netdev_err(dev, "Unable to allocate MC group\n");
926                         return -ENOMEM;
927                 }
928         }
929         mid->ref_count++;
930
931         err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
932                                      mid->ref_count == 1);
933         if (err) {
934                 netdev_err(dev, "Unable to set SMID\n");
935                 goto err_out;
936         }
937
938         if (mid->ref_count == 1) {
939                 err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
940                                            true);
941                 if (err) {
942                         netdev_err(dev, "Unable to set MC SFD\n");
943                         goto err_out;
944                 }
945         }
946
947         return 0;
948
949 err_out:
950         __mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
951         return err;
952 }
953
954 static int mlxsw_sp_port_obj_add(struct net_device *dev,
955                                  const struct switchdev_obj *obj,
956                                  struct switchdev_trans *trans)
957 {
958         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
959         int err = 0;
960
961         mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
962         if (!mlxsw_sp_port)
963                 return -EINVAL;
964
965         switch (obj->id) {
966         case SWITCHDEV_OBJ_ID_PORT_VLAN:
967                 if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
968                         return 0;
969
970                 err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
971                                               SWITCHDEV_OBJ_PORT_VLAN(obj),
972                                               trans);
973                 break;
974         case SWITCHDEV_OBJ_ID_IPV4_FIB:
975                 err = mlxsw_sp_router_fib4_add(mlxsw_sp_port,
976                                                SWITCHDEV_OBJ_IPV4_FIB(obj),
977                                                trans);
978                 break;
979         case SWITCHDEV_OBJ_ID_PORT_FDB:
980                 err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
981                                                    SWITCHDEV_OBJ_PORT_FDB(obj),
982                                                    trans);
983                 break;
984         case SWITCHDEV_OBJ_ID_PORT_MDB:
985                 err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
986                                             SWITCHDEV_OBJ_PORT_MDB(obj),
987                                             trans);
988                 break;
989         default:
990                 err = -EOPNOTSUPP;
991                 break;
992         }
993
994         return err;
995 }
996
997 static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
998                                      u16 vid_begin, u16 vid_end)
999 {
1000         struct net_device *dev = mlxsw_sp_port->dev;
1001         u16 vid, pvid;
1002         int err;
1003
1004         if (!mlxsw_sp_port->bridged)
1005                 return -EINVAL;
1006
1007         err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
1008                                         false, false);
1009         if (err) {
1010                 netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
1011                            vid_end);
1012                 return err;
1013         }
1014
1015         pvid = mlxsw_sp_port->pvid;
1016         if (pvid >= vid_begin && pvid <= vid_end) {
1017                 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
1018                 if (err) {
1019                         netdev_err(dev, "Unable to del PVID %d\n", pvid);
1020                         return err;
1021                 }
1022         }
1023
1024         mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
1025
1026         /* Change activity bits only if the HW operation succeeded */
1027         for (vid = vid_begin; vid <= vid_end; vid++)
1028                 clear_bit(vid, mlxsw_sp_port->active_vlans);
1029
1030         return 0;
1031 }
1032
1033 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1034                                    const struct switchdev_obj_port_vlan *vlan)
1035 {
1036         return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
1037                                          vlan->vid_end);
1038 }
1039
1040 void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
1041 {
1042         u16 vid;
1043
1044         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
1045                 __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
1046 }
1047
1048 static int
1049 mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
1050                              const struct switchdev_obj_port_fdb *fdb)
1051 {
1052         u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
1053         u16 lag_vid = 0;
1054
1055         if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
1056                 lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
1058
1059         if (!mlxsw_sp_port->lagged)
1060                 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
1061                                                mlxsw_sp_port->local_port,
1062                                                fdb->addr, fid,
1063                                                false, false);
1064         else
1065                 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
1066                                                    mlxsw_sp_port->lag_id,
1067                                                    fdb->addr, fid, lag_vid,
1068                                                    false, false);
1069 }
1070
1071 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1072                                  const struct switchdev_obj_port_mdb *mdb)
1073 {
1074         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1075         struct net_device *dev = mlxsw_sp_port->dev;
1076         struct mlxsw_sp_mid *mid;
1077         u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
1078         u16 mid_idx;
1079         int err = 0;
1080
1081         mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
1082         if (!mid) {
1083                 netdev_err(dev, "Unable to remove port from MC DB\n");
1084                 return -EINVAL;
1085         }
1086
1087         err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
1088         if (err)
1089                 netdev_err(dev, "Unable to remove port from SMID\n");
1090
1091         mid_idx = mid->mid;
1092         if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
1093                 err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
1094                                            false);
1095                 if (err)
1096                         netdev_err(dev, "Unable to remove MC SFD\n");
1097         }
1098
1099         return err;
1100 }
1101
1102 static int mlxsw_sp_port_obj_del(struct net_device *dev,
1103                                  const struct switchdev_obj *obj)
1104 {
1105         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1106         int err = 0;
1107
1108         mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
1109         if (!mlxsw_sp_port)
1110                 return -EINVAL;
1111
1112         switch (obj->id) {
1113         case SWITCHDEV_OBJ_ID_PORT_VLAN:
1114                 if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
1115                         return 0;
1116
1117                 err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
1118                                               SWITCHDEV_OBJ_PORT_VLAN(obj));
1119                 break;
1120         case SWITCHDEV_OBJ_ID_IPV4_FIB:
1121                 err = mlxsw_sp_router_fib4_del(mlxsw_sp_port,
1122                                                SWITCHDEV_OBJ_IPV4_FIB(obj));
1123                 break;
1124         case SWITCHDEV_OBJ_ID_PORT_FDB:
1125                 err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
1126                                                    SWITCHDEV_OBJ_PORT_FDB(obj));
1127                 break;
1128         case SWITCHDEV_OBJ_ID_PORT_MDB:
1129                 err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
1130                                             SWITCHDEV_OBJ_PORT_MDB(obj));
1131                 break;
1132         default:
1133                 err = -EOPNOTSUPP;
1134                 break;
1135         }
1136
1137         return err;
1138 }
1139
1140 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1141                                                    u16 lag_id)
1142 {
1143         struct mlxsw_sp_port *mlxsw_sp_port;
1144         int i;
1145
1146         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
1147                 mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1148                 if (mlxsw_sp_port)
1149                         return mlxsw_sp_port;
1150         }
1151         return NULL;
1152 }
1153
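/* Dump the hardware FDB using SFD queries of up to
 * MLXSW_REG_SFD_REC_MAX_COUNT records at a time. Records owned by this port
 * (or its LAG) are translated from FID back to VID and passed to the
 * switchdev callback. The dump is always run to completion so the firmware
 * session is properly closed, even if the callback returned an error.
 */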
1154 static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
1155                                   struct switchdev_obj_port_fdb *fdb,
1156                                   switchdev_obj_dump_cb_t *cb,
1157                                   struct net_device *orig_dev)
1158 {
1159         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1160         struct mlxsw_sp_port *tmp;
1161         struct mlxsw_sp_fid *f;
1162         u16 vport_fid;
1163         char *sfd_pl;
1164         char mac[ETH_ALEN];
1165         u16 fid;
1166         u8 local_port;
1167         u16 lag_id;
1168         u8 num_rec;
1169         int stored_err = 0;
1170         int i;
1171         int err;
1172
1173         sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1174         if (!sfd_pl)
1175                 return -ENOMEM;
1176
1177         f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
1178         vport_fid = f ? f->fid : 0;
1179
1180         mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
1181         do {
1182                 mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
1183                 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1184                 if (err)
1185                         goto out;
1186
1187                 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1188
1189                 /* Even in case of error, we have to run the dump to the end
1190                  * so the session in firmware is finished.
1191                  */
1192                 if (stored_err)
1193                         continue;
1194
1195                 for (i = 0; i < num_rec; i++) {
1196                         switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
1197                         case MLXSW_REG_SFD_REC_TYPE_UNICAST:
1198                                 mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
1199                                                         &local_port);
1200                                 if (local_port == mlxsw_sp_port->local_port) {
1201                                         if (vport_fid && vport_fid == fid)
1202                                                 fdb->vid = 0;
1203                                         else if (!vport_fid &&
1204                                                  !mlxsw_sp_fid_is_vfid(fid))
1205                                                 fdb->vid = fid;
1206                                         else
1207                                                 continue;
1208                                         ether_addr_copy(fdb->addr, mac);
1209                                         fdb->ndm_state = NUD_REACHABLE;
1210                                         err = cb(&fdb->obj);
1211                                         if (err)
1212                                                 stored_err = err;
1213                                 }
1214                                 break;
1215                         case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
1216                                 mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
1217                                                             mac, &fid, &lag_id);
1218                                 tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
1219                                 if (tmp && tmp->local_port ==
1220                                     mlxsw_sp_port->local_port) {
1221                                         /* LAG records can only point to LAG
1222                                          * devices or VLAN devices on top.
1223                                          */
1224                                         if (!netif_is_lag_master(orig_dev) &&
1225                                             !is_vlan_dev(orig_dev))
1226                                                 continue;
1227                                         if (vport_fid && vport_fid == fid)
1228                                                 fdb->vid = 0;
1229                                         else if (!vport_fid &&
1230                                                  !mlxsw_sp_fid_is_vfid(fid))
1231                                                 fdb->vid = fid;
1232                                         else
1233                                                 continue;
1234                                         ether_addr_copy(fdb->addr, mac);
1235                                         fdb->ndm_state = NUD_REACHABLE;
1236                                         err = cb(&fdb->obj);
1237                                         if (err)
1238                                                 stored_err = err;
1239                                 }
1240                                 break;
1241                         }
1242                 }
1243         } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
1244
1245 out:
1246         kfree(sfd_pl);
1247         return stored_err ? stored_err : err;
1248 }
1249
1250 static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
1251                                    struct switchdev_obj_port_vlan *vlan,
1252                                    switchdev_obj_dump_cb_t *cb)
1253 {
1254         u16 vid;
1255         int err = 0;
1256
1257         if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
1258                 vlan->flags = 0;
1259                 vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
1260                 vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
1261                 return cb(&vlan->obj);
1262         }
1263
1264         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
1265                 vlan->flags = 0;
1266                 if (vid == mlxsw_sp_port->pvid)
1267                         vlan->flags |= BRIDGE_VLAN_INFO_PVID;
1268                 if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
1269                         vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1270                 vlan->vid_begin = vid;
1271                 vlan->vid_end = vid;
1272                 err = cb(&vlan->obj);
1273                 if (err)
1274                         break;
1275         }
1276         return err;
1277 }
1278
1279 static int mlxsw_sp_port_obj_dump(struct net_device *dev,
1280                                   struct switchdev_obj *obj,
1281                                   switchdev_obj_dump_cb_t *cb)
1282 {
1283         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1284         int err = 0;
1285
1286         mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
1287         if (!mlxsw_sp_port)
1288                 return -EINVAL;
1289
1290         switch (obj->id) {
1291         case SWITCHDEV_OBJ_ID_PORT_VLAN:
1292                 err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
1293                                               SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
1294                 break;
1295         case SWITCHDEV_OBJ_ID_PORT_FDB:
1296                 err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
1297                                              SWITCHDEV_OBJ_PORT_FDB(obj), cb,
1298                                              obj->orig_dev);
1299                 break;
1300         default:
1301                 err = -EOPNOTSUPP;
1302                 break;
1303         }
1304
1305         return err;
1306 }
1307
1308 static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
1309         .switchdev_port_attr_get        = mlxsw_sp_port_attr_get,
1310         .switchdev_port_attr_set        = mlxsw_sp_port_attr_set,
1311         .switchdev_port_obj_add         = mlxsw_sp_port_obj_add,
1312         .switchdev_port_obj_del         = mlxsw_sp_port_obj_del,
1313         .switchdev_port_obj_dump        = mlxsw_sp_port_obj_dump,
1314 };
1315
1316 static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
1317                                         char *mac, u16 vid,
1318                                         struct net_device *dev)
1319 {
1320         struct switchdev_notifier_fdb_info info;
1321         unsigned long notifier_type;
1322
1323         if (learning_sync) {
1324                 info.addr = mac;
1325                 info.vid = vid;
1326                 notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
1327                 call_switchdev_notifiers(notifier_type, dev, &info.info);
1328         }
1329 }
1330
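/* Handle a learned / aged-out MAC notification for a physical port. Entries
 * learned on a vFID are re-associated with the matching vPort. The entry is
 * then confirmed (or removed) in the FDB and, if learning_sync is enabled,
 * reported to the bridge via a switchdev FDB notification.
 */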
1331 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
1332                                             char *sfn_pl, int rec_index,
1333                                             bool adding)
1334 {
1335         struct mlxsw_sp_port *mlxsw_sp_port;
1336         char mac[ETH_ALEN];
1337         u8 local_port;
1338         u16 vid, fid;
1339         bool do_notification = true;
1340         int err;
1341
1342         mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
1343         mlxsw_sp_port = mlxsw_sp->ports[local_port];
1344         if (!mlxsw_sp_port) {
1345                 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
1346                 goto just_remove;
1347         }
1348
1349         if (mlxsw_sp_fid_is_vfid(fid)) {
1350                 struct mlxsw_sp_port *mlxsw_sp_vport;
1351
1352                 mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
1353                                                                  fid);
1354                 if (!mlxsw_sp_vport) {
1355                         netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
1356                         goto just_remove;
1357                 }
1358                 vid = 0;
1359                 /* Override the physical port with the vPort. */
1360                 mlxsw_sp_port = mlxsw_sp_vport;
1361         } else {
1362                 vid = fid;
1363         }
1364
1365         adding = adding && mlxsw_sp_port->learning;
1366
1367 do_fdb_op:
1368         err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
1369                                       adding, true);
1370         if (err) {
1371                 if (net_ratelimit())
1372                         netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
1373                 return;
1374         }
1375
1376         if (!do_notification)
1377                 return;
1378         mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
1379                                     adding, mac, vid, mlxsw_sp_port->dev);
1380         return;
1381
1382 just_remove:
1383         adding = false;
1384         do_notification = false;
1385         goto do_fdb_op;
1386 }
1387
1388 static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
1389                                                 char *sfn_pl, int rec_index,
1390                                                 bool adding)
1391 {
1392         struct mlxsw_sp_port *mlxsw_sp_port;
1393         struct net_device *dev;
1394         char mac[ETH_ALEN];
1395         u16 lag_vid = 0;
1396         u16 lag_id;
1397         u16 vid, fid;
1398         bool do_notification = true;
1399         int err;
1400
1401         mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
1402         mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
1403         if (!mlxsw_sp_port) {
1404                 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
1405                 goto just_remove;
1406         }
1407
1408         if (mlxsw_sp_fid_is_vfid(fid)) {
1409                 struct mlxsw_sp_port *mlxsw_sp_vport;
1410
1411                 mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
1412                                                                  fid);
1413                 if (!mlxsw_sp_vport) {
1414                         netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
1415                         goto just_remove;
1416                 }
1417
1418                 lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
1419                 dev = mlxsw_sp_vport->dev;
1420                 vid = 0;
1421                 /* Override the physical port with the vPort. */
1422                 mlxsw_sp_port = mlxsw_sp_vport;
1423         } else {
1424                 dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
1425                 vid = fid;
1426         }
1427
1428         adding = adding && mlxsw_sp_port->learning;
1429
1430 do_fdb_op:
1431         err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
1432                                           adding, true);
1433         if (err) {
1434                 if (net_ratelimit())
1435                         netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
1436                 return;
1437         }
1438
1439         if (!do_notification)
1440                 return;
1441         mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
1442                                     vid, dev);
1443         return;
1444
1445 just_remove:
1446         adding = false;
1447         do_notification = false;
1448         goto do_fdb_op;
1449 }
1450
1451 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
1452                                             char *sfn_pl, int rec_index)
1453 {
1454         switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
1455         case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
1456                 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
1457                                                 rec_index, true);
1458                 break;
1459         case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
1460                 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
1461                                                 rec_index, false);
1462                 break;
1463         case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
1464                 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
1465                                                     rec_index, true);
1466                 break;
1467         case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
1468                 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
1469                                                     rec_index, false);
1470                 break;
1471         }
1472 }
1473
1474 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
1475 {
1476         mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
1477                                msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
1478 }
1479
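/* Delayed work that polls the SFN register for FDB notifications under RTNL
 * and re-arms itself according to the configured polling interval.
 */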
1480 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
1481 {
1482         struct mlxsw_sp *mlxsw_sp;
1483         char *sfn_pl;
1484         u8 num_rec;
1485         int i;
1486         int err;
1487
1488         sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
1489         if (!sfn_pl)
1490                 return;
1491
1492         mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
1493
1494         rtnl_lock();
1495         do {
1496                 mlxsw_reg_sfn_pack(sfn_pl);
1497                 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
1498                 if (err) {
1499                         dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
1500                         break;
1501                 }
1502                 num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
1503                 for (i = 0; i < num_rec; i++)
1504                         mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
1505
1506         } while (num_rec);
1507         rtnl_unlock();
1508
1509         kfree(sfn_pl);
1510         mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
1511 }
1512
1513 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
1514 {
1515         int err;
1516
1517         err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
1518         if (err) {
1519                 dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
1520                 return err;
1521         }
1522         INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
1523         mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
1524         mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
1525         return 0;
1526 }
1527
1528 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
1529 {
1530         cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
1531 }
1532
1533 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
1534 {
1535         return mlxsw_sp_fdb_init(mlxsw_sp);
1536 }
1537
1538 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
1539 {
1540         mlxsw_sp_fdb_fini(mlxsw_sp);
1541 }
1542
1543 void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
1544 {
1545         mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
1546 }
1547
1548 void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
1549 {
1550 }