2 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
3 * Copyright (c) 2008 Marvell Semiconductor
5 * Copyright (c) 2015 CMC Electronics, Inc.
6 * Added support for VLAN Table Unit operations
8 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/delay.h>
17 #include <linux/etherdevice.h>
18 #include <linux/ethtool.h>
19 #include <linux/if_bridge.h>
20 #include <linux/jiffies.h>
21 #include <linux/list.h>
22 #include <linux/mdio.h>
23 #include <linux/module.h>
24 #include <linux/netdevice.h>
25 #include <linux/gpio/consumer.h>
26 #include <linux/phy.h>
28 #include <net/switchdev.h>
29 #include "mv88e6xxx.h"
/* Debug aid: complain (dev_err) if the caller does not hold smi_mutex.
 * NOTE(review): chunk is elided; the closing braces are not visible here.
 */
31 static void assert_smi_lock(struct mv88e6xxx_priv_state *ps)
33 if (unlikely(!mutex_is_locked(&ps->smi_mutex))) {
34 dev_err(ps->dev, "SMI lock not held!\n");
39 /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
40 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
41 * will be directly accessible on some {device address,register address}
42 * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
43 * will only respond to SMI transactions to that specific address, and
44 * an indirect addressing mechanism needs to be used to access its
/* Poll SMI_CMD up to 16 times until the BUSY bit clears.
 * Returns 0 when ready; error/timeout paths are elided from this chunk.
 */
47 static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
52 for (i = 0; i < 16; i++) {
53 ret = mdiobus_read_nested(bus, sw_addr, SMI_CMD);
57 if ((ret & SMI_CMD_BUSY) == 0)
/* Lowest-level register read over SMI.
 * Direct mode: read the register straight off the bus (the guarding
 * condition — presumably sw_addr == 0, per the comment above — is elided).
 * Indirect mode: wait for ready, issue a clause-22 read via SMI_CMD,
 * wait for completion, then fetch the result from SMI_DATA.
 */
64 static int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr,
70 return mdiobus_read_nested(bus, addr, reg);
72 /* Wait for the bus to become free. */
73 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
77 /* Transmit the read command. */
78 ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
79 SMI_CMD_OP_22_READ | (addr << 5) | reg);
83 /* Wait for the read command to complete. */
84 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
89 ret = mdiobus_read_nested(bus, sw_addr, SMI_DATA);
/* Locked-context register read: caller must hold smi_mutex.
 * Wraps __mv88e6xxx_reg_read() and logs the transaction at debug level.
 */
96 static int _mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps,
103 ret = __mv88e6xxx_reg_read(ps->bus, ps->sw_addr, addr, reg);
107 dev_dbg(ps->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
/* Public register read: takes smi_mutex around _mv88e6xxx_reg_read().
 * The trailing "return ret;" is elided from this chunk.
 */
113 int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, int addr, int reg)
117 mutex_lock(&ps->smi_mutex);
118 ret = _mv88e6xxx_reg_read(ps, addr, reg);
119 mutex_unlock(&ps->smi_mutex);
/* Lowest-level register write over SMI; mirror image of __mv88e6xxx_reg_read.
 * Direct mode writes straight to the device; indirect mode loads SMI_DATA,
 * issues a clause-22 write via SMI_CMD, then waits for completion.
 */
124 static int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
130 return mdiobus_write_nested(bus, addr, reg, val);
132 /* Wait for the bus to become free. */
133 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
137 /* Transmit the data to write. */
138 ret = mdiobus_write_nested(bus, sw_addr, SMI_DATA, val);
142 /* Transmit the write command. */
143 ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
144 SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
148 /* Wait for the write command to complete. */
149 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
/* Locked-context register write: caller must hold smi_mutex.
 * Logs the transaction, then delegates to __mv88e6xxx_reg_write().
 */
156 static int _mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
161 dev_dbg(ps->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
164 return __mv88e6xxx_reg_write(ps->bus, ps->sw_addr, addr, reg, val);
/* Public register write: takes smi_mutex around _mv88e6xxx_reg_write().
 * The trailing "return ret;" is elided from this chunk.
 */
167 int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
172 mutex_lock(&ps->smi_mutex);
173 ret = _mv88e6xxx_reg_write(ps, addr, reg, val);
174 mutex_unlock(&ps->smi_mutex);
/* Program the switch MAC address via the three GLOBAL_MAC_xx registers,
 * two octets per register, big-endian within each register.
 */
179 static int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
181 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
184 err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_01,
185 (addr[0] << 8) | addr[1]);
189 err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_23,
190 (addr[2] << 8) | addr[3]);
194 return mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_45,
195 (addr[4] << 8) | addr[5]);
/* Program the switch MAC address one byte at a time through
 * GLOBAL2_SWITCH_MAC, setting the BUSY bit with each write and then
 * polling (up to 16 reads) until the hardware clears it.
 */
198 static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
200 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
204 for (i = 0; i < 6; i++) {
207 /* Write the MAC address byte. */
208 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
209 GLOBAL2_SWITCH_MAC_BUSY |
214 /* Wait for the write to complete. */
215 for (j = 0; j < 16; j++) {
216 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2,
221 if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
/* Set the switch MAC address, choosing the indirect (GLOBAL2_SWITCH_MAC)
 * method when the chip advertises MV88E6XXX_FLAG_SWITCH_MAC, otherwise
 * the direct GLOBAL_MAC_xx method.
 */
231 int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
233 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
235 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SWITCH_MAC))
236 return mv88e6xxx_set_addr_indirect(ds, addr);
238 return mv88e6xxx_set_addr_direct(ds, addr);
/* Locked-context PHY accessors: PHYs appear as plain SMI register pages,
 * so these simply forward to the register read/write helpers.
 */
241 static int _mv88e6xxx_phy_read(struct mv88e6xxx_priv_state *ps, int addr,
245 return _mv88e6xxx_reg_read(ps, addr, regnum);
249 static int _mv88e6xxx_phy_write(struct mv88e6xxx_priv_state *ps, int addr,
253 return _mv88e6xxx_reg_write(ps, addr, regnum, val);
/* Disable the PHY Polling Unit: clear GLOBAL_CONTROL_PPU_ENABLE, then poll
 * GLOBAL_STATUS (1 s timeout, 1-2 ms sleeps) until the PPU state field
 * leaves the "polling" state. Caller must hold smi_mutex (uses the
 * underscore-prefixed register helpers).
 */
257 static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state *ps)
260 unsigned long timeout;
262 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
266 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
267 ret & ~GLOBAL_CONTROL_PPU_ENABLE);
271 timeout = jiffies + 1 * HZ;
272 while (time_before(jiffies, timeout)) {
273 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
277 usleep_range(1000, 2000);
278 if ((ret & GLOBAL_STATUS_PPU_MASK) !=
279 GLOBAL_STATUS_PPU_POLLING)
/* Re-enable the PHY Polling Unit: set GLOBAL_CONTROL_PPU_ENABLE, then poll
 * GLOBAL_STATUS until the PPU reports "polling" again. Uses the locking
 * (non-underscore) register helpers, so must be called WITHOUT smi_mutex.
 */
286 static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state *ps)
289 unsigned long timeout;
291 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
295 err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
296 ret | GLOBAL_CONTROL_PPU_ENABLE);
300 timeout = jiffies + 1 * HZ;
301 while (time_before(jiffies, timeout)) {
302 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
306 usleep_range(1000, 2000);
307 if ((ret & GLOBAL_STATUS_PPU_MASK) ==
308 GLOBAL_STATUS_PPU_POLLING)
/* Deferred-work handler that re-enables the PPU. Uses trylock so it backs
 * off silently if another PHY access currently holds ppu_mutex (that
 * access will reschedule the timer when it finishes).
 */
315 static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
317 struct mv88e6xxx_priv_state *ps;
319 ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
320 if (mutex_trylock(&ps->ppu_mutex)) {
321 if (mv88e6xxx_ppu_enable(ps) == 0)
322 ps->ppu_disabled = 0;
323 mutex_unlock(&ps->ppu_mutex);
/* Timer callback: punt PPU re-enable to process context, since enabling
 * the PPU involves sleeping register accesses.
 */
327 static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
329 struct mv88e6xxx_priv_state *ps = (void *)_ps;
331 schedule_work(&ps->ppu_work);
/* Acquire exclusive PHY access: take ppu_mutex and, if the PPU is still
 * running, disable it (bailing out with the mutex released on failure).
 * If it was already disabled, just cancel the pending re-enable timer.
 * Paired with mv88e6xxx_ppu_access_put().
 */
334 static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_priv_state *ps)
338 mutex_lock(&ps->ppu_mutex);
340 /* If the PHY polling unit is enabled, disable it so that
341 * we can access the PHY registers. If it was already
342 * disabled, cancel the timer that is going to re-enable
345 if (!ps->ppu_disabled) {
346 ret = mv88e6xxx_ppu_disable(ps);
348 mutex_unlock(&ps->ppu_mutex);
351 ps->ppu_disabled = 1;
353 del_timer(&ps->ppu_timer);
/* Release PHY access: arm a 10 ms timer to re-enable the PPU (so bursts
 * of PHY accesses don't thrash it on/off), then drop ppu_mutex.
 */
360 static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_priv_state *ps)
362 /* Schedule a timer to re-enable the PHY polling unit. */
363 mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
364 mutex_unlock(&ps->ppu_mutex);
/* One-time initialisation of the PPU bookkeeping: mutex, work item and
 * the (pre-timer_setup era) re-enable timer with its callback/data.
 */
367 void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state *ps)
369 mutex_init(&ps->ppu_mutex);
370 INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
371 init_timer(&ps->ppu_timer);
372 ps->ppu_timer.data = (unsigned long)ps;
373 ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
/* PHY accessors for chips where the PPU owns the PHYs: bracket each raw
 * register access with ppu_access_get()/put() so the PPU is quiesced
 * while we talk to the PHY directly.
 */
376 static int mv88e6xxx_phy_read_ppu(struct mv88e6xxx_priv_state *ps, int addr,
381 ret = mv88e6xxx_ppu_access_get(ps);
383 ret = _mv88e6xxx_reg_read(ps, addr, regnum);
384 mv88e6xxx_ppu_access_put(ps);
390 static int mv88e6xxx_phy_write_ppu(struct mv88e6xxx_priv_state *ps, int addr,
395 ret = mv88e6xxx_ppu_access_get(ps);
397 ret = _mv88e6xxx_reg_write(ps, addr, regnum, val);
398 mv88e6xxx_ppu_access_put(ps);
/* Chip-family predicates: each returns true iff the probed chip info
 * matches the named 88E6xxx product family.
 */
404 static bool mv88e6xxx_6065_family(struct mv88e6xxx_priv_state *ps)
406 return ps->info->family == MV88E6XXX_FAMILY_6065;
409 static bool mv88e6xxx_6095_family(struct mv88e6xxx_priv_state *ps)
411 return ps->info->family == MV88E6XXX_FAMILY_6095;
414 static bool mv88e6xxx_6097_family(struct mv88e6xxx_priv_state *ps)
416 return ps->info->family == MV88E6XXX_FAMILY_6097;
419 static bool mv88e6xxx_6165_family(struct mv88e6xxx_priv_state *ps)
421 return ps->info->family == MV88E6XXX_FAMILY_6165;
424 static bool mv88e6xxx_6185_family(struct mv88e6xxx_priv_state *ps)
426 return ps->info->family == MV88E6XXX_FAMILY_6185;
429 static bool mv88e6xxx_6320_family(struct mv88e6xxx_priv_state *ps)
431 return ps->info->family == MV88E6XXX_FAMILY_6320;
434 static bool mv88e6xxx_6351_family(struct mv88e6xxx_priv_state *ps)
436 return ps->info->family == MV88E6XXX_FAMILY_6351;
439 static bool mv88e6xxx_6352_family(struct mv88e6xxx_priv_state *ps)
441 return ps->info->family == MV88E6XXX_FAMILY_6352;
/* Number of forwarding databases (FIDs) supported by the probed chip. */
444 static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_priv_state *ps)
446 return ps->info->num_databases;
/* True when the family exposes dedicated GLOBAL_ATU_FID / GLOBAL_VTU_FID
 * registers (6097/6165/6351/6352); other families encode the FID inside
 * the ATU/VTU operation registers instead.
 */
449 static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state *ps)
451 /* Does the device have dedicated FID registers for ATU and VTU ops? */
452 if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
453 mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps))
459 /* We expect the switch to perform auto negotiation if there is a real
460 * phy. However, in the case of a fixed link phy, we force the port
461 * settings from the fixed link settings.
/* DSA adjust_link hook. For fixed-link (pseudo) PHYs only, force the
 * port's PCS control register to match the fixed-link parameters: link,
 * speed, duplex, and — on 6351/6352 family RGMII ports (the last two
 * ports) — RX/TX clock delays. Real PHYs are left to autonegotiation.
 */
463 static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
464 struct phy_device *phydev)
466 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
470 if (!phy_is_pseudo_fixed_link(phydev))
473 mutex_lock(&ps->smi_mutex);
475 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
479 reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
480 PORT_PCS_CTRL_FORCE_LINK |
481 PORT_PCS_CTRL_DUPLEX_FULL |
482 PORT_PCS_CTRL_FORCE_DUPLEX |
483 PORT_PCS_CTRL_UNFORCED);
485 reg |= PORT_PCS_CTRL_FORCE_LINK;
487 reg |= PORT_PCS_CTRL_LINK_UP;
/* 6065 family tops out at 100 Mb/s; reject faster fixed-link speeds. */
489 if (mv88e6xxx_6065_family(ps) && phydev->speed > SPEED_100)
492 switch (phydev->speed) {
494 reg |= PORT_PCS_CTRL_1000;
497 reg |= PORT_PCS_CTRL_100;
500 reg |= PORT_PCS_CTRL_10;
503 pr_info("Unknown speed");
507 reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
508 if (phydev->duplex == DUPLEX_FULL)
509 reg |= PORT_PCS_CTRL_DUPLEX_FULL;
/* RGMII delay bits only exist on the two highest-numbered ports. */
511 if ((mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps)) &&
512 (port >= ps->info->num_ports - 2)) {
513 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
514 reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
515 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
516 reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
517 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
518 reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
519 PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
521 _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PCS_CTRL, reg);
524 mutex_unlock(&ps->smi_mutex);
/* Poll GLOBAL_STATS_OP up to 10 times until the stats unit is idle.
 * Caller must hold smi_mutex.
 */
527 static int _mv88e6xxx_stats_wait(struct mv88e6xxx_priv_state *ps)
532 for (i = 0; i < 10; i++) {
533 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_OP);
534 if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
/* Capture a consistent snapshot of one port's hardware counters.
 * 6320/6352 families encode the port differently in the op register
 * (shifted, one-based); older families take the raw port number.
 */
541 static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_priv_state *ps,
546 if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
547 port = (port + 1) << 5;
549 /* Snapshot the hardware statistics counters for this port. */
550 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
551 GLOBAL_STATS_OP_CAPTURE_PORT |
552 GLOBAL_STATS_OP_HIST_RX_TX | port);
556 /* Wait for the snapshotting to complete. */
557 ret = _mv88e6xxx_stats_wait(ps);
/* Read one captured 32-bit statistic: issue READ_CAPTURED for the stat,
 * wait, then assemble the value from the high (COUNTER_32) and low
 * (COUNTER_01) halves. Result is returned through an out-pointer
 * (parameter elided from this chunk).
 */
564 static void _mv88e6xxx_stats_read(struct mv88e6xxx_priv_state *ps,
572 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
573 GLOBAL_STATS_OP_READ_CAPTURED |
574 GLOBAL_STATS_OP_HIST_RX_TX | stat);
578 ret = _mv88e6xxx_stats_wait(ps);
582 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
588 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
/* Table of all hardware counters exposed through ethtool.
 * Columns: name, width in bytes, register/stat id, bank (BANK0 stats
 * unit, PORT registers, or BANK1 stats unit on newer families).
 */
595 static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
596 { "in_good_octets", 8, 0x00, BANK0, },
597 { "in_bad_octets", 4, 0x02, BANK0, },
598 { "in_unicast", 4, 0x04, BANK0, },
599 { "in_broadcasts", 4, 0x06, BANK0, },
600 { "in_multicasts", 4, 0x07, BANK0, },
601 { "in_pause", 4, 0x16, BANK0, },
602 { "in_undersize", 4, 0x18, BANK0, },
603 { "in_fragments", 4, 0x19, BANK0, },
604 { "in_oversize", 4, 0x1a, BANK0, },
605 { "in_jabber", 4, 0x1b, BANK0, },
606 { "in_rx_error", 4, 0x1c, BANK0, },
607 { "in_fcs_error", 4, 0x1d, BANK0, },
608 { "out_octets", 8, 0x0e, BANK0, },
609 { "out_unicast", 4, 0x10, BANK0, },
610 { "out_broadcasts", 4, 0x13, BANK0, },
611 { "out_multicasts", 4, 0x12, BANK0, },
612 { "out_pause", 4, 0x15, BANK0, },
613 { "excessive", 4, 0x11, BANK0, },
614 { "collisions", 4, 0x1e, BANK0, },
615 { "deferred", 4, 0x05, BANK0, },
616 { "single", 4, 0x14, BANK0, },
617 { "multiple", 4, 0x17, BANK0, },
618 { "out_fcs_error", 4, 0x03, BANK0, },
619 { "late", 4, 0x1f, BANK0, },
620 { "hist_64bytes", 4, 0x08, BANK0, },
621 { "hist_65_127bytes", 4, 0x09, BANK0, },
622 { "hist_128_255bytes", 4, 0x0a, BANK0, },
623 { "hist_256_511bytes", 4, 0x0b, BANK0, },
624 { "hist_512_1023bytes", 4, 0x0c, BANK0, },
625 { "hist_1024_max_bytes", 4, 0x0d, BANK0, },
626 { "sw_in_discards", 4, 0x10, PORT, },
627 { "sw_in_filtered", 2, 0x12, PORT, },
628 { "sw_out_filtered", 2, 0x13, PORT, },
629 { "in_discards", 4, 0x00 | GLOBAL_STATS_OP_BANK_1, BANK1, },
630 { "in_filtered", 4, 0x01 | GLOBAL_STATS_OP_BANK_1, BANK1, },
631 { "in_accepted", 4, 0x02 | GLOBAL_STATS_OP_BANK_1, BANK1, },
632 { "in_bad_accepted", 4, 0x03 | GLOBAL_STATS_OP_BANK_1, BANK1, },
633 { "in_good_avb_class_a", 4, 0x04 | GLOBAL_STATS_OP_BANK_1, BANK1, },
634 { "in_good_avb_class_b", 4, 0x05 | GLOBAL_STATS_OP_BANK_1, BANK1, },
635 { "in_bad_avb_class_a", 4, 0x06 | GLOBAL_STATS_OP_BANK_1, BANK1, },
636 { "in_bad_avb_class_b", 4, 0x07 | GLOBAL_STATS_OP_BANK_1, BANK1, },
637 { "tcam_counter_0", 4, 0x08 | GLOBAL_STATS_OP_BANK_1, BANK1, },
638 { "tcam_counter_1", 4, 0x09 | GLOBAL_STATS_OP_BANK_1, BANK1, },
639 { "tcam_counter_2", 4, 0x0a | GLOBAL_STATS_OP_BANK_1, BANK1, },
640 { "tcam_counter_3", 4, 0x0b | GLOBAL_STATS_OP_BANK_1, BANK1, },
641 { "in_da_unknown", 4, 0x0e | GLOBAL_STATS_OP_BANK_1, BANK1, },
642 { "in_management", 4, 0x0f | GLOBAL_STATS_OP_BANK_1, BANK1, },
643 { "out_queue_0", 4, 0x10 | GLOBAL_STATS_OP_BANK_1, BANK1, },
644 { "out_queue_1", 4, 0x11 | GLOBAL_STATS_OP_BANK_1, BANK1, },
645 { "out_queue_2", 4, 0x12 | GLOBAL_STATS_OP_BANK_1, BANK1, },
646 { "out_queue_3", 4, 0x13 | GLOBAL_STATS_OP_BANK_1, BANK1, },
647 { "out_queue_4", 4, 0x14 | GLOBAL_STATS_OP_BANK_1, BANK1, },
648 { "out_queue_5", 4, 0x15 | GLOBAL_STATS_OP_BANK_1, BANK1, },
649 { "out_queue_6", 4, 0x16 | GLOBAL_STATS_OP_BANK_1, BANK1, },
650 { "out_queue_7", 4, 0x17 | GLOBAL_STATS_OP_BANK_1, BANK1, },
651 { "out_cut_through", 4, 0x18 | GLOBAL_STATS_OP_BANK_1, BANK1, },
652 { "out_octets_a", 4, 0x1a | GLOBAL_STATS_OP_BANK_1, BANK1, },
653 { "out_octets_b", 4, 0x1b | GLOBAL_STATS_OP_BANK_1, BANK1, },
654 { "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, },
/* Does the probed chip implement this counter? Switches on the stat's
 * bank: BANK1 stats exist only on the 6320 family; the PORT-register
 * stats on the families listed below. (The BANK0 case is elided here.)
 */
657 static bool mv88e6xxx_has_stat(struct mv88e6xxx_priv_state *ps,
658 struct mv88e6xxx_hw_stat *stat)
660 switch (stat->type) {
664 return mv88e6xxx_6320_family(ps);
666 return mv88e6xxx_6095_family(ps) ||
667 mv88e6xxx_6185_family(ps) ||
668 mv88e6xxx_6097_family(ps) ||
669 mv88e6xxx_6165_family(ps) ||
670 mv88e6xxx_6351_family(ps) ||
671 mv88e6xxx_6352_family(ps);
/* Fetch one counter's value. PORT-bank stats are read directly from the
 * port registers (one read, plus a second for 4-byte stats); BANK0/BANK1
 * stats come from the captured stats unit via _mv88e6xxx_stats_read(),
 * with 8-byte stats assembled from two consecutive 32-bit reads.
 */
676 static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps,
677 struct mv88e6xxx_hw_stat *s,
687 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), s->reg);
692 if (s->sizeof_stat == 4) {
693 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port),
702 _mv88e6xxx_stats_read(ps, s->reg, &low);
703 if (s->sizeof_stat == 8)
704 _mv88e6xxx_stats_read(ps, s->reg + 1, &high);
706 value = (((u64)high) << 16) | low;
/* ethtool get_strings: copy the name of every counter this chip supports
 * into the caller's buffer, ETH_GSTRING_LEN bytes per entry.
 */
710 static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
713 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
714 struct mv88e6xxx_hw_stat *stat;
717 for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
718 stat = &mv88e6xxx_hw_stats[i];
719 if (mv88e6xxx_has_stat(ps, stat)) {
720 memcpy(data + j * ETH_GSTRING_LEN, stat->string,
/* ethtool get_sset_count: number of counters supported by this chip,
 * i.e. the same filter as get_strings/get_ethtool_stats.
 */
727 static int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
729 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
730 struct mv88e6xxx_hw_stat *stat;
733 for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
734 stat = &mv88e6xxx_hw_stats[i];
735 if (mv88e6xxx_has_stat(ps, stat))
/* ethtool get_ethtool_stats: under smi_mutex, snapshot the port's
 * counters then read out each supported stat in table order (matching
 * get_strings). Bails out early if the snapshot fails.
 */
741 static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
744 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
745 struct mv88e6xxx_hw_stat *stat;
749 mutex_lock(&ps->smi_mutex);
751 ret = _mv88e6xxx_stats_snapshot(ps, port);
753 mutex_unlock(&ps->smi_mutex);
756 for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
757 stat = &mv88e6xxx_hw_stats[i];
758 if (mv88e6xxx_has_stat(ps, stat)) {
759 data[j] = _mv88e6xxx_get_ethtool_stat(ps, stat, port);
764 mutex_unlock(&ps->smi_mutex);
/* ethtool register dump: fixed size of 32 16-bit port registers. */
767 static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
769 return 32 * sizeof(u16);
/* Dump all 32 port registers; entries that fail to read stay 0xffff
 * (the buffer is pre-filled with 0xff).
 */
772 static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
773 struct ethtool_regs *regs, void *_p)
775 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
781 memset(p, 0xff, 32 * sizeof(u16));
783 mutex_lock(&ps->smi_mutex);
785 for (i = 0; i < 32; i++) {
788 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), i);
793 mutex_unlock(&ps->smi_mutex);
/* Generic busy-wait: poll (reg, offset) until the masked bits clear,
 * sleeping 1-2 ms between reads, with a 100 ms (HZ/10) deadline.
 * Caller must hold smi_mutex.
 */
796 static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset,
799 unsigned long timeout = jiffies + HZ / 10;
801 while (time_before(jiffies, timeout)) {
804 ret = _mv88e6xxx_reg_read(ps, reg, offset);
810 usleep_range(1000, 2000);
/* Locking wrapper around _mv88e6xxx_wait(). */
815 static int mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg,
816 int offset, u16 mask)
820 mutex_lock(&ps->smi_mutex);
821 ret = _mv88e6xxx_wait(ps, reg, offset, mask);
822 mutex_unlock(&ps->smi_mutex);
/* Wait for the indirect SMI engine in Global2 to go idle (smi_mutex held). */
827 static int _mv88e6xxx_phy_wait(struct mv88e6xxx_priv_state *ps)
829 return _mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
830 GLOBAL2_SMI_OP_BUSY);
/* Wait for the EEPROM to finish its initial register load. */
833 static int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
835 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
837 return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
838 GLOBAL2_EEPROM_OP_LOAD);
/* Wait for a pending EEPROM read/write operation to complete. */
841 static int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
843 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
845 return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
846 GLOBAL2_EEPROM_OP_BUSY);
/* Read one 16-bit word from the attached EEPROM under eeprom_mutex:
 * issue the READ op for the word address, wait for completion, then
 * fetch the result from GLOBAL2_EEPROM_DATA.
 */
849 static int mv88e6xxx_read_eeprom_word(struct dsa_switch *ds, int addr)
851 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
854 mutex_lock(&ps->eeprom_mutex);
856 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
857 GLOBAL2_EEPROM_OP_READ |
858 (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
862 ret = mv88e6xxx_eeprom_busy_wait(ds);
866 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
868 mutex_unlock(&ps->eeprom_mutex);
/* ethtool get_eeprom_len: size of the EEPROM if the chip supports one
 * (MV88E6XXX_FLAG_EEPROM); the no-EEPROM return value is elided here.
 */
872 static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
874 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
876 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
877 return ps->eeprom_len;
/* ethtool get_eeprom: read an arbitrary byte range from the EEPROM.
 * The EEPROM is word-addressed, so the copy is done in three phases:
 * an unaligned leading byte (high half of a word), whole words, and an
 * unaligned trailing byte (low half). Loop/boundary lines are elided.
 */
882 static int mv88e6xxx_get_eeprom(struct dsa_switch *ds,
883 struct ethtool_eeprom *eeprom, u8 *data)
885 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
890 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
893 offset = eeprom->offset;
897 eeprom->magic = 0xc3ec4951;
899 ret = mv88e6xxx_eeprom_load_wait(ds);
/* Leading odd byte: take the high octet of the containing word. */
906 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
910 *data++ = (word >> 8) & 0xff;
/* Aligned middle: copy full words, low octet first. */
920 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
924 *data++ = word & 0xff;
925 *data++ = (word >> 8) & 0xff;
/* Trailing odd byte: low octet of the final word. */
935 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
939 *data++ = word & 0xff;
/* Check the WRITE_EN bit in GLOBAL2_EEPROM_OP; used to refuse writes
 * when the EEPROM is write-protected (the return values are elided).
 */
949 static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch *ds)
951 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
954 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
958 if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN))
/* Write one 16-bit word to the EEPROM under eeprom_mutex: load the data
 * register, issue the WRITE op for the word address, then wait for the
 * operation to complete.
 */
964 static int mv88e6xxx_write_eeprom_word(struct dsa_switch *ds, int addr,
967 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
970 mutex_lock(&ps->eeprom_mutex);
972 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
976 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
977 GLOBAL2_EEPROM_OP_WRITE |
978 (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
982 ret = mv88e6xxx_eeprom_busy_wait(ds);
984 mutex_unlock(&ps->eeprom_mutex);
/* ethtool set_eeprom: write an arbitrary byte range. Validates the
 * magic (0xc3ec4951) and write-enable state first, then mirrors
 * get_eeprom's three-phase structure, read-modify-writing the partial
 * words at each unaligned edge. Loop/boundary lines are elided.
 */
988 static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
989 struct ethtool_eeprom *eeprom, u8 *data)
991 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
996 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
999 if (eeprom->magic != 0xc3ec4951)
1002 ret = mv88e6xxx_eeprom_is_readonly(ds);
1006 offset = eeprom->offset;
1010 ret = mv88e6xxx_eeprom_load_wait(ds);
/* Leading odd byte: merge new high octet with existing low octet. */
1017 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
1021 word = (*data++ << 8) | (word & 0xff);
1023 ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
/* Aligned middle: whole words, low octet first. */
1036 word |= *data++ << 8;
1038 ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
/* Trailing odd byte: merge new low octet with existing high octet. */
1050 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
1054 word = (word & 0xff00) | *data++;
1056 ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
/* Wait for a pending ATU operation to finish (smi_mutex held). */
1068 static int _mv88e6xxx_atu_wait(struct mv88e6xxx_priv_state *ps)
1070 return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_ATU_OP,
1071 GLOBAL_ATU_OP_BUSY);
/* Indirect PHY access through the Global2 SMI engine (for chips whose
 * PHYs are not directly addressable): program GLOBAL2_SMI_OP with a
 * clause-22 read/write command, wait for the engine, and transfer the
 * data via GLOBAL2_SMI_DATA. Caller must hold smi_mutex.
 */
1074 static int _mv88e6xxx_phy_read_indirect(struct mv88e6xxx_priv_state *ps,
1075 int addr, int regnum)
1079 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
1080 GLOBAL2_SMI_OP_22_READ | (addr << 5) |
1085 ret = _mv88e6xxx_phy_wait(ps);
1089 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA);
1094 static int _mv88e6xxx_phy_write_indirect(struct mv88e6xxx_priv_state *ps,
1095 int addr, int regnum, u16 val)
1099 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
1103 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
1104 GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
1107 return _mv88e6xxx_phy_wait(ps);
/* ethtool get_eee: only on chips with MV88E6XXX_FLAG_EEE. Reads the
 * EEE enable/tx-LPI bits from PHY register 16 and the live EEE-active
 * bit from PORT_STATUS, all under smi_mutex.
 */
1110 static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
1111 struct ethtool_eee *e)
1113 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1116 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE))
1119 mutex_lock(&ps->smi_mutex);
1121 reg = _mv88e6xxx_phy_read_indirect(ps, port, 16);
1125 e->eee_enabled = !!(reg & 0x0200);
1126 e->tx_lpi_enabled = !!(reg & 0x0100);
1128 reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
1132 e->eee_active = !!(reg & PORT_STATUS_EEE);
1136 mutex_unlock(&ps->smi_mutex);
/* ethtool set_eee: read-modify-write PHY register 16, clearing bits
 * 9:8 (0x0300) and setting them per e->eee_enabled / e->tx_lpi_enabled
 * (the set lines are partially elided). Requires MV88E6XXX_FLAG_EEE.
 */
1140 static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
1141 struct phy_device *phydev, struct ethtool_eee *e)
1143 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1147 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE))
1150 mutex_lock(&ps->smi_mutex);
1152 ret = _mv88e6xxx_phy_read_indirect(ps, port, 16);
1156 reg = ret & ~0x0300;
1159 if (e->tx_lpi_enabled)
1162 ret = _mv88e6xxx_phy_write_indirect(ps, port, 16, reg);
1164 mutex_unlock(&ps->smi_mutex);
/* Issue an ATU command for a given FID. FID placement depends on the
 * chip: dedicated GLOBAL_ATU_FID register when available; otherwise,
 * on 256-database chips, DBNum[7:4] live in ATU Control 15:12 and
 * DBNum[3:0] in the op word itself. Finally write the op and wait.
 */
1169 static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_priv_state *ps, u16 fid, u16 cmd)
1173 if (mv88e6xxx_has_fid_reg(ps)) {
1174 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_FID, fid);
1177 } else if (mv88e6xxx_num_databases(ps) == 256) {
1178 /* ATU DBNum[7:4] are located in ATU Control 15:12 */
1179 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL);
1183 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
1185 ((fid << 8) & 0xf000));
1189 /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
1193 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
1197 return _mv88e6xxx_atu_wait(ps);
/* Encode an ATU entry into GLOBAL_ATU_DATA: entry state in the low
 * bits, and — for used entries — either a trunk ID (with the TRUNK
 * flag) or a port vector, shifted/masked per entry type.
 */
1200 static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_priv_state *ps,
1201 struct mv88e6xxx_atu_entry *entry)
1203 u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
1205 if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1206 unsigned int mask, shift;
1209 data |= GLOBAL_ATU_DATA_TRUNK;
1210 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
1211 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
1213 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
1214 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
1217 data |= (entry->portv_trunkid << shift) & mask;
1220 return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_DATA, data);
/* Common helper for ATU flush/move: wait for the ATU, program the entry
 * data, pick the per-DB or all-DB variant of the flush/move op (static
 * entries included only when static_too), then issue the command.
 */
1223 static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_priv_state *ps,
1224 struct mv88e6xxx_atu_entry *entry,
1230 err = _mv88e6xxx_atu_wait(ps);
1234 err = _mv88e6xxx_atu_data_write(ps, entry);
1239 op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
1240 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
1242 op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
1243 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
1246 return _mv88e6xxx_atu_cmd(ps, entry->fid, op);
/* Flush a FID's ATU entries: an all-zero EntryState selects "flush". */
1249 static int _mv88e6xxx_atu_flush(struct mv88e6xxx_priv_state *ps,
1250 u16 fid, bool static_too)
1252 struct mv88e6xxx_atu_entry entry = {
1254 .state = 0, /* EntryState bits must be 0 */
1257 return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
/* Move a FID's entries from one port to another: EntryState all-ones
 * selects "move", with ToPort/FromPort packed into PortVec nibbles.
 */
1260 static int _mv88e6xxx_atu_move(struct mv88e6xxx_priv_state *ps, u16 fid,
1261 int from_port, int to_port, bool static_too)
1263 struct mv88e6xxx_atu_entry entry = {
1268 /* EntryState bits must be 0xF */
1269 entry.state = GLOBAL_ATU_DATA_STATE_MASK;
1271 /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
1272 entry.portv_trunkid = (to_port & 0x0f) << 4;
1273 entry.portv_trunkid |= from_port & 0x0f;
1275 return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
/* Remove a port's entries: a "move" whose destination port is 0xF. */
1278 static int _mv88e6xxx_atu_remove(struct mv88e6xxx_priv_state *ps, u16 fid,
1279 int port, bool static_too)
1281 /* Destination port 0xF means remove the entries */
1282 return _mv88e6xxx_atu_move(ps, fid, port, 0x0f, static_too);
/* Human-readable names for the PORT_CONTROL STP states, for debug/error
 * messages (indexed by PORT_CONTROL_STATE_* value).
 */
1285 static const char * const mv88e6xxx_port_state_names[] = {
1286 [PORT_CONTROL_STATE_DISABLED] = "Disabled",
1287 [PORT_CONTROL_STATE_BLOCKING] = "Blocking/Listening",
1288 [PORT_CONTROL_STATE_LEARNING] = "Learning",
1289 [PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
/* Set a port's STP state in PORT_CONTROL (smi_mutex held). If the state
 * actually changes, and the transition leaves Learning/Forwarding for
 * Disabled/Blocking, the port's dynamic ATU entries are flushed first
 * (FID 0, non-static only) so stale addresses don't linger.
 */
1292 static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port,
1295 struct dsa_switch *ds = ps->ds;
1299 reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL);
1303 oldstate = reg & PORT_CONTROL_STATE_MASK;
1305 if (oldstate != state) {
1306 /* Flush forwarding database if we're moving a port
1307 * from Learning or Forwarding state to Disabled or
1308 * Blocking or Listening state.
1310 if ((oldstate == PORT_CONTROL_STATE_LEARNING ||
1311 oldstate == PORT_CONTROL_STATE_FORWARDING)
1312 && (state == PORT_CONTROL_STATE_DISABLED ||
1313 state == PORT_CONTROL_STATE_BLOCKING)) {
1314 ret = _mv88e6xxx_atu_remove(ps, 0, port, false);
1319 reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
1320 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL,
1325 netdev_dbg(ds->ports[port], "PortState %s (was %s)\n",
1326 mv88e6xxx_port_state_names[state],
1327 mv88e6xxx_port_state_names[oldstate]);
/* Recompute a port's port-based VLAN table (PORT_BASE_VLAN). CPU and
 * DSA ports may egress to every port; user ports may egress only to
 * members of their own bridge group plus the CPU/DSA ports. A port is
 * never allowed to send back out itself. Written read-modify-write so
 * bits outside the port mask are preserved.
 */
1333 static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps,
1336 struct net_device *bridge = ps->ports[port].bridge_dev;
1337 const u16 mask = (1 << ps->info->num_ports) - 1;
1338 struct dsa_switch *ds = ps->ds;
1339 u16 output_ports = 0;
1343 /* allow CPU port or DSA link(s) to send frames to every port */
1344 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
1345 output_ports = mask;
1347 for (i = 0; i < ps->info->num_ports; ++i) {
1348 /* allow sending frames to every group member */
1349 if (bridge && ps->ports[i].bridge_dev == bridge)
1350 output_ports |= BIT(i);
1352 /* allow sending frames to CPU port and DSA link(s) */
1353 if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
1354 output_ports |= BIT(i);
1358 /* prevent frames from going back out of the port they came in on */
1359 output_ports &= ~BIT(port);
1361 reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
1366 reg |= output_ports & mask;
1368 return _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN, reg);
/* DSA STP-state hook: map the bridge-layer BR_STATE_* to the hardware
 * PORT_CONTROL_STATE_* (Blocking and Listening share one HW state),
 * apply it under smi_mutex, and log on failure. No-op on chips without
 * MV88E6XXX_FLAG_PORTSTATE.
 */
1371 static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
1374 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1378 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_PORTSTATE))
1382 case BR_STATE_DISABLED:
1383 stp_state = PORT_CONTROL_STATE_DISABLED;
1385 case BR_STATE_BLOCKING:
1386 case BR_STATE_LISTENING:
1387 stp_state = PORT_CONTROL_STATE_BLOCKING;
1389 case BR_STATE_LEARNING:
1390 stp_state = PORT_CONTROL_STATE_LEARNING;
1392 case BR_STATE_FORWARDING:
1394 stp_state = PORT_CONTROL_STATE_FORWARDING;
1398 mutex_lock(&ps->smi_mutex);
1399 err = _mv88e6xxx_port_state(ps, port, stp_state);
1400 mutex_unlock(&ps->smi_mutex);
1403 netdev_err(ds->ports[port], "failed to update state to %s\n",
1404 mv88e6xxx_port_state_names[stp_state]);
/* Combined get/set of a port's default VID (PORT_DEFAULT_VLAN).
 * "new" non-NULL writes a new PVID (read-modify-write of the VID
 * field); the old value is reported through the other out-pointer
 * (elided from this chunk). Caller must hold smi_mutex.
 */
1407 static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port,
1410 struct dsa_switch *ds = ps->ds;
1414 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_DEFAULT_VLAN);
1418 pvid = ret & PORT_DEFAULT_VLAN_MASK;
1421 ret &= ~PORT_DEFAULT_VLAN_MASK;
1422 ret |= *new & PORT_DEFAULT_VLAN_MASK;
1424 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
1425 PORT_DEFAULT_VLAN, ret);
1429 netdev_dbg(ds->ports[port], "DefaultVID %d (was %d)\n", *new,
/* Read-only wrapper: fetch the current PVID. */
1439 static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_priv_state *ps,
1440 int port, u16 *pvid)
1442 return _mv88e6xxx_port_pvid(ps, port, NULL, pvid)
/* Write-only wrapper: set a new PVID, discarding the old one. */;
1445 static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_priv_state *ps,
1448 return _mv88e6xxx_port_pvid(ps, port, &pvid, NULL);
/* Wait for a pending VTU operation to finish (smi_mutex held). */
1451 static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_priv_state *ps)
1453 return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_VTU_OP,
1454 GLOBAL_VTU_OP_BUSY);
/* Write a VTU op code and wait for it to complete. */
1457 static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_priv_state *ps, u16 op)
1461 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_OP, op);
1465 return _mv88e6xxx_vtu_wait(ps);
/* Flush the entire VTU and STU (FLUSH_ALL op). */
1468 static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_priv_state *ps)
1472 ret = _mv88e6xxx_vtu_wait(ps)
1476 return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_FLUSH_ALL);
/* Unpack per-port member/state nibbles from the three GLOBAL_VTU_DATA
 * registers into entry->data[]. Four ports per register; nibble_offset
 * selects VTU (0) or STU (2) nibbles within each 4-bit-per-port slot.
 */
1479 static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps,
1480 struct mv88e6xxx_vtu_stu_entry *entry,
1481 unsigned int nibble_offset)
1487 for (i = 0; i < 3; ++i) {
1488 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
1489 GLOBAL_VTU_DATA_0_3 + i);
1496 for (i = 0; i < ps->info->num_ports; ++i) {
1497 unsigned int shift = (i % 4) * 4 + nibble_offset;
1498 u16 reg = regs[i / 4];
1500 entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
/* VTU flavour: member-tag nibbles at offset 0. */
1506 static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_priv_state *ps,
1507 struct mv88e6xxx_vtu_stu_entry *entry)
1509 return _mv88e6xxx_vtu_stu_data_read(ps, entry, 0);
/* STU flavour: port-state nibbles at offset 2. */
1512 static int mv88e6xxx_stu_data_read(struct mv88e6xxx_priv_state *ps,
1513 struct mv88e6xxx_vtu_stu_entry *entry)
1515 return _mv88e6xxx_vtu_stu_data_read(ps, entry, 2);
/* Inverse of _mv88e6xxx_vtu_stu_data_read: pack entry->data[] into the
 * three GLOBAL_VTU_DATA registers, one nibble per port at the given
 * offset, and write them out.
 */
1518 static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps,
1519 struct mv88e6xxx_vtu_stu_entry *entry,
1520 unsigned int nibble_offset)
1522 u16 regs[3] = { 0 };
1526 for (i = 0; i < ps->info->num_ports; ++i) {
1527 unsigned int shift = (i % 4) * 4 + nibble_offset;
1528 u8 data = entry->data[i];
1530 regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
1533 for (i = 0; i < 3; ++i) {
1534 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL,
1535 GLOBAL_VTU_DATA_0_3 + i, regs[i]);
/* VTU flavour: member-tag nibbles at offset 0. */
1543 static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_priv_state *ps,
1544 struct mv88e6xxx_vtu_stu_entry *entry)
1546 return _mv88e6xxx_vtu_stu_data_write(ps, entry, 0);
/* STU flavour: port-state nibbles at offset 2. */
1549 static int mv88e6xxx_stu_data_write(struct mv88e6xxx_priv_state *ps,
1550 struct mv88e6xxx_vtu_stu_entry *entry)
1552 return _mv88e6xxx_vtu_stu_data_write(ps, entry, 2);
/* Load a VID into GLOBAL_VTU_VID ahead of a VTU operation. */
1555 static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_priv_state *ps, u16 vid)
1557 return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID,
1558 vid & GLOBAL_VTU_VID_MASK);
/* Iterate the VTU: issue GET_NEXT (relative to the VID previously
 * written via _mv88e6xxx_vtu_vid_write), then decode VID/valid, the
 * per-port member tags, the FID (from the dedicated register or split
 * across the op word on 256-database chips), and the SID on chips with
 * an STU. Result is copied to *entry (final copy elided).
 */
1561 static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps,
1562 struct mv88e6xxx_vtu_stu_entry *entry)
1564 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1567 ret = _mv88e6xxx_vtu_wait(ps);
1571 ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_VTU_GET_NEXT);
1575 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
1579 next.vid = ret & GLOBAL_VTU_VID_MASK;
1580 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1583 ret = mv88e6xxx_vtu_data_read(ps, &next);
1587 if (mv88e6xxx_has_fid_reg(ps)) {
1588 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
1593 next.fid = ret & GLOBAL_VTU_FID_MASK;
1594 } else if (mv88e6xxx_num_databases(ps) == 256) {
1595 /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
1596 * VTU DBNum[3:0] are located in VTU Operation 3:0
1598 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
1603 next.fid = (ret & 0xf00) >> 4;
1604 next.fid |= ret & 0xf;
1607 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_STU)) {
1608 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
1613 next.sid = ret & GLOBAL_VTU_SID_MASK;
/* switchdev VLAN dump: walk the whole VTU (starting from all-ones VID so
 * GetNext wraps to the first entry) and invoke cb for every VLAN this
 * port is a member of, flagging UNTAGGED and PVID as appropriate.
 * Returns -EOPNOTSUPP-style early when the chip has no VTU (exact return
 * value is on an elided line - confirm against the full source).
 */
1621 static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
1622 struct switchdev_obj_port_vlan *vlan,
1623 int (*cb)(struct switchdev_obj *obj))
1625 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1626 struct mv88e6xxx_vtu_stu_entry next;
1630 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
1633 mutex_lock(&ps->smi_mutex);
1635 err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
/* Prime GetNext with the max VID so iteration starts at the lowest VID. */
1639 err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
1644 err = _mv88e6xxx_vtu_getnext(ps, &next);
/* Skip VLANs this port is not a member of. */
1651 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
1654 /* reinit and dump this VLAN obj */
1655 vlan->vid_begin = vlan->vid_end = next.vid;
1658 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
1659 vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1661 if (next.vid == pvid)
1662 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
1664 err = cb(&vlan->obj);
1667 } while (next.vid < GLOBAL_VTU_VID_MASK);
1670 mutex_unlock(&ps->smi_mutex);
/* Program one VTU entry: write member tags, SID (if the chip has an STU),
 * FID (via register or folded into the op word on 256-database chips),
 * then the VID with its valid bit, and finally kick the Load/Purge op.
 */
1675 static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps,
1676 struct mv88e6xxx_vtu_stu_entry *entry)
1678 u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
1682 ret = _mv88e6xxx_vtu_wait(ps);
1689 /* Write port member tags */
1690 ret = mv88e6xxx_vtu_data_write(ps, entry);
1694 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_STU)) {
1695 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1696 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1701 if (mv88e6xxx_has_fid_reg(ps)) {
1702 reg = entry->fid & GLOBAL_VTU_FID_MASK;
1703 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_FID, reg);
1706 } else if (mv88e6xxx_num_databases(ps) == 256) {
1707 /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
1708 * VTU DBNum[3:0] are located in VTU Operation 3:0
1710 op |= (entry->fid & 0xf0) << 8;
1711 op |= entry->fid & 0xf;
1714 reg = GLOBAL_VTU_VID_VALID;
1716 reg |= entry->vid & GLOBAL_VTU_VID_MASK;
1717 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1721 return _mv88e6xxx_vtu_cmd(ps, op);
/* Issue an STU GetNext starting from @sid and decode the returned entry
 * (SID, valid bit, per-port states) into *entry.
 */
1724 static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state *ps, u8 sid,
1725 struct mv88e6xxx_vtu_stu_entry *entry)
1727 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1730 ret = _mv88e6xxx_vtu_wait(ps);
/* Seed the starting SID before the GetNext command. */
1734 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID,
1735 sid & GLOBAL_VTU_SID_MASK);
1739 ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_GET_NEXT);
1743 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_SID);
1747 next.sid = ret & GLOBAL_VTU_SID_MASK;
1749 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
1753 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1756 ret = mv88e6xxx_stu_data_read(ps, &next);
/* Program one STU entry: write the per-port states, mark the entry valid
 * via the VID register, write the SID, then kick the STU Load/Purge op.
 */
1765 static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state *ps,
1766 struct mv88e6xxx_vtu_stu_entry *entry)
1771 ret = _mv88e6xxx_vtu_wait(ps);
1778 /* Write port states */
1779 ret = mv88e6xxx_stu_data_write(ps, entry);
1783 reg = GLOBAL_VTU_VID_VALID;
1785 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1789 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1790 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1794 return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_LOAD_PURGE);
/* Get and/or set a port's default FID. The FID is split across two port
 * registers: bits 3:0 in PORT_BASE_VLAN (offset 12) and - judging by the
 * upper_mask handling - bits 11:4 in PORT_CONTROL_1 on larger-database
 * chips. The elided signature presumably takes (u16 *new, u16 *old)-style
 * in/out pointers - confirm against the full source.
 */
1797 static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port,
1800 struct dsa_switch *ds = ps->ds;
/* upper_mask selection depends on how many databases the chip supports. */
1805 if (mv88e6xxx_num_databases(ps) == 4096)
1807 else if (mv88e6xxx_num_databases(ps) == 256)
1812 /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
1813 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
1817 fid = (ret & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;
/* Read-modify-write the low nibble when a new FID was supplied. */
1820 ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
1821 ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
1823 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN,
1829 /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
1830 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_1);
1834 fid |= (ret & upper_mask) << 4;
1838 ret |= (*new >> 4) & upper_mask;
1840 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1,
1845 netdev_dbg(ds->ports[port], "FID %d (was %d)\n", *new, fid);
/* Read a port's default FID (get-only wrapper: no new value to write). */
1854 static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_priv_state *ps,
1857 return _mv88e6xxx_port_fid(ps, port, NULL, fid);
/* Set a port's default FID (set-only wrapper: old value not returned). */
1860 static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_priv_state *ps,
1863 return _mv88e6xxx_port_fid(ps, port, &fid, NULL);
/* Allocate a fresh FID: mark every FID already used by a port default or
 * by a VTU entry in a bitmap, pick the first free one (starting at 1,
 * since 0 means "no multiple databases"), and flush its ATU database so
 * it starts empty. Writes the chosen FID through *fid.
 */
1866 static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid)
1868 DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
1869 struct mv88e6xxx_vtu_stu_entry vlan;
1872 bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
1874 /* Set every FID bit used by the (un)bridged ports */
1875 for (i = 0; i < ps->info->num_ports; ++i) {
1876 err = _mv88e6xxx_port_fid_get(ps, i, fid);
1880 set_bit(*fid, fid_bitmap);
1883 /* Set every FID bit used by the VLAN entries */
1884 err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
1889 err = _mv88e6xxx_vtu_getnext(ps, &vlan);
1896 set_bit(vlan.fid, fid_bitmap);
1897 } while (vlan.vid < GLOBAL_VTU_VID_MASK);
1899 /* The reset value 0x000 is used to indicate that multiple address
1900 * databases are not needed. Return the next positive available.
1902 *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
1903 if (unlikely(*fid >= mv88e6xxx_num_databases(ps)))
1906 /* Clear the database */
1907 return _mv88e6xxx_atu_flush(ps, *fid, true);
/* Build a brand-new VTU entry for @vid: allocate a fresh FID, make only
 * CPU/DSA ports members (unmodified tag), and on families with an STU
 * make sure the single SID-0 STU entry exists so the VTU entry is valid.
 */
1910 static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid,
1911 struct mv88e6xxx_vtu_stu_entry *entry)
1913 struct dsa_switch *ds = ps->ds;
1914 struct mv88e6xxx_vtu_stu_entry vlan = {
1920 err = _mv88e6xxx_fid_new(ps, &vlan.fid);
1924 /* exclude all ports except the CPU and DSA ports */
1925 for (i = 0; i < ps->info->num_ports; ++i)
1926 vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
1927 ? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
1928 : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1930 if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
1931 mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps)) {
1932 struct mv88e6xxx_vtu_stu_entry vstp;
1934 /* Adding a VTU entry requires a valid STU entry. As VSTP is not
1935 * implemented, only one STU entry is needed to cover all VTU
1936 * entries. Thus, validate the SID 0.
1939 err = _mv88e6xxx_stu_getnext(ps, GLOBAL_VTU_SID_MASK, &vstp);
/* Create the SID-0 STU entry if GetNext did not return it valid. */
1943 if (vstp.sid != vlan.sid || !vstp.valid) {
1944 memset(&vstp, 0, sizeof(vstp));
1946 vstp.sid = vlan.sid;
1948 err = _mv88e6xxx_stu_loadpurge(ps, &vstp);
/* Look up the VTU entry for @vid via GetNext from vid-1. If it is absent
 * or invalid, either create it (when @creat) or report back so switchdev
 * can fall back to a software VLAN.
 */
1958 static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid,
1959 struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
1966 err = _mv88e6xxx_vtu_vid_write(ps, vid - 1);
1970 err = _mv88e6xxx_vtu_getnext(ps, entry);
1974 if (entry->vid != vid || !entry->valid) {
1977 /* -ENOENT would've been more appropriate, but switchdev expects
1978 * -EOPNOTSUPP to inform bridge about an eventual software VLAN.
1981 err = _mv88e6xxx_vtu_new(ps, vid, entry);
/* Scan the VTU over [vid_begin, vid_end] and reject (with a warning) any
 * VLAN already used by a port that belongs to a different bridge than
 * @port - hardware VLANs cannot span bridges on this switch.
 */
1987 static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
1988 u16 vid_begin, u16 vid_end)
1990 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1991 struct mv88e6xxx_vtu_stu_entry vlan;
1997 mutex_lock(&ps->smi_mutex);
/* Start GetNext iteration just below the first VID of interest. */
1999 err = _mv88e6xxx_vtu_vid_write(ps, vid_begin - 1);
2004 err = _mv88e6xxx_vtu_getnext(ps, &vlan);
2011 if (vlan.vid > vid_end)
2014 for (i = 0; i < ps->info->num_ports; ++i) {
2015 if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
2019 GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
2022 if (ps->ports[i].bridge_dev ==
2023 ps->ports[port].bridge_dev)
2024 break; /* same bridge, check next VLAN */
2026 netdev_warn(ds->ports[port],
2027 "hardware VLAN %d already used by %s\n",
2029 netdev_name(ps->ports[i].bridge_dev));
2033 } while (vlan.vid < vid_end);
2036 mutex_unlock(&ps->smi_mutex);
/* Human-readable names for the PORT_CONTROL_2 802.1Q modes, indexed by
 * the register field value; used in debug logging.
 */
2041 static const char * const mv88e6xxx_port_8021q_mode_names[] = {
2042 [PORT_CONTROL_2_8021Q_DISABLED] = "Disabled",
2043 [PORT_CONTROL_2_8021Q_FALLBACK] = "Fallback",
2044 [PORT_CONTROL_2_8021Q_CHECK] = "Check",
2045 [PORT_CONTROL_2_8021Q_SECURE] = "Secure",
/* switchdev VLAN-filtering toggle: switch the port's 802.1Q mode between
 * Secure (filtering on) and Disabled (filtering off) via a read-modify-
 * write of PORT_CONTROL_2, logging the transition.
 */
2048 static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
2049 bool vlan_filtering)
2051 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2052 u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
2053 PORT_CONTROL_2_8021Q_DISABLED;
2056 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
2059 mutex_lock(&ps->smi_mutex);
2061 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_2);
2065 old = ret & PORT_CONTROL_2_8021Q_MASK;
/* Replace only the 802.1Q mode bits, preserving the rest of the register. */
2068 ret &= ~PORT_CONTROL_2_8021Q_MASK;
2069 ret |= new & PORT_CONTROL_2_8021Q_MASK;
2071 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_2,
2076 netdev_dbg(ds->ports[port], "802.1Q Mode %s (was %s)\n",
2077 mv88e6xxx_port_8021q_mode_names[new],
2078 mv88e6xxx_port_8021q_mode_names[old]);
2083 mutex_unlock(&ps->smi_mutex);
/* switchdev prepare phase for VLAN add: only validates that the requested
 * range does not collide with VLANs owned by another bridge; no resources
 * are reserved here.
 */
2088 static int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
2089 const struct switchdev_obj_port_vlan *vlan,
2090 struct switchdev_trans *trans)
2092 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2095 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
2098 /* If the requested port doesn't belong to the same bridge as the VLAN
2099 * members, do not support it (yet) and fallback to software VLAN.
2101 err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin,
2106 /* We don't need any dynamic resource from the kernel (yet),
2107 * so skip the prepare phase.
/* Add @port to VLAN @vid: fetch (or create) the VTU entry, set this
 * port's member tag to untagged/tagged, and write the entry back.
 * Caller holds smi_mutex.
 */
2112 static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state *ps, int port,
2113 u16 vid, bool untagged)
2115 struct mv88e6xxx_vtu_stu_entry vlan;
2118 err = _mv88e6xxx_vtu_get(ps, vid, &vlan, true);
2122 vlan.data[port] = untagged ?
2123 GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
2124 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
2126 return _mv88e6xxx_vtu_loadpurge(ps, &vlan);
/* switchdev commit phase for VLAN add: add every VID in the range, then
 * optionally program the PVID. Errors are only logged (void return, per
 * the switchdev commit-phase contract).
 */
2129 static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
2130 const struct switchdev_obj_port_vlan *vlan,
2131 struct switchdev_trans *trans)
2133 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2134 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
2135 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
2138 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
2141 mutex_lock(&ps->smi_mutex);
2143 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
2144 if (_mv88e6xxx_port_vlan_add(ps, port, vid, untagged))
2145 netdev_err(ds->ports[port], "failed to add VLAN %d%c\n",
2146 vid, untagged ? 'u' : 't');
/* PVID is set to the last VID of the range, matching bridge semantics. */
2148 if (pvid && _mv88e6xxx_port_pvid_set(ps, port, vlan->vid_end))
2149 netdev_err(ds->ports[port], "failed to set PVID %d\n",
2152 mutex_unlock(&ps->smi_mutex);
/* Remove @port from VLAN @vid: mark the port non-member, keep the VTU
 * entry only while some non-CPU/DSA port still uses it, write it back,
 * then purge this port's addresses from the VLAN's ATU database.
 * Caller holds smi_mutex.
 */
2155 static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps,
2158 struct dsa_switch *ds = ps->ds;
2159 struct mv88e6xxx_vtu_stu_entry vlan;
2162 err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
2166 /* Tell switchdev if this VLAN is handled in software */
2167 if (vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
2170 vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
2172 /* keep the VLAN unless all ports are excluded */
2174 for (i = 0; i < ps->info->num_ports; ++i) {
2175 if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
2178 if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
2184 err = _mv88e6xxx_vtu_loadpurge(ps, &vlan);
2188 return _mv88e6xxx_atu_remove(ps, vlan.fid, port, false);
/* switchdev VLAN delete: remove the port from every VID in the range and,
 * judging by the pvid fetch and the pvid_set(..., 0) call, clear the PVID
 * when it fell inside the deleted range.
 */
2191 static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
2192 const struct switchdev_obj_port_vlan *vlan)
2194 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2198 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
2201 mutex_lock(&ps->smi_mutex);
2203 err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
2207 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
2208 err = _mv88e6xxx_port_vlan_del(ps, port, vid);
2213 err = _mv88e6xxx_port_pvid_set(ps, port, 0);
2220 mutex_unlock(&ps->smi_mutex);
/* Write a 6-byte MAC address into the three 16-bit GLOBAL_ATU_MAC
 * registers, two bytes (big-endian within the register) per write.
 */
2225 static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_priv_state *ps,
2226 const unsigned char *addr)
2230 for (i = 0; i < 3; i++) {
2231 ret = _mv88e6xxx_reg_write(
2232 ps, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
2233 (addr[i * 2] << 8) | addr[i * 2 + 1]);
/* Read a 6-byte MAC address out of the three 16-bit GLOBAL_ATU_MAC
 * registers, unpacking two bytes per register (high byte first).
 */
2241 static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_priv_state *ps,
2242 unsigned char *addr)
2246 for (i = 0; i < 3; i++) {
2247 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
2248 GLOBAL_ATU_MAC_01 + i);
2251 addr[i * 2] = ret >> 8;
2252 addr[i * 2 + 1] = ret & 0xff;
/* Load one ATU entry into the hardware: wait for the ATU to be idle,
 * write the MAC and data registers, then issue Load into entry->fid.
 */
2258 static int _mv88e6xxx_atu_load(struct mv88e6xxx_priv_state *ps,
2259 struct mv88e6xxx_atu_entry *entry)
2263 ret = _mv88e6xxx_atu_wait(ps);
2267 ret = _mv88e6xxx_atu_mac_write(ps, entry->mac);
2271 ret = _mv88e6xxx_atu_data_write(ps, entry);
2275 return _mv88e6xxx_atu_cmd(ps, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
/* Build and load an ATU entry for (addr, vid) on @port. VID 0 maps to the
 * port's private database; otherwise the FID comes from the VLAN's VTU
 * entry. Loading with state UNUSED removes the entry, so the port vector
 * is only set for real loads.
 */
2278 static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port,
2279 const unsigned char *addr, u16 vid,
2282 struct mv88e6xxx_atu_entry entry = { 0 };
2283 struct mv88e6xxx_vtu_stu_entry vlan;
2286 /* Null VLAN ID corresponds to the port private database */
2288 err = _mv88e6xxx_port_fid_get(ps, port, &vlan.fid);
2290 err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
2294 entry.fid = vlan.fid;
2295 entry.state = state;
2296 ether_addr_copy(entry.mac, addr);
2297 if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
2298 entry.trunk = false;
2299 entry.portv_trunkid = BIT(port);
2302 return _mv88e6xxx_atu_load(ps, &entry);
/* switchdev prepare phase for FDB add: nothing to reserve; only verify
 * the chip actually has an ATU.
 */
2305 static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
2306 const struct switchdev_obj_port_fdb *fdb,
2307 struct switchdev_trans *trans)
2309 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2311 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
2314 /* We don't need any dynamic resource from the kernel (yet),
2315 * so skip the prepare phase.
/* switchdev commit phase for FDB add: load a static entry (multicast or
 * unicast static state chosen from the address) and log on failure.
 */
2320 static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
2321 const struct switchdev_obj_port_fdb *fdb,
2322 struct switchdev_trans *trans)
2324 int state = is_multicast_ether_addr(fdb->addr) ?
2325 GLOBAL_ATU_DATA_STATE_MC_STATIC :
2326 GLOBAL_ATU_DATA_STATE_UC_STATIC;
2327 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2329 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
2332 mutex_lock(&ps->smi_mutex);
2333 if (_mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, state))
2334 netdev_err(ds->ports[port], "failed to load MAC address\n");
2335 mutex_unlock(&ps->smi_mutex);
/* switchdev FDB delete: loading the entry with state UNUSED removes it
 * from the ATU.
 */
2338 static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
2339 const struct switchdev_obj_port_fdb *fdb)
2341 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2344 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
2347 mutex_lock(&ps->smi_mutex);
2348 ret = _mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid,
2349 GLOBAL_ATU_DATA_STATE_UNUSED);
2350 mutex_unlock(&ps->smi_mutex);
/* Issue an ATU GetNext in database @fid and decode the returned entry:
 * MAC, entry state, and - for in-use entries - either the trunk ID or the
 * port vector, depending on the TRUNK bit.
 */
2355 static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid,
2356 struct mv88e6xxx_atu_entry *entry)
2358 struct mv88e6xxx_atu_entry next = { 0 };
2363 ret = _mv88e6xxx_atu_wait(ps);
2367 ret = _mv88e6xxx_atu_cmd(ps, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
2371 ret = _mv88e6xxx_atu_mac_read(ps, next.mac);
2375 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_DATA);
2379 next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
2380 if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
2381 unsigned int mask, shift;
/* Trunk entries carry a trunk ID; normal entries carry a port vector. */
2383 if (ret & GLOBAL_ATU_DATA_TRUNK) {
2385 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
2386 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
2389 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
2390 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
2393 next.portv_trunkid = (ret & mask) >> shift;
/* Dump one ATU database (@fid) for @port: seed GetNext with the broadcast
 * MAC so iteration wraps to the first entry, then report every non-trunk
 * entry whose port vector includes this port via @cb, marking static
 * entries NUD_NOARP and learned ones NUD_REACHABLE.
 */
2400 static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state *ps,
2401 u16 fid, u16 vid, int port,
2402 struct switchdev_obj_port_fdb *fdb,
2403 int (*cb)(struct switchdev_obj *obj))
2405 struct mv88e6xxx_atu_entry addr = {
2406 .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
2410 err = _mv88e6xxx_atu_mac_write(ps, addr.mac);
2415 err = _mv88e6xxx_atu_getnext(ps, fid, &addr);
2419 if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
2422 if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
2423 bool is_static = addr.state ==
2424 (is_multicast_ether_addr(addr.mac) ?
2425 GLOBAL_ATU_DATA_STATE_MC_STATIC :
2426 GLOBAL_ATU_DATA_STATE_UC_STATIC);
2429 ether_addr_copy(fdb->addr, addr.mac);
2430 fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
2432 err = cb(&fdb->obj);
/* GetNext returns the broadcast MAC again once the database wraps. */
2436 } while (!is_broadcast_ether_addr(addr.mac));
/* switchdev FDB dump: first dump the port's default database (VID 0),
 * then walk the VTU and dump each VLAN's database in turn.
 */
2441 static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
2442 struct switchdev_obj_port_fdb *fdb,
2443 int (*cb)(struct switchdev_obj *obj))
2445 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2446 struct mv88e6xxx_vtu_stu_entry vlan = {
2447 .vid = GLOBAL_VTU_VID_MASK, /* all ones */
2452 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
2455 mutex_lock(&ps->smi_mutex);
2457 /* Dump port's default Filtering Information Database (VLAN ID 0) */
2458 err = _mv88e6xxx_port_fid_get(ps, port, &fid);
2462 err = _mv88e6xxx_port_fdb_dump_one(ps, fid, 0, port, fdb, cb);
2466 /* Dump VLANs' Filtering Information Databases */
2467 err = _mv88e6xxx_vtu_vid_write(ps, vlan.vid);
2472 err = _mv88e6xxx_vtu_getnext(ps, &vlan);
2479 err = _mv88e6xxx_port_fdb_dump_one(ps, vlan.fid, vlan.vid, port,
2483 } while (vlan.vid < GLOBAL_VTU_VID_MASK);
2486 mutex_unlock(&ps->smi_mutex);
/* DSA bridge join: record the bridge on this port, then recompute the
 * port-based VLAN map of every port that belongs to the same bridge so
 * they can all reach each other.
 */
2491 static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
2492 struct net_device *bridge)
2494 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2497 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE))
2500 mutex_lock(&ps->smi_mutex);
2502 /* Assign the bridge and remap each port's VLANTable */
2503 ps->ports[port].bridge_dev = bridge;
2505 for (i = 0; i < ps->info->num_ports; ++i) {
2506 if (ps->ports[i].bridge_dev == bridge) {
2507 err = _mv88e6xxx_port_based_vlan_map(ps, i);
2513 mutex_unlock(&ps->smi_mutex);
/* DSA bridge leave: clear the bridge from this port, then remap the VLAN
 * table of the leaving port and of every port still in the old bridge.
 * Failures are only logged (void return).
 */
2518 static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
2520 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2521 struct net_device *bridge = ps->ports[port].bridge_dev;
2524 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE))
2527 mutex_lock(&ps->smi_mutex);
2529 /* Unassign the bridge and remap each port's VLANTable */
2530 ps->ports[port].bridge_dev = NULL;
2532 for (i = 0; i < ps->info->num_ports; ++i)
2533 if (i == port || ps->ports[i].bridge_dev == bridge)
2534 if (_mv88e6xxx_port_based_vlan_map(ps, i))
2535 netdev_warn(ds->ports[i], "failed to remap\n");
2537 mutex_unlock(&ps->smi_mutex);
/* Write a paged PHY register: select @page via PHY register 0x16, write
 * the value, then restore page 0 regardless of the write's outcome.
 */
2540 static int _mv88e6xxx_phy_page_write(struct mv88e6xxx_priv_state *ps,
2541 int port, int page, int reg, int val)
2545 ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
2547 goto restore_page_0;
2549 ret = _mv88e6xxx_phy_write_indirect(ps, port, reg, val);
/* Always leave the PHY back on page 0. */
2551 _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);
/* Read a paged PHY register: select @page via PHY register 0x16, read the
 * value, then restore page 0 regardless of the read's outcome.
 */
2556 static int _mv88e6xxx_phy_page_read(struct mv88e6xxx_priv_state *ps,
2557 int port, int page, int reg)
2561 ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
2563 goto restore_page_0;
2565 ret = _mv88e6xxx_phy_read_indirect(ps, port, reg);
/* Always leave the PHY back on page 0. */
2567 _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);
/* Full switch reset: disable all ports, let the transmit queues drain,
 * pulse the optional reset GPIO, issue a software reset (keeping the PPU
 * running when the chip needs it for indirect PHY access), and poll up to
 * one second for the reset-done pattern in global register 0x00.
 */
2572 static int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps)
2574 bool ppu_active = mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE);
2575 u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
2576 struct gpio_desc *gpiod = ps->reset;
2577 unsigned long timeout;
2581 /* Set all ports to the disabled state. */
2582 for (i = 0; i < ps->info->num_ports; i++) {
2583 ret = _mv88e6xxx_reg_read(ps, REG_PORT(i), PORT_CONTROL);
2587 ret = _mv88e6xxx_reg_write(ps, REG_PORT(i), PORT_CONTROL,
2593 /* Wait for transmit queues to drain. */
2594 usleep_range(2000, 4000);
2596 /* If there is a gpio connected to the reset pin, toggle it */
2598 gpiod_set_value_cansleep(gpiod, 1);
2599 usleep_range(10000, 20000);
2600 gpiod_set_value_cansleep(gpiod, 0);
2601 usleep_range(10000, 20000);
2604 /* Reset the switch. Keep the PPU active if requested. The PPU
2605 * needs to be active to support indirect phy register access
2606 * through global registers 0x18 and 0x19.
2609 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc000);
2611 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc400);
2615 /* Wait up to one second for reset to complete. */
2616 timeout = jiffies + 1 * HZ;
2617 while (time_before(jiffies, timeout)) {
2618 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 0x00);
2622 if ((ret & is_reset) == is_reset)
2624 usleep_range(1000, 2000);
/* Timed out without seeing the reset-done bits. */
2626 if (time_after(jiffies, timeout))
/* Power up the SerDes interface if its BMCR still has the power-down bit
 * set, via the fiber/SerDes PHY page registers.
 */
2634 static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state *ps)
2638 ret = _mv88e6xxx_phy_page_read(ps, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
2643 if (ret & BMCR_PDOWN) {
2645 ret = _mv88e6xxx_phy_page_write(ps, REG_FIBER_SERDES,
2646 PAGE_FIBER_SERDES, MII_BMCR,
/* Per-port hardware bring-up. Configures, in order: MAC forcing (CPU/DSA
 * ports forced to max speed/duplex/link, user ports unforced), Port
 * Control (tagging mode, STP forwarding, unknown-unicast/multicast
 * forwarding for the upstream port), SerDes power-up where the port's
 * CMODE is a SerDes mode, Port Control 2 (jumbo frames, 802.1Q disabled,
 * upstream port selection on 6095/6185), address-learning vector, egress
 * and ingress rate control, ATU limits, priority overrides, EDSA
 * Ethertype, 802.1p tag remap tables, trunking off, default FID 0, the
 * port-based VLAN map, and finally the default VLAN ID/priority.
 * Register values are per-family; see the chip-family guards below.
 */
2653 static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
2655 struct dsa_switch *ds = ps->ds;
2659 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2660 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2661 mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
2662 mv88e6xxx_6065_family(ps) || mv88e6xxx_6320_family(ps)) {
2663 /* MAC Forcing register: don't force link, speed,
2664 * duplex or flow control state to any particular
2665 * values on physical ports, but force the CPU port
2666 * and all DSA ports to their maximum bandwidth and
2669 reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
2670 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
2671 reg &= ~PORT_PCS_CTRL_UNFORCED;
2672 reg |= PORT_PCS_CTRL_FORCE_LINK |
2673 PORT_PCS_CTRL_LINK_UP |
2674 PORT_PCS_CTRL_DUPLEX_FULL |
2675 PORT_PCS_CTRL_FORCE_DUPLEX;
/* 6065 tops out at 100 Mb/s; the other families force 1000 Mb/s. */
2676 if (mv88e6xxx_6065_family(ps))
2677 reg |= PORT_PCS_CTRL_100;
2679 reg |= PORT_PCS_CTRL_1000;
2681 reg |= PORT_PCS_CTRL_UNFORCED;
2684 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2685 PORT_PCS_CTRL, reg);
2690 /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
2691 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
2692 * tunneling, determine priority by looking at 802.1p and IP
2693 * priority fields (IP prio has precedence), and set STP state
2696 * If this is the CPU link, use DSA or EDSA tagging depending
2697 * on which tagging mode was configured.
2699 * If this is a link to another switch, use DSA tagging mode.
2701 * If this is the upstream port for this switch, enable
2702 * forwarding of unknown unicasts and multicasts.
2705 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2706 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2707 mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
2708 mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps))
2709 reg = PORT_CONTROL_IGMP_MLD_SNOOP |
2710 PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
2711 PORT_CONTROL_STATE_FORWARDING;
2712 if (dsa_is_cpu_port(ds, port)) {
2713 if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
2714 reg |= PORT_CONTROL_DSA_TAG;
2715 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2716 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2717 mv88e6xxx_6320_family(ps)) {
2718 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2719 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
2721 reg |= PORT_CONTROL_FRAME_MODE_DSA;
2722 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2723 PORT_CONTROL_FORWARD_UNKNOWN_MC;
2726 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2727 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2728 mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
2729 mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps)) {
2730 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2731 reg |= PORT_CONTROL_EGRESS_ADD_TAG;
2734 if (dsa_is_dsa_port(ds, port)) {
2735 if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
2736 reg |= PORT_CONTROL_DSA_TAG;
2737 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2738 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2739 mv88e6xxx_6320_family(ps)) {
2740 reg |= PORT_CONTROL_FRAME_MODE_DSA;
2743 if (port == dsa_upstream_port(ds))
2744 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2745 PORT_CONTROL_FORWARD_UNKNOWN_MC;
2748 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2754 /* If this port is connected to a SerDes, make sure the SerDes is not
2757 if (mv88e6xxx_6352_family(ps)) {
2758 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
2761 ret &= PORT_STATUS_CMODE_MASK;
2762 if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
2763 (ret == PORT_STATUS_CMODE_1000BASE_X) ||
2764 (ret == PORT_STATUS_CMODE_SGMII)) {
2765 ret = mv88e6xxx_power_on_serdes(ps);
2771 /* Port Control 2: don't force a good FCS, set the maximum frame size to
2772 * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
2773 * untagged frames on this port, do a destination address lookup on all
2774 * received packets as usual, disable ARP mirroring and don't send a
2775 * copy of all transmitted/received frames on this port to the CPU.
2778 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2779 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2780 mv88e6xxx_6095_family(ps) || mv88e6xxx_6320_family(ps) ||
2781 mv88e6xxx_6185_family(ps))
2782 reg = PORT_CONTROL_2_MAP_DA;
2784 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2785 mv88e6xxx_6165_family(ps) || mv88e6xxx_6320_family(ps))
2786 reg |= PORT_CONTROL_2_JUMBO_10240;
2788 if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) {
2789 /* Set the upstream port this port should use */
2790 reg |= dsa_upstream_port(ds);
2791 /* enable forwarding of unknown multicast addresses to
2794 if (port == dsa_upstream_port(ds))
2795 reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
2798 reg |= PORT_CONTROL_2_8021Q_DISABLED;
2801 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2802 PORT_CONTROL_2, reg);
2807 /* Port Association Vector: when learning source addresses
2808 * of packets, add the address to the address database using
2809 * a port bitmap that has only the bit for this port set and
2810 * the other bits clear.
2813 /* Disable learning for CPU port */
2814 if (dsa_is_cpu_port(ds, port))
2817 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
2821 /* Egress rate control 2: disable egress rate control. */
2822 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_RATE_CONTROL_2,
2827 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2828 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2829 mv88e6xxx_6320_family(ps)) {
2830 /* Do not limit the period of time that this port can
2831 * be paused for by the remote end or the period of
2832 * time that this port can pause the remote end.
2834 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2835 PORT_PAUSE_CTRL, 0x0000);
2839 /* Port ATU control: disable limiting the number of
2840 * address database entries that this port is allowed
2843 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2844 PORT_ATU_CONTROL, 0x0000);
2845 /* Priority Override: disable DA, SA and VTU priority
2848 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2849 PORT_PRI_OVERRIDE, 0x0000);
2853 /* Port Ethertype: use the Ethertype DSA Ethertype
2856 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2857 PORT_ETH_TYPE, ETH_P_EDSA);
2860 /* Tag Remap: use an identity 802.1p prio -> switch
2863 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2864 PORT_TAG_REGMAP_0123, 0x3210);
2868 /* Tag Remap 2: use an identity 802.1p prio -> switch
2871 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2872 PORT_TAG_REGMAP_4567, 0x7654);
2877 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2878 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2879 mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
2880 mv88e6xxx_6320_family(ps)) {
2881 /* Rate Control: disable ingress rate limiting. */
2882 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2883 PORT_RATE_CONTROL, 0x0001);
2888 /* Port Control 1: disable trunking, disable sending
2889 * learning messages to this port.
2891 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1, 0x0000);
2895 /* Port based VLAN map: give each port the same default address
2896 * database, and allow bidirectional communication between the
2897 * CPU and DSA port(s), and the other ports.
2899 ret = _mv88e6xxx_port_fid_set(ps, port, 0);
2903 ret = _mv88e6xxx_port_based_vlan_map(ps, port);
2907 /* Default VLAN ID and priority: don't set a default VLAN
2908 * ID, and set the default packet priority to zero.
2910 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_DEFAULT_VLAN,
/* Global (whole-switch) bring-up. Programs, in order: global control
 * (PPU enable where present, interrupts masked), the monitor/upstream
 * port for ingress/egress/ARP frames, remote-management off plus the DSA
 * device number, ATU aging (0x0140 with Learn2All), the IP ToS and IEEE
 * 802.1p priority maps, management-frame trapping (01:80:c2:00:00:0x and,
 * per family, :2x), the DSA routing table from the platform rtable,
 * trunk masks/mappings cleared, the cross-chip PVT reset, priority
 * override table cleared, ingress rate limiters reset, statistics
 * counters flushed, and finally a full ATU and VTU/STU flush.
 */
2918 static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
2920 struct dsa_switch *ds = ps->ds;
2921 u32 upstream_port = dsa_upstream_port(ds);
2926 /* Enable the PHY Polling Unit if present, don't discard any packets,
2927 * and mask all interrupt sources.
2930 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU) ||
2931 mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE))
2932 reg |= GLOBAL_CONTROL_PPU_ENABLE;
2934 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, reg);
2938 /* Configure the upstream port, and configure it as the port to which
2939 * ingress and egress and ARP monitor frames are to be sent.
2941 reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
2942 upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
2943 upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
2944 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
2948 /* Disable remote management, and set the switch's DSA device number. */
2949 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL_2,
2950 GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
2951 (ds->index & 0x1f));
2955 /* Set the default address aging time to 5 minutes, and
2956 * enable address learn messages to be sent to all message
2959 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
2960 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
2964 /* Configure the IP ToS mapping registers. */
2965 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
2968 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
2971 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
2974 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
2977 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
2980 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
2983 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
2986 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
2990 /* Configure the IEEE 802.1p priority mapping register. */
2991 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
2995 /* Send all frames with destination addresses matching
2996 * 01:80:c2:00:00:0x to the CPU port.
2998 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
3002 /* Ignore removed tag data on doubly tagged packets, disable
3003 * flow control messages, force flow control priority to the
3004 * highest, and send all special multicast frames to the CPU
3005 * port at the highest priority.
3007 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
3008 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
3009 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
3013 /* Program the DSA routing table. */
3014 for (i = 0; i < 32; i++) {
/* nexthop comes from the platform routing table for other chips in the
 * tree; the default (set on an elided line) applies otherwise.
 */
3017 if (ps->ds->cd->rtable &&
3018 i != ps->ds->index && i < ps->ds->dst->pd->nr_chips)
3019 nexthop = ps->ds->cd->rtable[i] & 0x1f;
3021 err = _mv88e6xxx_reg_write(
3023 GLOBAL2_DEVICE_MAPPING,
3024 GLOBAL2_DEVICE_MAPPING_UPDATE |
3025 (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) | nexthop);
3030 /* Clear all trunk masks. */
3031 for (i = 0; i < 8; i++) {
3032 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
3034 (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
3035 ((1 << ps->info->num_ports) - 1));
3040 /* Clear all trunk mappings. */
3041 for (i = 0; i < 16; i++) {
3042 err = _mv88e6xxx_reg_write(
3044 GLOBAL2_TRUNK_MAPPING,
3045 GLOBAL2_TRUNK_MAPPING_UPDATE |
3046 (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
3051 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
3052 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
3053 mv88e6xxx_6320_family(ps)) {
3054 /* Send all frames with destination addresses matching
3055 * 01:80:c2:00:00:2x to the CPU port.
3057 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
3058 GLOBAL2_MGMT_EN_2X, 0xffff);
3062 /* Initialise cross-chip port VLAN table to reset
3065 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
3066 GLOBAL2_PVT_ADDR, 0x9000);
3070 /* Clear the priority override table. */
3071 for (i = 0; i < 16; i++) {
3072 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
3073 GLOBAL2_PRIO_OVERRIDE,
3080 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
3081 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
3082 mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
3083 mv88e6xxx_6320_family(ps)) {
3084 /* Disable ingress rate limiting by resetting all
3085 * ingress rate limit registers to their initial
3088 for (i = 0; i < ps->info->num_ports; i++) {
3089 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
3097 /* Clear the statistics counters for all ports */
3098 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
3099 GLOBAL_STATS_OP_FLUSH_ALL);
3103 /* Wait for the flush to complete. */
3104 err = _mv88e6xxx_stats_wait(ps);
3108 /* Clear all ATU entries */
3109 err = _mv88e6xxx_atu_flush(ps, 0, true);
3113 /* Clear all the VTU and STU entries */
3114 err = _mv88e6xxx_vtu_stu_flush(ps);
/* DSA .setup hook: reset the switch, program the global registers and
 * then configure every port, all while holding the SMI mutex.
 */
3121 static int mv88e6xxx_setup(struct dsa_switch *ds)
3123 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
/* The EEPROM mutex only exists on chips that expose an EEPROM. */
3129 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
3130 mutex_init(&ps->eeprom_mutex);
/* Chips with a PHY Polling Unit need its access state initialized. */
3132 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
3133 mv88e6xxx_ppu_state_init(ps);
3135 mutex_lock(&ps->smi_mutex);
3137 err = mv88e6xxx_switch_reset(ps);
3141 err = mv88e6xxx_setup_global(ps);
/* Bring up each of the chip's ports in turn. */
3145 for (i = 0; i < ps->info->num_ports; i++) {
3146 err = mv88e6xxx_setup_port(ps, i);
3152 mutex_unlock(&ps->smi_mutex);
3157 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
3159 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3162 mutex_lock(&ps->smi_mutex);
3163 ret = _mv88e6xxx_phy_page_read(ps, port, page, reg);
3164 mutex_unlock(&ps->smi_mutex);
3169 int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
3172 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3175 mutex_lock(&ps->smi_mutex);
3176 ret = _mv88e6xxx_phy_page_write(ps, port, page, reg, val);
3177 mutex_unlock(&ps->smi_mutex);
3182 static int mv88e6xxx_port_to_phy_addr(struct mv88e6xxx_priv_state *ps,
3185 if (port >= 0 && port < ps->info->num_ports)
/* DSA .phy_read hook: read @regnum of the PHY behind @port, choosing
 * the access method (PPU, indirect SMI, or direct SMI) from the chip's
 * capability flags.
 */
3190 static int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
3192 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3193 int addr = mv88e6xxx_port_to_phy_addr(ps, port);
3199 mutex_lock(&ps->smi_mutex);
/* PPU-equipped chips must route PHY accesses through the PPU. */
3201 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
3202 ret = mv88e6xxx_phy_read_ppu(ps, addr, regnum);
3203 else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
3204 ret = _mv88e6xxx_phy_read_indirect(ps, addr, regnum);
3206 ret = _mv88e6xxx_phy_read(ps, addr, regnum);
3208 mutex_unlock(&ps->smi_mutex);
/* DSA .phy_write hook: write @val to @regnum of the PHY behind @port.
 * Mirrors mv88e6xxx_phy_read()'s dispatch between PPU, indirect and
 * direct SMI access.
 */
3212 static int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum,
3215 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3216 int addr = mv88e6xxx_port_to_phy_addr(ps, port);
3222 mutex_lock(&ps->smi_mutex);
3224 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
3225 ret = mv88e6xxx_phy_write_ppu(ps, addr, regnum, val);
3226 else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
3227 ret = _mv88e6xxx_phy_write_indirect(ps, addr, regnum, val);
3229 ret = _mv88e6xxx_phy_write(ps, addr, regnum, val);
3231 mutex_unlock(&ps->smi_mutex);
3235 #ifdef CONFIG_NET_DSA_HWMON
/* Read the die temperature of 88E61xx-style chips through PHY 0's
 * sensor registers.  The whole sequence runs under the SMI mutex.
 */
3237 static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
3239 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3245 mutex_lock(&ps->smi_mutex);
/* Write 6 to reg 0x16 — presumably the Marvell PHY page-select
 * register, switching to the sensor page.  TODO confirm vs datasheet.
 */
3247 ret = _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x6);
3251 /* Enable temperature sensor */
3252 ret = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
3256 ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret | (1 << 5));
3260 /* Wait for temperature to stabilize */
3261 usleep_range(10000, 12000);
3263 val = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
3269 /* Disable temperature sensor */
3270 ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret & ~(1 << 5));
/* Convert the raw 5-bit reading: (raw - 5) * 5 degrees C. */
3274 *temp = ((val & 0x1f) - 5) * 5;
/* Always restore page 0 before dropping the lock. */
3277 _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x0);
3278 mutex_unlock(&ps->smi_mutex);
/* Read the die temperature of 88E63xx-style chips.
 *
 * The sensor is exposed through PHY page 6, register 27; on the 6320
 * family it sits behind PHY 3, elsewhere behind PHY 0.  The low byte
 * is the raw reading with a -25 degree C offset.
 */
static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
	int val;

	*temp = 0;

	val = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
	if (val < 0)
		return val;

	*temp = (val & 0xff) - 25;

	return 0;
}
3299 static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
3301 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3303 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP))
3306 if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
3307 return mv88e63xx_get_temp(ds, temp);
3309 return mv88e61xx_get_temp(ds, temp);
3312 static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
3314 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3315 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
3318 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
3323 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
3327 *temp = (((ret >> 8) & 0x1f) * 5) - 25;
3332 static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
3334 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3335 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
3338 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
3341 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
3344 temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
3345 return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
3346 (ret & 0xe0ff) | (temp << 8));
3349 static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
3351 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3352 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
3355 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
3360 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
3364 *alarm = !!(ret & 0x40);
3368 #endif /* CONFIG_NET_DSA_HWMON */
/* Table of all supported switch models.  Looked up at probe time by
 * the 12-bit product number read from the PORT_SWITCH_ID register;
 * each entry carries the model's family, database size and feature
 * flags used throughout the driver.
 */
3370 static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3372 .prod_num = PORT_SWITCH_ID_PROD_NUM_6085,
3373 .family = MV88E6XXX_FAMILY_6097,
3374 .name = "Marvell 88E6085",
3375 .num_databases = 4096,
3377 .flags = MV88E6XXX_FLAGS_FAMILY_6097,
3381 .prod_num = PORT_SWITCH_ID_PROD_NUM_6095,
3382 .family = MV88E6XXX_FAMILY_6095,
3383 .name = "Marvell 88E6095/88E6095F",
3384 .num_databases = 256,
3386 .flags = MV88E6XXX_FLAGS_FAMILY_6095,
3390 .prod_num = PORT_SWITCH_ID_PROD_NUM_6123,
3391 .family = MV88E6XXX_FAMILY_6165,
3392 .name = "Marvell 88E6123",
3393 .num_databases = 4096,
3395 .flags = MV88E6XXX_FLAGS_FAMILY_6165,
3399 .prod_num = PORT_SWITCH_ID_PROD_NUM_6131,
3400 .family = MV88E6XXX_FAMILY_6185,
3401 .name = "Marvell 88E6131",
3402 .num_databases = 256,
3404 .flags = MV88E6XXX_FLAGS_FAMILY_6185,
3408 .prod_num = PORT_SWITCH_ID_PROD_NUM_6161,
3409 .family = MV88E6XXX_FAMILY_6165,
3410 .name = "Marvell 88E6161",
3411 .num_databases = 4096,
3413 .flags = MV88E6XXX_FLAGS_FAMILY_6165,
3417 .prod_num = PORT_SWITCH_ID_PROD_NUM_6165,
3418 .family = MV88E6XXX_FAMILY_6165,
3419 .name = "Marvell 88E6165",
3420 .num_databases = 4096,
3422 .flags = MV88E6XXX_FLAGS_FAMILY_6165,
3426 .prod_num = PORT_SWITCH_ID_PROD_NUM_6171,
3427 .family = MV88E6XXX_FAMILY_6351,
3428 .name = "Marvell 88E6171",
3429 .num_databases = 4096,
3431 .flags = MV88E6XXX_FLAGS_FAMILY_6351,
3435 .prod_num = PORT_SWITCH_ID_PROD_NUM_6172,
3436 .family = MV88E6XXX_FAMILY_6352,
3437 .name = "Marvell 88E6172",
3438 .num_databases = 4096,
3440 .flags = MV88E6XXX_FLAGS_FAMILY_6352,
3444 .prod_num = PORT_SWITCH_ID_PROD_NUM_6175,
3445 .family = MV88E6XXX_FAMILY_6351,
3446 .name = "Marvell 88E6175",
3447 .num_databases = 4096,
3449 .flags = MV88E6XXX_FLAGS_FAMILY_6351,
3453 .prod_num = PORT_SWITCH_ID_PROD_NUM_6176,
3454 .family = MV88E6XXX_FAMILY_6352,
3455 .name = "Marvell 88E6176",
3456 .num_databases = 4096,
3458 .flags = MV88E6XXX_FLAGS_FAMILY_6352,
3462 .prod_num = PORT_SWITCH_ID_PROD_NUM_6185,
3463 .family = MV88E6XXX_FAMILY_6185,
3464 .name = "Marvell 88E6185",
3465 .num_databases = 256,
3467 .flags = MV88E6XXX_FLAGS_FAMILY_6185,
3471 .prod_num = PORT_SWITCH_ID_PROD_NUM_6240,
3472 .family = MV88E6XXX_FAMILY_6352,
3473 .name = "Marvell 88E6240",
3474 .num_databases = 4096,
3476 .flags = MV88E6XXX_FLAGS_FAMILY_6352,
3480 .prod_num = PORT_SWITCH_ID_PROD_NUM_6320,
3481 .family = MV88E6XXX_FAMILY_6320,
3482 .name = "Marvell 88E6320",
3483 .num_databases = 4096,
3485 .flags = MV88E6XXX_FLAGS_FAMILY_6320,
3489 .prod_num = PORT_SWITCH_ID_PROD_NUM_6321,
3490 .family = MV88E6XXX_FAMILY_6320,
3491 .name = "Marvell 88E6321",
3492 .num_databases = 4096,
3494 .flags = MV88E6XXX_FLAGS_FAMILY_6320,
3498 .prod_num = PORT_SWITCH_ID_PROD_NUM_6350,
3499 .family = MV88E6XXX_FAMILY_6351,
3500 .name = "Marvell 88E6350",
3501 .num_databases = 4096,
3503 .flags = MV88E6XXX_FLAGS_FAMILY_6351,
3507 .prod_num = PORT_SWITCH_ID_PROD_NUM_6351,
3508 .family = MV88E6XXX_FAMILY_6351,
3509 .name = "Marvell 88E6351",
3510 .num_databases = 4096,
3512 .flags = MV88E6XXX_FLAGS_FAMILY_6351,
3516 .prod_num = PORT_SWITCH_ID_PROD_NUM_6352,
3517 .family = MV88E6XXX_FAMILY_6352,
3518 .name = "Marvell 88E6352",
3519 .num_databases = 4096,
3521 .flags = MV88E6XXX_FLAGS_FAMILY_6352,
3525 static const struct mv88e6xxx_info *
3526 mv88e6xxx_lookup_info(unsigned int prod_num, const struct mv88e6xxx_info *table,
3531 for (i = 0; i < num; ++i)
3532 if (table[i].prod_num == prod_num)
/* Legacy DSA probe path: identify the switch at @sw_addr on the host
 * MDIO bus, allocate the per-chip state on success, and return the
 * model name so the DSA core can report it.
 */
3538 static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
3539 struct device *host_dev, int sw_addr,
3542 const struct mv88e6xxx_info *info;
3543 struct mv88e6xxx_priv_state *ps;
3544 struct mii_bus *bus;
3546 int id, prod_num, rev;
3548 bus = dsa_host_dev_to_mii_bus(host_dev);
/* The product number is in bits 15:4 of the switch ID register. */
3552 id = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
3556 prod_num = (id & 0xfff0) >> 4;
3559 info = mv88e6xxx_lookup_info(prod_num, mv88e6xxx_table,
3560 ARRAY_SIZE(mv88e6xxx_table));
/* devm allocation: freed automatically if the DSA device goes away. */
3566 ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL);
3571 ps->sw_addr = sw_addr;
3573 mutex_init(&ps->smi_mutex);
3577 dev_info(&ps->bus->dev, "switch 0x%x probed: %s, revision %u\n",
3578 prod_num, name, rev);
/* DSA driver operations shared by all supported chips.  The HWMON
 * temperature callbacks are compiled in only with
 * CONFIG_NET_DSA_HWMON.
 */
3583 struct dsa_switch_driver mv88e6xxx_switch_driver = {
3584 .tag_protocol = DSA_TAG_PROTO_EDSA,
3585 .probe = mv88e6xxx_drv_probe,
3586 .setup = mv88e6xxx_setup,
3587 .set_addr = mv88e6xxx_set_addr,
3588 .phy_read = mv88e6xxx_phy_read,
3589 .phy_write = mv88e6xxx_phy_write,
3590 .adjust_link = mv88e6xxx_adjust_link,
3591 .get_strings = mv88e6xxx_get_strings,
3592 .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
3593 .get_sset_count = mv88e6xxx_get_sset_count,
3594 .set_eee = mv88e6xxx_set_eee,
3595 .get_eee = mv88e6xxx_get_eee,
3596 #ifdef CONFIG_NET_DSA_HWMON
3597 .get_temp = mv88e6xxx_get_temp,
3598 .get_temp_limit = mv88e6xxx_get_temp_limit,
3599 .set_temp_limit = mv88e6xxx_set_temp_limit,
3600 .get_temp_alarm = mv88e6xxx_get_temp_alarm,
3602 .get_eeprom_len = mv88e6xxx_get_eeprom_len,
3603 .get_eeprom = mv88e6xxx_get_eeprom,
3604 .set_eeprom = mv88e6xxx_set_eeprom,
3605 .get_regs_len = mv88e6xxx_get_regs_len,
3606 .get_regs = mv88e6xxx_get_regs,
3607 .port_bridge_join = mv88e6xxx_port_bridge_join,
3608 .port_bridge_leave = mv88e6xxx_port_bridge_leave,
3609 .port_stp_state_set = mv88e6xxx_port_stp_state_set,
3610 .port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
3611 .port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
3612 .port_vlan_add = mv88e6xxx_port_vlan_add,
3613 .port_vlan_del = mv88e6xxx_port_vlan_del,
3614 .port_vlan_dump = mv88e6xxx_port_vlan_dump,
3615 .port_fdb_prepare = mv88e6xxx_port_fdb_prepare,
3616 .port_fdb_add = mv88e6xxx_port_fdb_add,
3617 .port_fdb_del = mv88e6xxx_port_fdb_del,
3618 .port_fdb_dump = mv88e6xxx_port_fdb_dump,
/* mdio_device probe: allocate the dsa_switch and private state in one
 * chunk, identify the chip from its switch ID register, and pick up
 * the optional reset GPIO and "eeprom-length" property from DT.
 */
3621 int mv88e6xxx_probe(struct mdio_device *mdiodev)
3623 struct device *dev = &mdiodev->dev;
3624 struct device_node *np = dev->of_node;
3625 struct mv88e6xxx_priv_state *ps;
3626 int id, prod_num, rev;
3627 struct dsa_switch *ds;
/* ds and ps share one allocation; ps lives directly after ds. */
3631 ds = devm_kzalloc(dev, sizeof(*ds) + sizeof(*ps), GFP_KERNEL);
3635 ps = (struct mv88e6xxx_priv_state *)(ds + 1);
3640 ps->bus = mdiodev->bus;
3641 ps->sw_addr = mdiodev->addr;
3642 mutex_init(&ps->smi_mutex);
/* Hold a reference on the MDIO bus device for the driver's lifetime;
 * dropped again in mv88e6xxx_remove().
 */
3644 get_device(&ps->bus->dev);
3646 ds->drv = &mv88e6xxx_switch_driver;
/* Product number is in bits 15:4 of the switch ID register. */
3648 id = mv88e6xxx_reg_read(ps, REG_PORT(0), PORT_SWITCH_ID);
3652 prod_num = (id & 0xfff0) >> 4;
3655 ps->info = mv88e6xxx_lookup_info(prod_num, mv88e6xxx_table,
3656 ARRAY_SIZE(mv88e6xxx_table));
/* The reset GPIO is optional, hence -ENOENT is not fatal. */
3660 ps->reset = devm_gpiod_get(&mdiodev->dev, "reset", GPIOD_ASIS);
3661 if (IS_ERR(ps->reset)) {
3662 err = PTR_ERR(ps->reset);
3663 if (err == -ENOENT) {
3664 /* Optional, so not an error */
/* Only honour "eeprom-length" on chips that actually have one. */
3671 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM) &&
3672 !of_property_read_u32(np, "eeprom-length", &eeprom_len))
3673 ps->eeprom_len = eeprom_len;
3675 dev_set_drvdata(dev, ds);
3677 dev_info(dev, "switch 0x%x probed: %s, revision %u\n",
3678 prod_num, ps->info->name, rev);
3683 static void mv88e6xxx_remove(struct mdio_device *mdiodev)
3685 struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
3686 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3688 put_device(&ps->bus->dev);
3691 static const struct of_device_id mv88e6xxx_of_match[] = {
3692 { .compatible = "marvell,mv88e6085" },
3696 MODULE_DEVICE_TABLE(of, mv88e6xxx_of_match);
/* MDIO driver glue binding the probe/remove callbacks and the OF
 * match table above.
 */
3698 static struct mdio_driver mv88e6xxx_driver = {
3699 .probe = mv88e6xxx_probe,
3700 .remove = mv88e6xxx_remove,
3702 .name = "mv88e6085",
3703 .of_match_table = mv88e6xxx_of_match,
3707 static int __init mv88e6xxx_init(void)
3709 register_switch_driver(&mv88e6xxx_switch_driver);
3710 return mdio_driver_register(&mv88e6xxx_driver);
3712 module_init(mv88e6xxx_init);
3714 static void __exit mv88e6xxx_cleanup(void)
3716 mdio_driver_unregister(&mv88e6xxx_driver);
3717 unregister_switch_driver(&mv88e6xxx_switch_driver);
3719 module_exit(mv88e6xxx_cleanup);
/* Module metadata. */
3721 MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
3722 MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
3723 MODULE_LICENSE("GPL");