2 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
3 * Copyright (c) 2008 Marvell Semiconductor
5 * Copyright (c) 2015 CMC Electronics, Inc.
6 * Added support for VLAN Table Unit operations
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
14 #include <linux/delay.h>
15 #include <linux/etherdevice.h>
16 #include <linux/ethtool.h>
17 #include <linux/if_bridge.h>
18 #include <linux/jiffies.h>
19 #include <linux/list.h>
20 #include <linux/module.h>
21 #include <linux/netdevice.h>
22 #include <linux/phy.h>
24 #include <net/switchdev.h>
25 #include "mv88e6xxx.h"
27 /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
28 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
29 * will be directly accessible on some {device address,register address}
30 * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
31 * will only respond to SMI transactions to that specific address, and
32 * an indirect addressing mechanism needs to be used to access its
/* Poll the indirect-access SMI command register until the BUSY bit
 * clears.  Bounded to 16 read attempts; NOTE(review): the timeout
 * error path is not visible in this fragment.
 */
35 static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
40 for (i = 0; i < 16; i++) {
41 ret = mdiobus_read_nested(bus, sw_addr, SMI_CMD);
45 if ((ret & SMI_CMD_BUSY) == 0)
/* Read a 16-bit switch register.  When sw_addr is 0 the switch maps all
 * registers directly onto the SMI bus, so a plain mdiobus read suffices;
 * otherwise issue an indirect clause-22 read through SMI_CMD/SMI_DATA.
 * NOTE(review): the sw_addr==0 guard and error checks between the
 * visible steps are missing from this fragment.
 */
52 int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
57 return mdiobus_read_nested(bus, addr, reg);
59 /* Wait for the bus to become free. */
60 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
64 /* Transmit the read command. */
65 ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
66 SMI_CMD_OP_22_READ | (addr << 5) | reg);
70 /* Wait for the read command to complete. */
71 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
/* Fetch the result of the completed read. */
76 ret = mdiobus_read_nested(bus, sw_addr, SMI_DATA);
83 /* Must be called with SMI mutex held */
84 static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
86 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
92 ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
/* Trace every register read for debugging. */
96 dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
/* Public entry point: take the SMI mutex around the locked helper. */
102 int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
104 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
107 mutex_lock(&ps->smi_mutex);
108 ret = _mv88e6xxx_reg_read(ds, addr, reg);
109 mutex_unlock(&ps->smi_mutex);
/* Write a 16-bit switch register.  Mirrors __mv88e6xxx_reg_read: direct
 * mdiobus write when the switch is strapped to address 0, otherwise an
 * indirect clause-22 write via SMI_DATA then SMI_CMD.  NOTE(review):
 * the sw_addr==0 guard and intermediate error checks are not visible
 * in this fragment.
 */
114 int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
120 return mdiobus_write_nested(bus, addr, reg, val);
122 /* Wait for the bus to become free. */
123 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
127 /* Transmit the data to write. */
128 ret = mdiobus_write_nested(bus, sw_addr, SMI_DATA, val);
132 /* Transmit the write command. */
133 ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
134 SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
138 /* Wait for the write command to complete. */
139 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
146 /* Must be called with SMI mutex held */
147 static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
150 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
/* Trace every register write for debugging. */
155 dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
158 return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
/* Public entry point: take the SMI mutex around the locked helper. */
161 int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
163 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
166 mutex_lock(&ps->smi_mutex);
167 ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
168 mutex_unlock(&ps->smi_mutex);
/* Program the switch's own MAC address into the three 16-bit Global
 * MAC registers, two bytes per register (big-endian byte pairing).
 */
173 int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
175 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
176 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
177 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
/* Program the switch MAC one byte at a time through the Global2
 * SWITCH_MAC register: each write carries the byte index in bits 8+
 * plus the BUSY bit, then we poll (up to 16 times) for BUSY to clear
 * before moving to the next byte.
 */
182 int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
187 for (i = 0; i < 6; i++) {
190 /* Write the MAC address byte. */
191 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
192 GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);
194 /* Wait for the write to complete. */
195 for (j = 0; j < 16; j++) {
196 ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
197 if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
207 /* Must be called with SMI mutex held */
/* PHY registers are reached through the ordinary register-access path;
 * the PHY address doubles as the SMI device address here.
 */
208 static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
211 return _mv88e6xxx_reg_read(ds, addr, regnum);
215 /* Must be called with SMI mutex held */
/* Write counterpart of _mv88e6xxx_phy_read. */
216 static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
220 return _mv88e6xxx_reg_write(ds, addr, regnum, val);
224 #ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
/* Clear the PPU enable bit in Global Control, then poll Global Status
 * (for up to 1 second, sleeping 1-2ms between reads) until the PPU
 * state field leaves the "polling" state.  NOTE(review): the timeout
 * error return is not visible in this fragment.
 */
225 static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
228 unsigned long timeout;
230 ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
231 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
232 ret & ~GLOBAL_CONTROL_PPU_ENABLE);
234 timeout = jiffies + 1 * HZ;
235 while (time_before(jiffies, timeout)) {
236 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
237 usleep_range(1000, 2000);
238 if ((ret & GLOBAL_STATUS_PPU_MASK) !=
239 GLOBAL_STATUS_PPU_POLLING)
/* Inverse of mv88e6xxx_ppu_disable: set the enable bit and wait for
 * the PPU state to report "polling" again.
 */
246 static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
249 unsigned long timeout;
251 ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
252 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);
254 timeout = jiffies + 1 * HZ;
255 while (time_before(jiffies, timeout)) {
256 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
257 usleep_range(1000, 2000);
258 if ((ret & GLOBAL_STATUS_PPU_MASK) ==
259 GLOBAL_STATUS_PPU_POLLING)
/* Deferred-work handler that re-enables the PPU once no one holds
 * ppu_mutex.  trylock: if someone is mid-access, they will reschedule
 * the re-enable themselves via ppu_access_put's timer.
 */
266 static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
268 struct mv88e6xxx_priv_state *ps;
270 ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
271 if (mutex_trylock(&ps->ppu_mutex)) {
/* Recover the dsa_switch: priv state is allocated immediately after
 * the dsa_switch struct, so step back one dsa_switch-sized slot.
 * Fragile layout assumption -- matches ds_to_priv's inverse.
 */
272 struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
274 if (mv88e6xxx_ppu_enable(ds) == 0)
275 ps->ppu_disabled = 0;
276 mutex_unlock(&ps->ppu_mutex);
/* Timer callback: punt the re-enable to process context, since the
 * register accesses it requires can sleep.
 */
280 static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
282 struct mv88e6xxx_priv_state *ps = (void *)_ps;
284 schedule_work(&ps->ppu_work);
/* Acquire exclusive PHY access: take ppu_mutex and make sure the PPU
 * is stopped; the mutex stays held until mv88e6xxx_ppu_access_put().
 */
287 static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
289 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
292 mutex_lock(&ps->ppu_mutex);
294 /* If the PHY polling unit is enabled, disable it so that
295 * we can access the PHY registers. If it was already
296 * disabled, cancel the timer that is going to re-enable
299 if (!ps->ppu_disabled) {
300 ret = mv88e6xxx_ppu_disable(ds);
/* Disable failed: drop the mutex and bail. */
302 mutex_unlock(&ps->ppu_mutex);
305 ps->ppu_disabled = 1;
/* Already disabled: just cancel the pending re-enable. */
307 del_timer(&ps->ppu_timer);
/* Release PHY access: arm a 10ms timer to re-enable the PPU so that
 * back-to-back accesses don't thrash the enable/disable sequence.
 */
314 static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
316 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
318 /* Schedule a timer to re-enable the PHY polling unit. */
319 mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
320 mutex_unlock(&ps->ppu_mutex);
/* One-time init of the PPU bookkeeping: mutex, work item and timer. */
323 void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
325 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
327 mutex_init(&ps->ppu_mutex);
328 INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
329 init_timer(&ps->ppu_timer);
330 ps->ppu_timer.data = (unsigned long)ps;
331 ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
/* PHY read for chips whose PPU must be paused around PHY access. */
334 int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
338 ret = mv88e6xxx_ppu_access_get(ds);
340 ret = mv88e6xxx_reg_read(ds, addr, regnum);
341 mv88e6xxx_ppu_access_put(ds);
/* PHY write counterpart of mv88e6xxx_phy_read_ppu. */
347 int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
352 ret = mv88e6xxx_ppu_access_get(ds);
354 ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
355 mv88e6xxx_ppu_access_put(ds);
/* Chip-family predicates: each switches on the product ID read from
 * the port registers (via priv state) and returns true when the chip
 * belongs to that family.  NOTE(review): the switch(...) statement,
 * return true and return false lines are not visible in this fragment.
 */
362 static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
364 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
367 case PORT_SWITCH_ID_6031:
368 case PORT_SWITCH_ID_6061:
369 case PORT_SWITCH_ID_6035:
370 case PORT_SWITCH_ID_6065:
376 static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
378 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
381 case PORT_SWITCH_ID_6092:
382 case PORT_SWITCH_ID_6095:
388 static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
390 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
393 case PORT_SWITCH_ID_6046:
394 case PORT_SWITCH_ID_6085:
395 case PORT_SWITCH_ID_6096:
396 case PORT_SWITCH_ID_6097:
402 static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
404 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
407 case PORT_SWITCH_ID_6123:
408 case PORT_SWITCH_ID_6161:
409 case PORT_SWITCH_ID_6165:
415 static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
417 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
420 case PORT_SWITCH_ID_6121:
421 case PORT_SWITCH_ID_6122:
422 case PORT_SWITCH_ID_6152:
423 case PORT_SWITCH_ID_6155:
424 case PORT_SWITCH_ID_6182:
425 case PORT_SWITCH_ID_6185:
426 case PORT_SWITCH_ID_6108:
427 case PORT_SWITCH_ID_6131:
433 static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
435 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
438 case PORT_SWITCH_ID_6320:
439 case PORT_SWITCH_ID_6321:
445 static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
447 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
450 case PORT_SWITCH_ID_6171:
451 case PORT_SWITCH_ID_6175:
452 case PORT_SWITCH_ID_6350:
453 case PORT_SWITCH_ID_6351:
459 static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
461 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
464 case PORT_SWITCH_ID_6172:
465 case PORT_SWITCH_ID_6176:
466 case PORT_SWITCH_ID_6240:
467 case PORT_SWITCH_ID_6352:
473 /* We expect the switch to perform auto negotiation if there is a real
474 * phy. However, in the case of a fixed link phy, we force the port
475 * settings from the fixed link settings.
477 void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
478 struct phy_device *phydev)
480 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
/* Real PHYs autonegotiate; only fixed-link pseudo-PHYs are forced. */
484 if (!phy_is_pseudo_fixed_link(phydev))
487 mutex_lock(&ps->smi_mutex);
/* Read-modify-write the port's PCS control register. */
489 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
/* Clear all forced-link/duplex/speed bits before re-forcing them. */
493 reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
494 PORT_PCS_CTRL_FORCE_LINK |
495 PORT_PCS_CTRL_DUPLEX_FULL |
496 PORT_PCS_CTRL_FORCE_DUPLEX |
497 PORT_PCS_CTRL_UNFORCED);
499 reg |= PORT_PCS_CTRL_FORCE_LINK;
501 reg |= PORT_PCS_CTRL_LINK_UP;
/* 6065-family chips top out at 100 Mb/s; refuse faster settings.
 * NOTE(review): the bail-out statement itself is not visible here.
 */
503 if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
506 switch (phydev->speed) {
508 reg |= PORT_PCS_CTRL_1000;
511 reg |= PORT_PCS_CTRL_100;
514 reg |= PORT_PCS_CTRL_10;
517 pr_info("Unknown speed");
521 reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
522 if (phydev->duplex == DUPLEX_FULL)
523 reg |= PORT_PCS_CTRL_DUPLEX_FULL;
/* Only the last two ports of 6352/6351 families support RGMII delay
 * tuning; apply RX/TX clock delay bits per the fixed-link mode.
 */
525 if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) &&
526 (port >= ps->num_ports - 2)) {
527 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
528 reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
529 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
530 reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
531 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
532 reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
533 PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
535 _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);
538 mutex_unlock(&ps->smi_mutex);
541 /* Must be called with SMI mutex held */
/* Poll the global stats-operation register (up to 10 tries) until the
 * BUSY bit clears.  NOTE(review): the inter-poll delay and timeout
 * return are not visible in this fragment.
 */
542 static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
547 for (i = 0; i < 10; i++) {
548 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
549 if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
556 /* Must be called with SMI mutex held */
557 static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
/* 6320/6352 families encode the port differently in the capture op. */
561 if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
562 port = (port + 1) << 5;
564 /* Snapshot the hardware statistics counters for this port. */
565 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
566 GLOBAL_STATS_OP_CAPTURE_PORT |
567 GLOBAL_STATS_OP_HIST_RX_TX | port);
571 /* Wait for the snapshotting to complete. */
572 ret = _mv88e6xxx_stats_wait(ds);
579 /* Must be called with SMI mutex held */
/* Read one captured 32-bit counter: issue a READ_CAPTURED op for the
 * stat index, wait, then assemble the value from the two 16-bit
 * counter halves.  Errors leave *val as set by the missing init line.
 */
580 static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
587 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
588 GLOBAL_STATS_OP_READ_CAPTURED |
589 GLOBAL_STATS_OP_HIST_RX_TX | stat);
593 ret = _mv88e6xxx_stats_wait(ds);
/* High 16 bits of the captured counter. */
597 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
/* Low 16 bits of the captured counter. */
603 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
/* ethtool statistics table: { name, size-in-bytes, register offset }.
 * Offsets < 0x100 index the captured global stats unit; offsets >=
 * 0x100 (the last three entries) map to per-port registers and only
 * exist on some chips (see have_sw_in_discards()).
 */
610 static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
611 { "in_good_octets", 8, 0x00, },
612 { "in_bad_octets", 4, 0x02, },
613 { "in_unicast", 4, 0x04, },
614 { "in_broadcasts", 4, 0x06, },
615 { "in_multicasts", 4, 0x07, },
616 { "in_pause", 4, 0x16, },
617 { "in_undersize", 4, 0x18, },
618 { "in_fragments", 4, 0x19, },
619 { "in_oversize", 4, 0x1a, },
620 { "in_jabber", 4, 0x1b, },
621 { "in_rx_error", 4, 0x1c, },
622 { "in_fcs_error", 4, 0x1d, },
623 { "out_octets", 8, 0x0e, },
624 { "out_unicast", 4, 0x10, },
625 { "out_broadcasts", 4, 0x13, },
626 { "out_multicasts", 4, 0x12, },
627 { "out_pause", 4, 0x15, },
628 { "excessive", 4, 0x11, },
629 { "collisions", 4, 0x1e, },
630 { "deferred", 4, 0x05, },
631 { "single", 4, 0x14, },
632 { "multiple", 4, 0x17, },
633 { "out_fcs_error", 4, 0x03, },
634 { "late", 4, 0x1f, },
635 { "hist_64bytes", 4, 0x08, },
636 { "hist_65_127bytes", 4, 0x09, },
637 { "hist_128_255bytes", 4, 0x0a, },
638 { "hist_256_511bytes", 4, 0x0b, },
639 { "hist_512_1023bytes", 4, 0x0c, },
640 { "hist_1024_max_bytes", 4, 0x0d, },
641 /* Not all devices have the following counters */
642 { "sw_in_discards", 4, 0x110, },
643 { "sw_in_filtered", 2, 0x112, },
644 { "sw_out_filtered", 2, 0x113, },
/* True for chips that implement the last three (per-port) counters of
 * mv88e6xxx_hw_stats.  NOTE(review): the switch statement and return
 * lines are not visible in this fragment.
 */
648 static bool have_sw_in_discards(struct dsa_switch *ds)
650 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
653 case PORT_SWITCH_ID_6095: case PORT_SWITCH_ID_6161:
654 case PORT_SWITCH_ID_6165: case PORT_SWITCH_ID_6171:
655 case PORT_SWITCH_ID_6172: case PORT_SWITCH_ID_6176:
656 case PORT_SWITCH_ID_6182: case PORT_SWITCH_ID_6185:
657 case PORT_SWITCH_ID_6352:
/* Copy nr_stats counter names from the stats table into the ethtool
 * strings buffer, one ETH_GSTRING_LEN slot per counter.
 */
664 static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
666 struct mv88e6xxx_hw_stat *stats,
667 int port, uint8_t *data)
671 for (i = 0; i < nr_stats; i++) {
672 memcpy(data + i * ETH_GSTRING_LEN,
673 stats[i].string, ETH_GSTRING_LEN);
/* Fetch one counter value.  reg >= 0x100 means a per-port register
 * (read directly, one or two 16-bit halves depending on size);
 * otherwise read through the captured stats unit, combining a second
 * 16-bit word for 64-bit counters.
 */
677 static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
679 struct mv88e6xxx_hw_stat *stats,
682 struct mv88e6xxx_hw_stat *s = stats + stat;
688 if (s->reg >= 0x100) {
689 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
695 if (s->sizeof_stat == 4) {
696 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
703 _mv88e6xxx_stats_read(ds, s->reg, &low);
704 if (s->sizeof_stat == 8)
705 _mv88e6xxx_stats_read(ds, s->reg + 1, &high);
707 value = (((u64)high) << 16) | low;
/* Snapshot the port's counters under the SMI mutex, then read each
 * one into the caller's array.  An early snapshot failure unlocks
 * and returns without touching data[].
 */
711 static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
713 struct mv88e6xxx_hw_stat *stats,
714 int port, uint64_t *data)
716 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
720 mutex_lock(&ps->smi_mutex);
722 ret = _mv88e6xxx_stats_snapshot(ds, port);
724 mutex_unlock(&ps->smi_mutex);
728 /* Read each of the counters. */
729 for (i = 0; i < nr_stats; i++)
730 data[i] = _mv88e6xxx_get_ethtool_stat(ds, i, stats, port);
732 mutex_unlock(&ps->smi_mutex);
735 /* All the statistics in the table */
/* Public string/count/stat entry points: include the last three table
 * entries only when the chip has the extra per-port counters.
 */
737 mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
739 if (have_sw_in_discards(ds))
740 _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
741 mv88e6xxx_hw_stats, port, data);
743 _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
744 mv88e6xxx_hw_stats, port, data);
747 int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
749 if (have_sw_in_discards(ds))
750 return ARRAY_SIZE(mv88e6xxx_hw_stats);
751 return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
755 mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
756 int port, uint64_t *data)
758 if (have_sw_in_discards(ds))
759 _mv88e6xxx_get_ethtool_stats(
760 ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
761 mv88e6xxx_hw_stats, port, data);
763 _mv88e6xxx_get_ethtool_stats(
764 ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
765 mv88e6xxx_hw_stats, port, data);
/* ethtool register dump covers the 32 16-bit per-port registers. */
768 int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
770 return 32 * sizeof(u16);
/* Dump all 32 port registers; slots are pre-filled with 0xff so a
 * failed read is visible in the output.
 */
773 void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
774 struct ethtool_regs *regs, void *_p)
781 memset(p, 0xff, 32 * sizeof(u16));
783 for (i = 0; i < 32; i++) {
786 ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
792 /* Must be called with SMI lock held */
/* Generic poll: re-read (reg, offset) for up to 100ms, sleeping 1-2ms
 * per iteration, until the bits in mask clear.  NOTE(review): the
 * success/timeout return statements are not visible in this fragment.
 */
793 static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
796 unsigned long timeout = jiffies + HZ / 10;
798 while (time_before(jiffies, timeout)) {
801 ret = _mv88e6xxx_reg_read(ds, reg, offset);
807 usleep_range(1000, 2000);
/* Locked wrapper around _mv88e6xxx_wait. */
812 static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
814 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
817 mutex_lock(&ps->smi_mutex);
818 ret = _mv88e6xxx_wait(ds, reg, offset, mask);
819 mutex_unlock(&ps->smi_mutex);
/* Wait for the Global2 indirect-SMI unit to go idle. */
824 static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
826 return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
827 GLOBAL2_SMI_OP_BUSY);
/* Wait for the EEPROM to finish its initial register load. */
830 int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
832 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
833 GLOBAL2_EEPROM_OP_LOAD);
/* Wait for a pending EEPROM operation to complete. */
836 int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
838 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
839 GLOBAL2_EEPROM_OP_BUSY);
842 /* Must be called with SMI lock held */
/* Wait for the address translation unit (ATU) to go idle. */
843 static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
845 return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
849 /* Must be called with SMI mutex held */
/* Indirect PHY read through the Global2 SMI unit: issue the clause-22
 * read op, wait for completion, then fetch the data register.
 */
850 static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
855 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
856 GLOBAL2_SMI_OP_22_READ | (addr << 5) |
861 ret = _mv88e6xxx_phy_wait(ds);
865 return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
868 /* Must be called with SMI mutex held */
/* Indirect PHY write: stage the value in SMI_DATA, then issue the
 * clause-22 write op and wait for the unit to go idle.
 */
869 static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
874 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
878 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
879 GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
882 return _mv88e6xxx_phy_wait(ds);
/* Report EEE state: bits 9/8 of PHY register 16 give the enable and
 * tx-LPI flags; the port status register's EEE bit gives the active
 * state.  NOTE(review): assumes register 16 here is the vendor EEE
 * control page -- confirm against the chip datasheet.
 */
885 int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
887 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
890 mutex_lock(&ps->smi_mutex);
892 reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
896 e->eee_enabled = !!(reg & 0x0200);
897 e->tx_lpi_enabled = !!(reg & 0x0100);
899 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
903 e->eee_active = !!(reg & PORT_STATUS_EEE);
907 mutex_unlock(&ps->smi_mutex);
/* Read-modify-write the same PHY register to apply the requested EEE
 * settings.  NOTE(review): the bit-setting lines between the read and
 * write are not visible in this fragment.
 */
911 int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
912 struct phy_device *phydev, struct ethtool_eee *e)
914 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
918 mutex_lock(&ps->smi_mutex);
920 ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
927 if (e->tx_lpi_enabled)
930 ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
932 mutex_unlock(&ps->smi_mutex);
/* Issue an ATU operation and wait for it to complete. */
937 static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 cmd)
941 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
945 return _mv88e6xxx_atu_wait(ds);
/* Encode an ATU entry into the GLOBAL_ATU_DATA register: entry state
 * in the low bits, plus either a trunk ID or a port vector for entries
 * that are in use.
 */
948 static int _mv88e6xxx_atu_data_write(struct dsa_switch *ds,
949 struct mv88e6xxx_atu_entry *entry)
951 u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
953 if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
954 unsigned int mask, shift;
957 data |= GLOBAL_ATU_DATA_TRUNK;
958 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
959 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
961 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
962 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
965 data |= (entry->portv_trunkid << shift) & mask;
968 return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, data);
/* Common driver for ATU flush/move ops: program the entry data and
 * (for per-FID variants) the FID register, then pick the all-entries
 * or non-static-only opcode based on static_too.
 */
971 static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds,
972 struct mv88e6xxx_atu_entry *entry,
978 err = _mv88e6xxx_atu_wait(ds);
982 err = _mv88e6xxx_atu_data_write(ds, entry);
987 err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
992 op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
993 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
995 op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
996 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
999 return _mv88e6xxx_atu_cmd(ds, op);
/* Flush all (or all non-static) entries from one FID. */
1002 static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too)
1004 struct mv88e6xxx_atu_entry entry = {
1006 .state = 0, /* EntryState bits must be 0 */
1009 return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
/* Move all entries of one FID from one port to another. */
1012 static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port,
1013 int to_port, bool static_too)
1015 struct mv88e6xxx_atu_entry entry = {
1020 /* EntryState bits must be 0xF */
1021 entry.state = GLOBAL_ATU_DATA_STATE_MASK;
1023 /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
1024 entry.portv_trunkid = (to_port & 0x0f) << 4;
1025 entry.portv_trunkid |= from_port & 0x0f;
1027 return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
/* Remove a port's entries from a FID by "moving" them to port 0xF. */
1030 static int _mv88e6xxx_atu_remove(struct dsa_switch *ds, u16 fid, int port,
1033 /* Destination port 0xF means remove the entries */
1034 return _mv88e6xxx_atu_move(ds, fid, port, 0x0f, static_too);
/* Change a port's 802.1D state in PORT_CONTROL, flushing its learned
 * addresses first when transitioning from a learning/forwarding state
 * down to disabled/blocking/listening.
 */
1037 static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
1039 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1043 mutex_lock(&ps->smi_mutex);
1045 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
1051 oldstate = reg & PORT_CONTROL_STATE_MASK;
1052 if (oldstate != state) {
1053 /* Flush forwarding database if we're moving a port
1054 * from Learning or Forwarding state to Disabled or
1055 * Blocking or Listening state.
1057 if (oldstate >= PORT_CONTROL_STATE_LEARNING &&
1058 state <= PORT_CONTROL_STATE_BLOCKING) {
1059 ret = _mv88e6xxx_atu_remove(ds, 0, port, false);
1063 reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
1064 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
1069 mutex_unlock(&ps->smi_mutex);
/* Set a port's output-port mask (port-based VLAN map), clamped to the
 * ports that actually exist on this chip.
 */
1073 static int _mv88e6xxx_port_vlan_map_set(struct dsa_switch *ds, int port,
1076 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1077 const u16 mask = (1 << ps->num_ports) - 1;
1080 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
1085 reg |= output_ports & mask;
1087 return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
/* Map a bridge STP state to the hardware port-control state and defer
 * the actual register write to the bridge work item (the callback may
 * run with softirqs disabled and the register write can sleep).
 */
1090 int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
1092 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1096 case BR_STATE_DISABLED:
1097 stp_state = PORT_CONTROL_STATE_DISABLED;
1099 case BR_STATE_BLOCKING:
1100 case BR_STATE_LISTENING:
1101 stp_state = PORT_CONTROL_STATE_BLOCKING;
1103 case BR_STATE_LEARNING:
1104 stp_state = PORT_CONTROL_STATE_LEARNING;
1106 case BR_STATE_FORWARDING:
1108 stp_state = PORT_CONTROL_STATE_FORWARDING;
1112 netdev_dbg(ds->ports[port], "port state %d [%d]\n", state, stp_state);
1114 /* mv88e6xxx_port_stp_update may be called with softirqs disabled,
1115 * so we can not update the port state directly but need to schedule it.
1117 ps->port_state[port] = stp_state;
1118 set_bit(port, &ps->port_state_update_mask);
1119 schedule_work(&ps->bridge_work);
/* Read the port's default VID from PORT_DEFAULT_VLAN. */
1124 int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
1128 ret = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
1132 *pvid = ret & PORT_DEFAULT_VLAN_MASK;
/* Write the port's default VID; values are masked to the VID field. */
1137 int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
1139 return mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
1140 pvid & PORT_DEFAULT_VLAN_MASK);
/* Wait for the VLAN table unit (VTU) to go idle. */
1143 static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
1145 return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
1146 GLOBAL_VTU_OP_BUSY);
/* Issue a VTU operation and wait for completion. */
1149 static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
1153 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
1157 return _mv88e6xxx_vtu_wait(ds);
/* Flush every VTU and STU entry. */
1160 static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
1164 ret = _mv88e6xxx_vtu_wait(ds)
1168 return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
/* Unpack per-port VTU/STU data nibbles from the three 16-bit data
 * registers (4 ports per register).  nibble_offset selects member-tag
 * bits (0) or port-state bits (2) within each nibble.
 */
1171 static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
1172 struct mv88e6xxx_vtu_stu_entry *entry,
1173 unsigned int nibble_offset)
1175 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1180 for (i = 0; i < 3; ++i) {
1181 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1182 GLOBAL_VTU_DATA_0_3 + i);
1189 for (i = 0; i < ps->num_ports; ++i) {
1190 unsigned int shift = (i % 4) * 4 + nibble_offset;
1191 u16 reg = regs[i / 4];
1193 entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
/* Pack counterpart of _mv88e6xxx_vtu_stu_data_read: assemble the three
 * data registers from the per-port nibbles and write them back.
 */
1199 static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
1200 struct mv88e6xxx_vtu_stu_entry *entry,
1201 unsigned int nibble_offset)
1203 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1204 u16 regs[3] = { 0 };
1208 for (i = 0; i < ps->num_ports; ++i) {
1209 unsigned int shift = (i % 4) * 4 + nibble_offset;
1210 u8 data = entry->data[i];
1212 regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
1215 for (i = 0; i < 3; ++i) {
1216 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
1217 GLOBAL_VTU_DATA_0_3 + i, regs[i]);
/* Load a VID into GLOBAL_VTU_VID ahead of a GetNext operation. */
1225 static int _mv88e6xxx_vtu_vid_write(struct dsa_switch *ds, u16 vid)
1227 return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
1228 vid & GLOBAL_VTU_VID_MASK);
/* VTU GetNext: fetch the entry following the VID previously loaded
 * with _mv88e6xxx_vtu_vid_write.  Valid entries additionally carry
 * per-port member tags, and on 6097/6165/6351/6352 families an FID
 * and an STU SID.  NOTE(review): the final copy of `next` into *entry
 * is not visible in this fragment.
 */
1231 static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
1232 struct mv88e6xxx_vtu_stu_entry *entry)
1234 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1237 ret = _mv88e6xxx_vtu_wait(ds);
1241 ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
1245 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
1249 next.vid = ret & GLOBAL_VTU_VID_MASK;
1250 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
/* Nibble offset 0 selects the member-tag bits. */
1253 ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
1257 if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
1258 mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
1259 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1264 next.fid = ret & GLOBAL_VTU_FID_MASK;
1266 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1271 next.sid = ret & GLOBAL_VTU_SID_MASK;
/* VTU LoadPurge: write the member tags, optional SID/FID (families as
 * above), then the VID with the valid bit, and issue the op.
 */
1279 static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
1280 struct mv88e6xxx_vtu_stu_entry *entry)
1285 ret = _mv88e6xxx_vtu_wait(ds);
1292 /* Write port member tags */
1293 ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
1297 if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
1298 mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
1299 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1300 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1304 reg = entry->fid & GLOBAL_VTU_FID_MASK;
1305 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
1310 reg = GLOBAL_VTU_VID_VALID;
1312 reg |= entry->vid & GLOBAL_VTU_VID_MASK;
1313 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1317 return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
/* STU GetNext: seed GLOBAL_VTU_SID with sid, issue the STU op, then
 * read back the next SID, its valid bit and (nibble offset 2) the
 * per-port states.
 */
1320 static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
1321 struct mv88e6xxx_vtu_stu_entry *entry)
1323 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1326 ret = _mv88e6xxx_vtu_wait(ds);
1330 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
1331 sid & GLOBAL_VTU_SID_MASK);
1335 ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
1339 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
1343 next.sid = ret & GLOBAL_VTU_SID_MASK;
1345 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
1349 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1352 ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
/* STU LoadPurge: write the per-port states (nibble offset 2), the
 * valid bit and the SID, then issue the op.
 */
1361 static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
1362 struct mv88e6xxx_vtu_stu_entry *entry)
1367 ret = _mv88e6xxx_vtu_wait(ds);
1374 /* Write port states */
1375 ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
1379 reg = GLOBAL_VTU_VID_VALID;
1381 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1385 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1386 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1390 return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
/* Build a fresh VTU entry for vid: one FID per VLAN, CPU port tagged,
 * every other port excluded.  On families with an STU, make sure the
 * single shared STU entry (SID 0) is valid, then flush any stale MAC
 * addresses from the new FID.
 */
1393 static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
1394 struct mv88e6xxx_vtu_stu_entry *entry)
1396 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1397 struct mv88e6xxx_vtu_stu_entry vlan = {
1400 .fid = vid, /* We use one FID per VLAN */
1404 /* exclude all ports except the CPU */
1405 for (i = 0; i < ps->num_ports; ++i)
1406 vlan.data[i] = dsa_is_cpu_port(ds, i) ?
1407 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED :
1408 GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1410 if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
1411 mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
1412 struct mv88e6xxx_vtu_stu_entry vstp;
1415 /* Adding a VTU entry requires a valid STU entry. As VSTP is not
1416 * implemented, only one STU entry is needed to cover all VTU
1417 * entries. Thus, validate the SID 0.
/* GetNext from the highest SID wraps around to SID 0. */
1420 err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
1424 if (vstp.sid != vlan.sid || !vstp.valid) {
1425 memset(&vstp, 0, sizeof(vstp));
1427 vstp.sid = vlan.sid;
1429 err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
1434 /* Clear all MAC addresses from the new database */
1435 err = _mv88e6xxx_atu_flush(ds, vlan.fid, true);
/* Add a port to a VLAN: GetNext from vid-1 lands on vid if it exists;
 * otherwise create it, then set the port's member tag (tagged or
 * untagged) and load the entry back.
 */
1444 int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
1447 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1448 struct mv88e6xxx_vtu_stu_entry vlan;
1451 mutex_lock(&ps->smi_mutex);
1453 err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
1457 err = _mv88e6xxx_vtu_getnext(ds, &vlan);
1461 if (vlan.vid != vid || !vlan.valid) {
1462 err = _mv88e6xxx_vlan_init(ds, vid, &vlan);
1467 vlan.data[port] = untagged ?
1468 GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
1469 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
1471 err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
1473 mutex_unlock(&ps->smi_mutex);
/* Remove a port from a VLAN: mark it non-member, keep the VLAN alive
 * while any non-CPU port still belongs to it, and purge the port's
 * learned addresses from the VLAN's FID.
 */
1478 int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
1480 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1481 struct mv88e6xxx_vtu_stu_entry vlan;
1484 mutex_lock(&ps->smi_mutex);
1486 err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
1490 err = _mv88e6xxx_vtu_getnext(ds, &vlan);
/* Port isn't a member (or VLAN doesn't exist): nothing to delete. */
1494 if (vlan.vid != vid || !vlan.valid ||
1495 vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
1500 vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1502 /* keep the VLAN unless all ports are excluded */
1504 for (i = 0; i < ps->num_ports; ++i) {
1505 if (dsa_is_cpu_port(ds, i))
1508 if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
1514 err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
1518 err = _mv88e6xxx_atu_remove(ds, vlan.fid, port, false);
1520 mutex_unlock(&ps->smi_mutex);
/* Iterate the VTU: GetNext from *vid, then report which ports are
 * members and which of those are untagged, via the two bitmaps.
 */
1525 int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
1526 unsigned long *ports, unsigned long *untagged)
1528 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1529 struct mv88e6xxx_vtu_stu_entry next;
1536 mutex_lock(&ps->smi_mutex);
1537 err = _mv88e6xxx_vtu_vid_write(ds, *vid);
1541 err = _mv88e6xxx_vtu_getnext(ds, &next);
1543 mutex_unlock(&ps->smi_mutex);
1553 for (port = 0; port < ps->num_ports; ++port) {
1554 clear_bit(port, ports);
1555 clear_bit(port, untagged);
/* The CPU port is never reported to the caller. */
1557 if (dsa_is_cpu_port(ds, port))
1560 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED ||
1561 next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
1562 set_bit(port, ports);
1564 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
1565 set_bit(port, untagged);
1571 static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
1572 const unsigned char *addr)
1576 for (i = 0; i < 3; i++) {
1577 ret = _mv88e6xxx_reg_write(
1578 ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
1579 (addr[i * 2] << 8) | addr[i * 2 + 1]);
1587 static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
1591 for (i = 0; i < 3; i++) {
1592 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1593 GLOBAL_ATU_MAC_01 + i);
1596 addr[i * 2] = ret >> 8;
1597 addr[i * 2 + 1] = ret & 0xff;
1603 static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
1604 struct mv88e6xxx_atu_entry *entry)
1608 ret = _mv88e6xxx_atu_wait(ds);
1612 ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
1616 ret = _mv88e6xxx_atu_data_write(ds, entry);
1620 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, entry->fid);
1624 return _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_LOAD_DB);
/* Build an ATU entry for (addr, vid) on the given port and load it.
 * Passing state == GLOBAL_ATU_DATA_STATE_UNUSED purges the entry
 * instead (the port vector is then left at zero).  Must be called with
 * the SMI mutex held.
 */
1627 static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
1628 const unsigned char *addr, u16 vid,
1631 struct mv88e6xxx_atu_entry entry = { 0 };
1633 entry.fid = vid; /* We use one FID per VLAN */
1634 entry.state = state;
1635 ether_addr_copy(entry.mac, addr);
1636 if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
/* Single-port (non-trunk) entry: the port vector has only this
 * port's bit set.
 */
1637 entry.trunk = false;
1638 entry.portv_trunkid = BIT(port);
1641 return _mv88e6xxx_atu_load(ds, &entry);
/* switchdev prepare-phase hook for FDB add: nothing to reserve, so this
 * is effectively a no-op acknowledgement that the commit phase can
 * proceed.
 */
1644 int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
1645 const struct switchdev_obj_port_fdb *fdb,
1646 struct switchdev_trans *trans)
1648 /* We don't use per-port FDB */
1652 /* We don't need any dynamic resource from the kernel (yet),
1653 * so skip the prepare phase.
/* switchdev commit-phase hook: install a static FDB entry for this
 * port.  Multicast addresses get the MC_STATIC state, unicast get
 * UC_STATIC.  Takes the SMI mutex around the ATU load.
 */
1658 int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
1659 const struct switchdev_obj_port_fdb *fdb,
1660 struct switchdev_trans *trans)
1662 int state = is_multicast_ether_addr(fdb->addr) ?
1663 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1664 GLOBAL_ATU_DATA_STATE_UC_STATIC;
1665 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1668 mutex_lock(&ps->smi_mutex);
1669 ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, state);
1670 mutex_unlock(&ps->smi_mutex);
/* switchdev hook: remove an FDB entry by re-loading it with the UNUSED
 * state, which purges it from the ATU.  Takes the SMI mutex.
 */
1675 int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
1676 const struct switchdev_obj_port_fdb *fdb)
1678 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1681 mutex_lock(&ps->smi_mutex);
1682 ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid,
1683 GLOBAL_ATU_DATA_STATE_UNUSED);
1684 mutex_unlock(&ps->smi_mutex);
/* Fetch the next ATU entry in database 'fid' (hardware GET_NEXT_DB
 * operation) into *entry.  Decodes the data register into state and
 * either a trunk ID or a port vector.  Must be called with the SMI
 * mutex held.
 */
1689 static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
1690 struct mv88e6xxx_atu_entry *entry)
1692 struct mv88e6xxx_atu_entry next = { 0 };
1697 ret = _mv88e6xxx_atu_wait(ds);
1701 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
1705 ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
1709 ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
1713 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA)
1717 next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
1718 if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1719 unsigned int mask, shift;
/* The same field holds a trunk ID or a port vector depending on
 * the TRUNK bit; pick the right mask/shift pair accordingly.
 */
1721 if (ret & GLOBAL_ATU_DATA_TRUNK) {
1723 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
1724 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
1727 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
1728 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
1731 next.portv_trunkid = (ret & mask) >> shift;
/* switchdev dump hook: walk every VLAN in the VTU and, for each, walk
 * that VLAN's FID in the ATU, invoking cb() for every entry whose port
 * vector includes 'port'.  The all-ones VID/MAC values are iterator
 * start sentinels for the hardware get-next operations; the ATU walk
 * ends when the broadcast MAC wraps back around.  Takes the SMI mutex
 * for the whole dump.
 * NOTE(review): truncated view — error checks between the visible
 * statements are not shown.
 */
1738 int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
1739 struct switchdev_obj_port_fdb *fdb,
1740 int (*cb)(struct switchdev_obj *obj))
1742 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1743 struct mv88e6xxx_vtu_stu_entry vlan = {
1744 .vid = GLOBAL_VTU_VID_MASK, /* all ones */
1748 mutex_lock(&ps->smi_mutex);
1750 err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid);
1755 struct mv88e6xxx_atu_entry addr = {
1756 .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
1759 err = _mv88e6xxx_vtu_getnext(ds, &vlan);
1766 err = _mv88e6xxx_atu_mac_write(ds, addr.mac);
1771 err = _mv88e6xxx_atu_getnext(ds, vlan.fid, &addr);
1775 if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
/* Only report non-trunk entries that include this port. */
1778 if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
1779 bool is_static = addr.state ==
1780 (is_multicast_ether_addr(addr.mac) ?
1781 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1782 GLOBAL_ATU_DATA_STATE_UC_STATIC);
1784 fdb->vid = vlan.vid;
1785 ether_addr_copy(fdb->addr, addr.mac);
1786 fdb->ndm_state = is_static ? NUD_NOARP :
1789 err = cb(&fdb->obj);
1793 } while (!is_broadcast_ether_addr(addr.mac));
1795 } while (vlan.vid < GLOBAL_VTU_VID_MASK);
1798 mutex_unlock(&ps->smi_mutex);
/* Deferred-work handler: drain port_state_update_mask, applying the
 * cached STP state to each flagged port outside atomic context.
 */
1803 static void mv88e6xxx_bridge_work(struct work_struct *work)
1805 struct mv88e6xxx_priv_state *ps;
1806 struct dsa_switch *ds;
1809 ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
/* NOTE(review): recovers ds by stepping one struct dsa_switch back
 * from the priv pointer — assumes the priv state is allocated
 * immediately after struct dsa_switch.  Confirm against the
 * allocation site; a ds_to_priv()-style inverse helper would be
 * less fragile.
 */
1810 ds = ((struct dsa_switch *)ps) - 1;
1812 while (ps->port_state_update_mask) {
1813 port = __ffs(ps->port_state_update_mask);
1814 clear_bit(port, &ps->port_state_update_mask);
1815 mv88e6xxx_set_port_state(ds, port, ps->port_state[port]);
/* Per-port hardware initialisation: PCS/MAC forcing, Port Control 0/1/2,
 * association vector, rate control, pause limits, tag remapping, and the
 * port-based VLAN map.  Family checks gate registers that only exist on
 * some chips.  Holds the SMI mutex for the whole sequence.
 * NOTE(review): truncated view — the error-return checks after each
 * register write are not visible here.
 */
1819 static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
1821 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1825 mutex_lock(&ps->smi_mutex);
1827 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1828 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1829 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
1830 mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
1831 /* MAC Forcing register: don't force link, speed,
1832 * duplex or flow control state to any particular
1833 * values on physical ports, but force the CPU port
1834 * and all DSA ports to their maximum bandwidth and
1837 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
1838 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
1839 reg &= ~PORT_PCS_CTRL_UNFORCED;
1840 reg |= PORT_PCS_CTRL_FORCE_LINK |
1841 PORT_PCS_CTRL_LINK_UP |
1842 PORT_PCS_CTRL_DUPLEX_FULL |
1843 PORT_PCS_CTRL_FORCE_DUPLEX;
/* 6065 family tops out at 100 Mb/s; others force 1000. */
1844 if (mv88e6xxx_6065_family(ds))
1845 reg |= PORT_PCS_CTRL_100;
1847 reg |= PORT_PCS_CTRL_1000;
1849 reg |= PORT_PCS_CTRL_UNFORCED;
1852 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1853 PORT_PCS_CTRL, reg);
1858 /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
1859 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
1860 * tunneling, determine priority by looking at 802.1p and IP
1861 * priority fields (IP prio has precedence), and set STP state
1864 * If this is the CPU link, use DSA or EDSA tagging depending
1865 * on which tagging mode was configured.
1867 * If this is a link to another switch, use DSA tagging mode.
1869 * If this is the upstream port for this switch, enable
1870 * forwarding of unknown unicasts and multicasts.
1873 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1874 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1875 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
1876 mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
1877 reg = PORT_CONTROL_IGMP_MLD_SNOOP |
1878 PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
1879 PORT_CONTROL_STATE_FORWARDING;
1880 if (dsa_is_cpu_port(ds, port)) {
1881 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
1882 reg |= PORT_CONTROL_DSA_TAG;
1883 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1884 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1885 mv88e6xxx_6320_family(ds)) {
1886 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
1887 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
1889 reg |= PORT_CONTROL_FRAME_MODE_DSA;
1890 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
1891 PORT_CONTROL_FORWARD_UNKNOWN_MC;
1894 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1895 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1896 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
1897 mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
1898 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
1899 reg |= PORT_CONTROL_EGRESS_ADD_TAG;
1902 if (dsa_is_dsa_port(ds, port)) {
1903 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
1904 reg |= PORT_CONTROL_DSA_TAG;
1905 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1906 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1907 mv88e6xxx_6320_family(ds)) {
1908 reg |= PORT_CONTROL_FRAME_MODE_DSA;
1911 if (port == dsa_upstream_port(ds))
1912 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
1913 PORT_CONTROL_FORWARD_UNKNOWN_MC;
1916 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1922 /* Port Control 2: don't force a good FCS, set the maximum frame size to
1923 * 10240 bytes, enable secure 802.1q tags, don't discard tagged or
1924 * untagged frames on this port, do a destination address lookup on all
1925 * received packets as usual, disable ARP mirroring and don't send a
1926 * copy of all transmitted/received frames on this port to the CPU.
1929 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1930 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1931 mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
1932 reg = PORT_CONTROL_2_MAP_DA;
1934 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1935 mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
1936 reg |= PORT_CONTROL_2_JUMBO_10240;
1938 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
1939 /* Set the upstream port this port should use */
1940 reg |= dsa_upstream_port(ds);
1941 /* enable forwarding of unknown multicast addresses to
1944 if (port == dsa_upstream_port(ds))
1945 reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
1948 reg |= PORT_CONTROL_2_8021Q_SECURE;
1951 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1952 PORT_CONTROL_2, reg);
1957 /* Port Association Vector: when learning source addresses
1958 * of packets, add the address to the address database using
1959 * a port bitmap that has only the bit for this port set and
1960 * the other bits clear.
1962 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR,
1967 /* Egress rate control 2: disable egress rate control. */
1968 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
1973 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1974 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1975 mv88e6xxx_6320_family(ds)) {
1976 /* Do not limit the period of time that this port can
1977 * be paused for by the remote end or the period of
1978 * time that this port can pause the remote end.
1980 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1981 PORT_PAUSE_CTRL, 0x0000);
1985 /* Port ATU control: disable limiting the number of
1986 * address database entries that this port is allowed
1989 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1990 PORT_ATU_CONTROL, 0x0000);
1991 /* Priority Override: disable DA, SA and VTU priority
1994 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1995 PORT_PRI_OVERRIDE, 0x0000);
1999 /* Port Ethertype: use the Ethertype DSA Ethertype
2002 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2003 PORT_ETH_TYPE, ETH_P_EDSA);
2006 /* Tag Remap: use an identity 802.1p prio -> switch
2009 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2010 PORT_TAG_REGMAP_0123, 0x3210);
2014 /* Tag Remap 2: use an identity 802.1p prio -> switch
2017 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2018 PORT_TAG_REGMAP_4567, 0x7654);
2023 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2024 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2025 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2026 mv88e6xxx_6320_family(ds)) {
2027 /* Rate Control: disable ingress rate limiting. */
2028 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2029 PORT_RATE_CONTROL, 0x0001);
2034 /* Port Control 1: disable trunking, disable sending
2035 * learning messages to this port.
2037 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
2041 /* Port based VLAN map: do not give each port its own address
2042 * database, and allow every port to egress frames on all other ports.
2044 reg = BIT(ps->num_ports) - 1; /* all ports */
/* NOTE(review): "reg & ~port" masks the port *number*, not the
 * port's bit — per the comment above, excluding this port from its
 * own egress map would be "reg & ~BIT(port)".  Confirm whether
 * _mv88e6xxx_port_vlan_map_set() compensates for this.
 */
2045 ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg & ~port);
2049 /* Default VLAN ID and priority: don't set a default VLAN
2050 * ID, and set the default packet priority to zero.
2052 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
2055 mutex_unlock(&ps->smi_mutex);
/* Initialise every port on the switch via mv88e6xxx_setup_port(). */
2059 int mv88e6xxx_setup_ports(struct dsa_switch *ds)
2061 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2065 for (i = 0; i < ps->num_ports; i++) {
2066 ret = mv88e6xxx_setup_port(ds, i);
/* One-time common setup: initialise the SMI mutex, cache the switch ID
 * (masked to the family/product bits) and prepare the deferred bridge
 * work item.
 */
2073 int mv88e6xxx_setup_common(struct dsa_switch *ds)
2075 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2077 mutex_init(&ps->smi_mutex);
2079 ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
2081 INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
/* Global (whole-chip) setup: ATU ageing/learning, priority maps,
 * management-frame trapping, DSA routing table, trunk tables, priority
 * override, ingress rate limits, statistics flush, and a full ATU and
 * VTU/STU flush.  The REG_WRITE/REG_READ macros are used outside the
 * SMI mutex; the trailing flush sequence takes the mutex explicitly.
 * NOTE(review): truncated view — error checks between visible
 * statements are not shown.
 */
2086 int mv88e6xxx_setup_global(struct dsa_switch *ds)
2088 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2092 /* Set the default address aging time to 5 minutes, and
2093 * enable address learn messages to be sent to all message
2096 REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
2097 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
2099 /* Configure the IP ToS mapping registers. */
2100 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
2101 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
2102 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
2103 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
2104 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
2105 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
2106 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
2107 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
2109 /* Configure the IEEE 802.1p priority mapping register. */
2110 REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
2112 /* Send all frames with destination addresses matching
2113 * 01:80:c2:00:00:0x to the CPU port.
2115 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
2117 /* Ignore removed tag data on doubly tagged packets, disable
2118 * flow control messages, force flow control priority to the
2119 * highest, and send all special multicast frames to the CPU
2120 * port at the highest priority.
2122 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
2123 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
2124 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
2126 /* Program the DSA routing table. */
2127 for (i = 0; i < 32; i++) {
/* Only chips other than ourselves that actually exist in the
 * tree get a real nexthop from the platform routing table.
 */
2130 if (ds->pd->rtable &&
2131 i != ds->index && i < ds->dst->pd->nr_chips)
2132 nexthop = ds->pd->rtable[i] & 0x1f;
2134 REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
2135 GLOBAL2_DEVICE_MAPPING_UPDATE |
2136 (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
2140 /* Clear all trunk masks. */
2141 for (i = 0; i < 8; i++)
2142 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
2143 0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
2144 ((1 << ps->num_ports) - 1));
2146 /* Clear all trunk mappings. */
2147 for (i = 0; i < 16; i++)
2148 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
2149 GLOBAL2_TRUNK_MAPPING_UPDATE |
2150 (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
2152 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2153 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2154 mv88e6xxx_6320_family(ds)) {
2155 /* Send all frames with destination addresses matching
2156 * 01:80:c2:00:00:2x to the CPU port.
2158 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
2160 /* Initialise cross-chip port VLAN table to reset
2163 REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
2165 /* Clear the priority override table. */
2166 for (i = 0; i < 16; i++)
2167 REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
2171 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2172 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2173 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2174 mv88e6xxx_6320_family(ds)) {
2175 /* Disable ingress rate limiting by resetting all
2176 * ingress rate limit registers to their initial
2179 for (i = 0; i < ps->num_ports; i++)
2180 REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
2184 /* Clear the statistics counters for all ports */
2185 REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
2187 /* Wait for the flush to complete. */
2188 mutex_lock(&ps->smi_mutex);
2189 ret = _mv88e6xxx_stats_wait(ds);
2193 /* Clear all ATU entries */
2194 ret = _mv88e6xxx_atu_flush(ds, 0, true);
2198 /* Clear all the VTU and STU entries */
2199 ret = _mv88e6xxx_vtu_stu_flush(ds);
2201 mutex_unlock(&ps->smi_mutex);
/* Soft-reset the switch: disable all ports, let the transmit queues
 * drain, issue the reset command (optionally keeping the PPU running
 * so indirect phy access via global regs 0x18/0x19 still works), then
 * poll up to one second for the reset-complete bits.
 */
2206 int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
2208 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
/* Expected value of the reset-done field differs with PPU state. */
2209 u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
2210 unsigned long timeout;
2214 /* Set all ports to the disabled state. */
2215 for (i = 0; i < ps->num_ports; i++) {
2216 ret = REG_READ(REG_PORT(i), PORT_CONTROL);
2217 REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
2220 /* Wait for transmit queues to drain. */
2221 usleep_range(2000, 4000);
2223 /* Reset the switch. Keep the PPU active if requested. The PPU
2224 * needs to be active to support indirect phy register access
2225 * through global registers 0x18 and 0x19.
2228 REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
2230 REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
2232 /* Wait up to one second for reset to complete. */
2233 timeout = jiffies + 1 * HZ;
2234 while (time_before(jiffies, timeout)) {
2235 ret = REG_READ(REG_GLOBAL, 0x00);
2236 if ((ret & is_reset) == is_reset)
2238 usleep_range(1000, 2000);
2240 if (time_after(jiffies, timeout))
/* Read a phy register on a specific page: select the page via phy
 * register 0x16, read the register, then restore page 0.  Serialized
 * with the SMI mutex.
 */
2246 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
2248 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2251 mutex_lock(&ps->smi_mutex);
2252 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
2255 ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
/* Always restore page 0, even on read failure. */
2257 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
2258 mutex_unlock(&ps->smi_mutex);
/* Write a phy register on a specific page: select the page via phy
 * register 0x16, write the register, then restore page 0.  Serialized
 * with the SMI mutex.
 */
2262 int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
2265 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2268 mutex_lock(&ps->smi_mutex);
2269 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
2273 ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
/* Always restore page 0, even on write failure. */
2275 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
2276 mutex_unlock(&ps->smi_mutex);
/* Map a front-panel port number to its phy address; valid ports map
 * 1:1 (the out-of-range return value is outside this truncated view).
 */
2280 static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
2282 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2284 if (port >= 0 && port < ps->num_ports)
/* DSA .phy_read hook: validate/translate the port to a phy address and
 * perform a direct phy read under the SMI mutex.
 */
2290 mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
2292 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2293 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2299 mutex_lock(&ps->smi_mutex);
2300 ret = _mv88e6xxx_phy_read(ds, addr, regnum);
2301 mutex_unlock(&ps->smi_mutex);
/* DSA .phy_write hook: validate/translate the port to a phy address and
 * perform a direct phy write under the SMI mutex.
 */
2306 mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
2308 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2309 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2315 mutex_lock(&ps->smi_mutex);
2316 ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
2317 mutex_unlock(&ps->smi_mutex);
/* Indirect phy read (via the switch's phy access mechanism) for the
 * given port, under the SMI mutex.
 */
2322 mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
2324 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2325 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2331 mutex_lock(&ps->smi_mutex);
2332 ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
2333 mutex_unlock(&ps->smi_mutex);
/* Indirect phy write (via the switch's phy access mechanism) for the
 * given port, under the SMI mutex.
 */
2338 mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
2341 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2342 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2348 mutex_lock(&ps->smi_mutex);
2349 ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
2350 mutex_unlock(&ps->smi_mutex);
2354 #ifdef CONFIG_NET_DSA_HWMON
/* Read the die temperature on 6161/6165-style chips through phy 0:
 * select page 6, set sensor-enable bit 5 of reg 0x1a, wait for it to
 * settle, sample, disable the sensor, and convert the raw 5-bit value
 * to degrees Celsius ((raw - 5) * 5).  Page 0 is restored on exit.
 */
2356 static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
2358 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2364 mutex_lock(&ps->smi_mutex);
/* Select phy page 6 via register 0x16. */
2366 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
2370 /* Enable temperature sensor */
2371 ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2375 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
2379 /* Wait for temperature to stabilize */
2380 usleep_range(10000, 12000);
2382 val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2388 /* Disable temperature sensor */
2389 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
2393 *temp = ((val & 0x1f) - 5) * 5;
/* Restore phy page 0 on the way out. */
2396 _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
2397 mutex_unlock(&ps->smi_mutex);
/* Read the die temperature on 63xx-style chips from phy page 6 reg 27
 * (phy 3 on the 6320 family, phy 0 otherwise); raw value is offset by
 * 25 degrees Celsius.
 */
2401 static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
2403 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2408 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
2412 *temp = (ret & 0xff) - 25;
/* Dispatch temperature read to the family-specific implementation. */
2417 int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
2419 if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
2420 return mv88e63xx_get_temp(ds, temp);
2422 return mv88e61xx_get_temp(ds, temp);
/* Read the over-temperature alarm threshold (6320/6352 families only)
 * from phy page 6 reg 26 bits [12:8]; value is in 5-degree steps with a
 * -25 C offset.
 */
2425 int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
2427 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2430 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2435 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2439 *temp = (((ret >> 8) & 0x1f) * 5) - 25;
/* Set the over-temperature alarm threshold (6320/6352 families only):
 * read-modify-write phy page 6 reg 26, converting degrees Celsius to
 * the 5-bit, 5-degree-step hardware encoding (inverse of
 * mv88e6xxx_get_temp_limit) and clamping to the field's range.
 */
2444 int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
2446 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2449 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2452 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2455 temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
/* Keep all bits outside the 5-bit threshold field [12:8]. */
2456 return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
2457 (ret & 0xe0ff) | (temp << 8));
/* Report whether the over-temperature alarm bit (bit 6 of phy page 6
 * reg 26) is currently set; 6320/6352 families only.
 */
2460 int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
2462 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2465 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2470 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2474 *alarm = !!(ret & 0x40);
2478 #endif /* CONFIG_NET_DSA_HWMON */
/* Module init: register each chip-specific switch driver that was
 * enabled at build time.
 */
2480 static int __init mv88e6xxx_init(void)
2482 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2483 register_switch_driver(&mv88e6131_switch_driver);
2485 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2486 register_switch_driver(&mv88e6123_61_65_switch_driver);
2488 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2489 register_switch_driver(&mv88e6352_switch_driver);
2491 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2492 register_switch_driver(&mv88e6171_switch_driver);
2496 module_init(mv88e6xxx_init);
/* Module exit: unregister the switch drivers in the reverse order of
 * registration in mv88e6xxx_init().
 */
2498 static void __exit mv88e6xxx_cleanup(void)
2500 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2501 unregister_switch_driver(&mv88e6171_switch_driver);
2503 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2504 unregister_switch_driver(&mv88e6352_switch_driver);
2506 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2507 unregister_switch_driver(&mv88e6123_61_65_switch_driver);
2509 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2510 unregister_switch_driver(&mv88e6131_switch_driver);
2513 module_exit(mv88e6xxx_cleanup);
/* Module metadata. */
2515 MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
2516 MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
2517 MODULE_LICENSE("GPL");