/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt)				"bcmgenet: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"
/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_DEFAULT_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt)

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
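
/* Layout note: within each of the TDMA and RDMA windows, the first
 * TOTAL_DESC * DMA_DESC_SIZE bytes hold the descriptors themselves and
 * the per-ring plus global DMA registers follow, which is exactly what
 * the GENET_TDMA_REG_OFF/GENET_RDMA_REG_OFF offsets above skip over.
 */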
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_length_status(priv, d, val);
	dmadesc_set_addr(priv, d, addr);
}

static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}
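
/* On GENET versions without 40-bit addressing only the LO address word
 * exists; the HI word accesses above are compiled in only on 64-bit/LPAE
 * kernels and taken only when the hardware advertises GENET_HAS_40BITS.
 */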
#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)
static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}
/* These macros are defined to deal with register map change
 * between GENET1.1 and GENET2. Only those currently being used
 * by driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;
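
/* bcmgenet_dma_regs is resolved once at probe time by
 * bcmgenet_set_hw_params(), so the accessors below can index the proper
 * per-version offset table without any run-time version checks.
 */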
static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix with T/D the registers
 * having different meaning depending on the direction
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};

/* GENET v4 supports 40-bits pointer addressing
 * for obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_READ_PTR_HI]	= 0x04,
	[TDMA_CONS_INDEX]	= 0x08,
	[TDMA_PROD_INDEX]	= 0x0C,
	[DMA_RING_BUF_SIZE]	= 0x10,
	[DMA_START_ADDR]	= 0x14,
	[DMA_START_ADDR_HI]	= 0x18,
	[DMA_END_ADDR]		= 0x1C,
	[DMA_END_ADDR_HI]	= 0x20,
	[DMA_MBUF_DONE_THRESH]	= 0x24,
	[TDMA_FLOW_PERIOD]	= 0x28,
	[TDMA_WRITE_PTR]	= 0x2C,
	[TDMA_WRITE_PTR_HI]	= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_CONS_INDEX]	= 0x04,
	[TDMA_PROD_INDEX]	= 0x08,
	[DMA_RING_BUF_SIZE]	= 0x0C,
	[DMA_START_ADDR]	= 0x10,
	[DMA_END_ADDR]		= 0x14,
	[DMA_MBUF_DONE_THRESH]	= 0x18,
	[TDMA_FLOW_PERIOD]	= 0x1C,
	[TDMA_WRITE_PTR]	= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;
static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}
static int bcmgenet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcmgenet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}
static int bcmgenet_set_rx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

	return 0;
}
static int bcmgenet_set_tx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

	return 0;
}
static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcmgenet_set_tx_csum(dev, wanted);
	if (changed & (NETIF_F_RXCSUM))
		ret = bcmgenet_set_rx_csum(dev, wanted);

	return ret;
}
static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};
#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT
 */
#define BCMGENET_STAT_OFFSET	0xc
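
/* Example: bcmgenet_update_mib_counters() below accumulates stat_sizeof
 * into a running byte offset 'j' and reads each counter at
 * UMAC_MIB_START + j, adding BCMGENET_STAT_OFFSET for the stats that
 * live past the RX block to account for the register gap above.
 */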
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
	info->n_stats = BCMGENET_STATS_LEN;
}
static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
			continue;
		case BCMGENET_STAT_MIB_RX:
		case BCMGENET_STAT_MIB_TX:
		case BCMGENET_STAT_RUNT:
			if (s->type != BCMGENET_STAT_MIB_RX)
				offset = BCMGENET_STAT_OFFSET;
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			break;
		case BCMGENET_STAT_MISC:
			val = bcmgenet_umac_readl(priv, s->reg_offset);
			/* clear if overflowed */
			if (val == ~0)
				bcmgenet_umac_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}
static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}
/* standard ethtool support functions. */
static struct ethtool_ops bcmgenet_ethtool_ops = {
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_settings		= bcmgenet_get_settings,
	.set_settings		= bcmgenet_set_settings,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
};
/* Power down the unimac, based on mode. */
static void bcmgenet_power_down(struct bcmgenet_priv *priv,
				enum bcmgenet_power_mode mode)
{
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			reg |= (EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		}
		break;
	default:
		break;
	}
}
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
			 EXT_PWR_DOWN_BIAS);
		/* fallthrough */
	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		reg |= EXT_PWR_DN_EN_LD;
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}

	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

	if (mode == GENET_POWER_PASSIVE)
		bcmgenet_mii_reset(priv->dev);
}
/* ioctl handle special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int val = 0;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			val = -ENODEV;
		else
			val = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;

	default:
		val = -EINVAL;
		break;
	}

	return val;
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
	tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE;
	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}
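
/* The write pointer wraps back to cb_ptr (the first descriptor owned by
 * this ring) rather than to zero, since each ring carves out a disjoint
 * slice of the single descriptor array.
 */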
/* Simple helper to free a control block's resources */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
						  struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(priv,
				 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
						 struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(priv,
				 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
					       struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
				 INTRL2_CPU_MASK_CLEAR);
	priv->int1_mask &= ~(1 << ring->index);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
						struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
				 INTRL2_CPU_MASK_SET);
	priv->int1_mask |= (1 << ring->index);
}
/* Unlocked version of the reclaim routine */
static void __bcmgenet_tx_reclaim(struct net_device *dev,
				  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int last_tx_cn, last_c_index, num_tx_bds;
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	unsigned int bds_compl;
	unsigned int c_index;

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	txq = netdev_get_tx_queue(dev, ring->queue);

	last_c_index = ring->c_index;
	num_tx_bds = ring->size;

	c_index &= (num_tx_bds - 1);

	if (c_index >= last_c_index)
		last_tx_cn = c_index - last_c_index;
	else
		last_tx_cn = num_tx_bds - last_c_index + c_index;

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
		  __func__, ring->index,
		  c_index, last_tx_cn, last_c_index);

	/* Reclaim transmitted buffers */
	while (last_tx_cn-- > 0) {
		tx_cb_ptr = ring->cbs + last_c_index;
		bds_compl = 0;
		if (tx_cb_ptr->skb) {
			bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
			dma_unmap_single(&dev->dev,
					 dma_unmap_addr(tx_cb_ptr, dma_addr),
					 tx_cb_ptr->skb->len,
					 DMA_TO_DEVICE);
			bcmgenet_free_cb(tx_cb_ptr);
		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
			dev->stats.tx_bytes +=
				dma_unmap_len(tx_cb_ptr, dma_len);
			dma_unmap_page(&dev->dev,
				       dma_unmap_addr(tx_cb_ptr, dma_addr),
				       dma_unmap_len(tx_cb_ptr, dma_len),
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
		}
		dev->stats.tx_packets++;
		ring->free_bds += bds_compl;

		last_c_index++;
		last_c_index &= (num_tx_bds - 1);
	}

	if (ring->free_bds > (MAX_SKB_FRAGS + 1))
		ring->int_disable(priv, ring);

	if (netif_tx_queue_stopped(txq))
		netif_tx_wake_queue(txq);

	ring->c_index = c_index;
}
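
/* The consumer index arithmetic above is modulo the ring size: with
 * num_tx_bds = 256, last_c_index = 250 and a hardware c_index of 4,
 * last_tx_cn = 256 - 250 + 4 = 10 buffers are reclaimed.
 */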
static void bcmgenet_tx_reclaim(struct net_device *dev,
				struct bcmgenet_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}
/* Transmits a single SKB (either head of a fragment or a single SKB)
 * caller must hold priv->lock
 */
static int bcmgenet_xmit_single(struct net_device *dev,
				struct sk_buff *skb,
				u16 dma_desc_flags,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int skb_len;
	dma_addr_t mapping;
	u32 length_status;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = skb;

	skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
		dev_kfree_skb(skb);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
			DMA_TX_APPEND_CRC;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		length_status |= DMA_TX_DO_CSUM;

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= 1;
	ring->prod_index += 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	return 0;
}
/* Transmit a SKB fragment */
static int bcmgenet_xmit_frag(struct net_device *dev,
			      skb_frag_t *frag,
			      u16 dma_desc_flags,
			      struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	dma_addr_t mapping;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = NULL;

	mapping = skb_frag_dma_map(kdev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
			  __func__);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
		    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));

	ring->free_bds -= 1;
	ring->prod_index += 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	return 0;
}
/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
					    struct sk_buff *skb)
{
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	u16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		dev_kfree_skb(skb);
		if (!new_skb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = new_skb;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = ntohs(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset);

		/* Set the length valid bit for TCP and UDP and just set
		 * the special UDP flag for IPv4, else just set to 0.
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else {
			tx_csum_info = 0;
		}

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_tx_ring *ring = NULL;
	struct netdev_queue *txq;
	unsigned long flags = 0;
	int nr_frags, index;
	u16 dma_desc_flags;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet xmited through ring16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;

	nr_frags = skb_shinfo(skb)->nr_frags;
	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->free_bds <= nr_frags + 1) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
			   __func__, index, ring->queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* set the SKB transmit checksum */
	if (priv->desc_64b_en) {
		skb = bcmgenet_put_tx_csum(dev, skb);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	dma_desc_flags = DMA_SOP;
	if (nr_frags == 0)
		dma_desc_flags |= DMA_EOP;

	/* Transmit single SKB or head of fragment list */
	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
	if (ret) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* xmit fragments */
	for (i = 0; i < nr_frags; i++) {
		ret = bcmgenet_xmit_frag(dev,
					 &skb_shinfo(skb)->frags[i],
					 (i == nr_frags - 1) ? DMA_EOP : 0,
					 ring);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_tx_timestamp(skb);

	/* we kept a software copy of how much we should advance the TDMA
	 * producer index, now write it down to the hardware
	 */
	bcmgenet_tdma_ring_writel(priv, ring->index,
				  ring->prod_index, TDMA_PROD_INDEX);

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
		netif_tx_stop_queue(txq);
		ring->int_enable(priv, ring);
	}

out:
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}
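
/* Example of the mapping strategy above: an skb whose
 * skb_get_queue_mapping() is 0 is transmitted on the default ring 16
 * (DESC_INDEX), while queue_mapping 1..4 map to priority rings 0..3.
 */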
static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret;

	skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
	if (!skb)
		return -ENOMEM;

	/* a caller did not release this control block */
	WARN_ON(cb->skb != NULL);
	cb->skb = skb;
	mapping = dma_map_single(kdev, skb->data,
				 priv->rx_buf_len, DMA_FROM_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		bcmgenet_free_cb(cb);
		netif_err(priv, rx_err, priv->dev,
			  "%s DMA map failed\n", __func__);
		return ret;
	}

	dma_unmap_addr_set(cb, dma_addr, mapping);
	/* assign packet, prepare descriptor, and advance pointer */

	dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);

	/* turn on the newly assigned BD for DMA to use */
	priv->rx_bd_assign_index++;
	priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);

	priv->rx_bd_assign_ptr = priv->rx_bds +
		(priv->rx_bd_assign_index * DMA_DESC_SIZE);

	return 0;
}
/* bcmgenet_desc_rx - descriptor based rx process.
 * this could be called from bottom half, or from NAPI polling method.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
				     unsigned int budget)
{
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len, err;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int p_index;
	unsigned int chksum_ok = 0;

	p_index = bcmgenet_rdma_ring_readl(priv, DESC_INDEX, RDMA_PROD_INDEX);
	p_index &= DMA_P_INDEX_MASK;

	if (p_index < priv->rx_c_index)
		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) -
			priv->rx_c_index + p_index;
	else
		rxpkttoprocess = p_index - priv->rx_c_index;

	netif_dbg(priv, rx_status, dev,
		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
	       (rxpktprocessed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = cb->skb;

		rxpktprocessed++;

		priv->rx_read_ptr++;
		priv->rx_read_ptr &= (priv->num_rx_bds - 1);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcmgenet_rx_refill always either has both skb and mapping or
		 * none.
		 */
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			goto refill;
		}

		/* Unmap the packet contents such that we can use the
		 * RSV from the 64 bytes descriptor when enabled and save
		 * a 32-bits register read
		 */
		dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
				 priv->rx_buf_len, DMA_FROM_DEVICE);

		if (!priv->desc_64b_en) {
			dma_length_status =
				dmadesc_get_length_status(priv,
							  priv->rx_bds +
							  (priv->rx_read_ptr *
							   DMA_DESC_SIZE));
		} else {
			struct status_64 *status;

			status = (struct status_64 *)skb->data;
			dma_length_status = status->length_status;
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			  __func__, p_index, priv->rx_c_index,
			  priv->rx_read_ptr, dma_length_status);

		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			goto refill;
		}
		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
					 DMA_RX_OV |
					 DMA_RX_NO |
					 DMA_RX_LG |
					 DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
				  (unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;

			/* discard the packet and advance consumer index.*/
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			goto refill;
		} /* error packet */

		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
			     priv->desc_rxchk_en;

		skb_put(skb, len);
		if (priv->desc_64b_en) {
			skb_pull(skb, 64);
			len -= 64;
		}

		if (likely(chksum_ok))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* remove hardware 2bytes added for IP alignment */
		skb_pull(skb, 2);
		len -= 2;

		if (priv->crc_fwd_en) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		/*Finish setting up the received SKB and send it to the kernel*/
		skb->protocol = eth_type_trans(skb, priv->dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		if (dma_flag & DMA_RX_MULT)
			dev->stats.multicast++;

		/* Notify kernel */
		napi_gro_receive(&priv->napi, skb);
		cb->skb = NULL;
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");

		/* refill RX path on the current control block */
refill:
		err = bcmgenet_rx_refill(priv, cb);
		if (err)
			netif_err(priv, rx_err, dev, "Rx refill failed\n");
	}

	return rxpktprocessed;
}
/* Assign skb to RX DMA descriptor. */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int ret = 0;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s:\n", __func__);

	/* loop here for each buffer needing assign */
	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[priv->rx_bd_assign_index];
		if (cb->skb)
			continue;

		ret = bcmgenet_rx_refill(priv, cb);
		if (ret)
			break;
	}

	return ret;
}

static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];

		if (dma_unmap_addr(cb, dma_addr)) {
			dma_unmap_single(&priv->dev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 priv->rx_buf_len, DMA_FROM_DEVICE);
			dma_unmap_addr_set(cb, dma_addr, 0);
		}

		if (cb->skb)
			bcmgenet_free_cb(cb);
	}
}
static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
	u32 reg;

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-size packet
	 * to be processed
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}
static int reset_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	unsigned int timeout = 0;
	u32 reg;

	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	bcmgenet_rbuf_ctrl_set(priv, 0);
	udelay(10);

	/* disable MAC while updating its registers */
	bcmgenet_umac_writel(priv, 0, UMAC_CMD);

	/* issue soft reset, wait for it to complete */
	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
	while (timeout++ < 1000) {
		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
		if (!(reg & CMD_SW_RESET))
			return 0;

		udelay(1);
	}

	if (timeout == 1000) {
		dev_err(kdev,
			"timeout waiting for MAC to come out of reset\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
{
	/* Mask all interrupts.*/
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
static int init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	int ret;
	u32 reg, cpu_mask_clear;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	ret = reset_umac(priv);
	if (ret)
		return ret;

	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
	/* clear tx/rx counter */
	bcmgenet_umac_writel(priv,
			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
			     UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init rx registers, enable ip header optimization */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);

	bcmgenet_intr_disable(priv);

	cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;

	dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);

	/* Monitor cable plug/unplugged event for internal PHY */
	if (phy_is_internal(priv->phydev)) {
		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
	} else if (priv->ext_phy) {
		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg = bcmgenet_bp_mc_get(priv);
		reg |= BIT(priv->hw_params->bp_in_en_shift);

		/* bp_mask: back pressure mask */
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
		else
			reg &= ~priv->hw_params->bp_in_mask;
		bcmgenet_bp_mc_set(priv, reg);
	}

	/* Enable MDIO interrupts on GENET v3+ */
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
		cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;

	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);

	/* Enable rx/tx engine.*/
	dev_dbg(kdev, "done init umac\n");

	return 0;
}
/* Initialize all house-keeping variables for a TX ring, along
 * with corresponding hardware registers
 */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size,
				  unsigned int write_ptr, unsigned int end_ptr)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	u32 flow_period_val = 0;
	unsigned int first_bd;

	spin_lock_init(&ring->lock);
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
	} else {
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	}
	ring->cbs = priv->tx_cbs + write_ptr;
	ring->size = size;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = write_ptr;
	ring->cb_ptr = write_ptr;
	ring->end_ptr = end_ptr - 1;
	ring->prod_index = 0;

	/* Set flow period for ring != 16 */
	if (index != DESC_INDEX)
		flow_period_val = ENET_MAX_MTU_SIZE << 16;

	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
				  TDMA_FLOW_PERIOD);
	/* Unclassified traffic goes to ring 16 */
	bcmgenet_tdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);

	first_bd = write_ptr;

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
				  TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, first_bd,
				  TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);
}
/* Initialize a RDMA ring */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
				 unsigned int index, unsigned int size)
{
	u32 words_per_bd = WORDS_PER_BD(priv);
	int ret;

	priv->num_rx_bds = TOTAL_DESC;
	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
	priv->rx_bd_assign_ptr = priv->rx_bds;
	priv->rx_bd_assign_index = 0;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs)
		return -ENOMEM;

	ret = bcmgenet_alloc_rx_buffers(priv);
	if (ret) {
		kfree(priv->rx_cbs);
		return ret;
	}

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
	bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index,
				  words_per_bd * size - 1, DMA_END_ADDR);
	bcmgenet_rdma_ring_writel(priv, index,
				  (DMA_FC_THRESH_LO <<
				   DMA_XOFF_THRESHOLD_SHIFT) |
				  DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);

	return ret;
}
/* init multi xmit queues, only available for GENET2+
 * the queue is partitioned as follows:
 *
 * queue 0 - 3 is priority based, each one has 32 descriptors,
 * with queue 0 being the highest priority queue.
 *
 * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT
 * descriptors: 256 - (number of tx queues * bds per queues) = 128
 * descriptors.
 *
 * The transmit control block pool is then partitioned as following:
 * - tx_cbs[0...127] are for queue 16
 * - tx_ring_cbs[0] points to tx_cbs[128..159]
 * - tx_ring_cbs[1] points to tx_cbs[160..191]
 * - tx_ring_cbs[2] points to tx_cbs[192..223]
 * - tx_ring_cbs[3] points to tx_cbs[224..255]
 */
static void bcmgenet_init_multiq(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i, dma_enable;
	u32 reg, dma_ctrl, ring_cfg = 0;
	u32 dma_priority[3] = {0, 0, 0};

	if (!netif_is_multiqueue(dev)) {
		netdev_warn(dev, "called with non multi queue aware HW\n");
		return;
	}

	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);

	/* Enable strict priority arbiter mode */
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);

	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		/* the first tx_cbs are reserved for the default tx queue
		 * (ring 16)
		 */
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
				      i * priv->hw_params->bds_cnt,
				      (i + 1) * priv->hw_params->bds_cnt);

		/* Configure ring as descriptor ring and setup priority */
		ring_cfg |= 1 << i;
		dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT);

		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
	}

	/* Set ring 16 priority and program the hardware registers */
	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
		 DMA_PRIO_REG_SHIFT(DESC_INDEX));
	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);

	/* Enable rings */
	reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG);
	reg |= ring_cfg;
	bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG);

	/* Configure ring as descriptor ring and re-enable DMA if enabled */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	if (dma_enable)
		reg |= DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
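
/* With four 32-descriptor priority queues carved out here, ring 16 is
 * left with GENET_DEFAULT_BD_CNT = 256 - 4 * 32 = 128 descriptors, which
 * bcmgenet_init_dma() sets up right after these rings.
 */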
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
	int ret = 0;
	int timeout = 0;
	u32 reg;

	/* Disable TDMA to stop add more frames in TX DMA */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	/* Check TDMA status register to confirm TDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
		ret = -ETIMEDOUT;
	}

	/* Wait 10ms for packet drain in both tx and rx dma */
	usleep_range(10000, 20000);

	/* Disable RDMA */
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	timeout = 0;
	/* Check RDMA status register to confirm RDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
	int i;

	/* disable DMA */
	bcmgenet_dma_teardown(priv);

	for (i = 0; i < priv->num_tx_bds; i++) {
		if (priv->tx_cbs[i].skb != NULL) {
			dev_kfree_skb(priv->tx_cbs[i].skb);
			priv->tx_cbs[i].skb = NULL;
		}
	}

	bcmgenet_free_rx_buffers(priv);
	kfree(priv->rx_cbs);
	kfree(priv->tx_cbs);
}
/* init_edma: Initialize DMA control register */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
	int ret;

	netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n");

	/* by default, enable ring 16 (descriptor based) */
	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC);
	if (ret) {
		netdev_err(priv->dev, "failed to initialize RX ring\n");
		return ret;
	}

	/* init rDma */
	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* init tDma */
	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize common TX ring structures */
	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
	priv->num_tx_bds = TOTAL_DESC;
	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->tx_cbs) {
		bcmgenet_fini_dma(priv);
		return -ENOMEM;
	}

	/* initialize multi xmit queue */
	bcmgenet_init_multiq(priv->dev);

	/* initialize special ring 16 */
	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
			      priv->hw_params->tx_queues *
			      priv->hw_params->bds_cnt,
			      TOTAL_DESC);

	return 0;
}
/* NAPI polling method*/
static int bcmgenet_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_priv *priv = container_of(napi,
			struct bcmgenet_priv, napi);
	unsigned int work_done;

	/* tx reclaim */
	bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);

	work_done = bcmgenet_desc_rx(priv, budget);

	/* Advancing our consumer index*/
	priv->rx_c_index += work_done;
	priv->rx_c_index &= DMA_C_INDEX_MASK;
	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
				  priv->rx_c_index, RDMA_CONS_INDEX);
	if (work_done < budget) {
		napi_complete(napi);
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
					 INTRL2_CPU_MASK_CLEAR);
	}

	return work_done;
}
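
/* NAPI flow: bcmgenet_isr0() masks RXDMA_BDONE and schedules this
 * poller; once the budget is no longer exhausted, the interrupt is
 * unmasked again via the INTRL2_CPU_MASK_CLEAR write above.
 */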
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
	struct bcmgenet_priv *priv = container_of(
			work, struct bcmgenet_priv, bcmgenet_irq_work);

	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

	if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
		priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
		netif_dbg(priv, wol, priv->dev,
			  "magic packet detected, waking up\n");
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
	}

	/* Link UP/DOWN event */
	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
		phy_mac_interrupt(priv->phydev,
				  priv->irq0_stat & UMAC_IRQ_LINK_UP);
		priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
	}
}
/* bcmgenet_isr1: interrupt handler for ring buffer. */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	unsigned int index;

	/* Save irq status for bottom-half processing. */
	priv->irq1_stat =
		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
		~priv->int1_mask;
	/* clear interrupts */
	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
	/* Check the MBDONE interrupts.
	 * packet is done, reclaim descriptors
	 */
	if (priv->irq1_stat & 0x0000ffff) {
		for (index = 0; index < 16; index++) {
			if (priv->irq1_stat & (1 << index))
				bcmgenet_tx_reclaim(priv->dev,
						    &priv->tx_rings[index]);
		}
	}

	return IRQ_HANDLED;
}
/* bcmgenet_isr0: Handle various interrupts. */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	/* Save irq status for bottom-half processing. */
	priv->irq0_stat =
		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	/* clear interrupts */
	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "IRQ=0x%x\n", priv->irq0_stat);

	if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
		/* We use NAPI(software interrupt throttling, if
		 * Rx Descriptor throttling is not used.
		 * Disable interrupt, will be enabled in the poll method.
		 */
		if (likely(napi_schedule_prep(&priv->napi))) {
			bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
						 INTRL2_CPU_MASK_SET);
			__napi_schedule(&priv->napi);
		}
	}
	if (priv->irq0_stat &
	    (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
		/* Tx reclaim */
		bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
	}
	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
			       UMAC_IRQ_PHY_DET_F |
			       UMAC_IRQ_LINK_UP |
			       UMAC_IRQ_LINK_DOWN |
			       UMAC_IRQ_HFB_SM |
			       UMAC_IRQ_HFB_MM |
			       UMAC_IRQ_MPD_R)) {
		/* all other interested interrupts handled in bottom half */
		schedule_work(&priv->bcmgenet_irq_work);
	}

	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
		wake_up(&priv->wq);
	}

	return IRQ_HANDLED;
}
static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}
static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
	u32 reg;

	reg = bcmgenet_rbuf_ctrl_get(priv);
	reg |= BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);

	reg &= ~BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);
}
static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
				 unsigned char *addr)
{
	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			     (addr[2] << 8) | addr[3], UMAC_MAC0);
	bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
	u32 reg;
	u32 dma_ctrl;

	/* disable DMA */
	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
	udelay(10);
	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);

	return dma_ctrl;
}
static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
{
	u32 reg;

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
static void bcmgenet_netif_start(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Start the network engine */
	napi_enable(&priv->napi);

	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);

	if (phy_is_internal(priv->phydev))
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	netif_tx_start_all_queues(dev);

	phy_start(priv->phydev);
}
static int bcmgenet_open(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	u32 reg;
	int ret;

	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");

	/* Turn on the clock */
	if (!IS_ERR(priv->clk))
		clk_prepare_enable(priv->clk);

	/* take MAC out of reset */
	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto err_clk_disable;

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (phy_is_internal(priv->phydev)) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto err_fini_dma;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
		goto err_fini_dma;
	}

	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
		goto err_irq0;
	}

	bcmgenet_netif_start(dev);

	return 0;

err_irq0:
	free_irq(priv->irq0, dev);
err_fini_dma:
	bcmgenet_fini_dma(priv);
err_clk_disable:
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);
	return ret;
}
static void bcmgenet_netif_stop(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(priv->phydev);

	bcmgenet_intr_disable(priv);

	/* Wait for pending work items to complete. Since interrupts are
	 * disabled no new work will be scheduled.
	 */
	cancel_work_sync(&priv->bcmgenet_irq_work);

	priv->old_link = -1;
	priv->old_speed = -1;
	priv->old_duplex = -1;
	priv->old_pause = -1;
}
static int bcmgenet_close(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");

	bcmgenet_netif_stop(dev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA disabled must be done before this */
	umac_enable_set(priv, CMD_TX_EN, false);

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	free_irq(priv->irq0, priv);
	free_irq(priv->irq1, priv);

	if (phy_is_internal(priv->phydev))
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	return 0;
}
static void bcmgenet_timeout(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");

	dev->trans_start = jiffies;

	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}
#define MAX_MC_COUNT	16

static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
					 unsigned char *addr,
					 int *i,
					 int *mc)
{
	u32 reg;

	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
			     UMAC_MDF_ADDR + (*i * 4));
	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
			     addr[4] << 8 | addr[5],
			     UMAC_MDF_ADDR + ((*i + 1) * 4));
	reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
	reg |= (1 << (MAX_MC_COUNT - *mc));
	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
	*i += 2;
	(*mc)++;
}
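
/* Each filter entry consumes two UMAC_MDF_ADDR words (16 + 32 bits of
 * the 48-bit MAC address), so 'i' advances by two words per address
 * while 'mc' counts entries against the MAX_MC_COUNT filter slots.
 */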
static void bcmgenet_set_rx_mode(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, mc;
	u32 reg;

	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

	/* Promiscuous mode */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC) {
		reg |= CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
		return;
	} else {
		reg &= ~CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	}

	/* UniMac doesn't support ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		netdev_warn(dev, "ALLMULTI is not supported\n");
		return;
	}

	/* update MDF filter */
	i = 0;
	mc = 0;
	/* Broadcast */
	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
	/* my own address.*/
	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
	/* Unicast list */
	if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
		return;

	if (!netdev_uc_empty(dev))
		netdev_for_each_uc_addr(ha, dev)
			bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
	/* Multicast */
	if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
		return;

	netdev_for_each_mc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
}
/* Set the hardware MAC address. */
static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	/* Setting the MAC address at the hardware level is not possible
	 * without disabling the UniMAC RX/TX enable bits.
	 */
	if (netif_running(dev))
		return -EBUSY;

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}
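
/* Consequently the address can only be changed while the interface is
 * down, e.g. (illustrative, interface name and address are examples):
 *
 *	ip link set eth0 down
 *	ip link set eth0 address 00:10:18:aa:bb:cc
 *	ip link set eth0 up
 *
 * The new address is written to the UniMAC the next time the device
 * is opened.
 */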
static const struct net_device_ops bcmgenet_netdev_ops = {
	.ndo_open		= bcmgenet_open,
	.ndo_stop		= bcmgenet_close,
	.ndo_start_xmit		= bcmgenet_xmit,
	.ndo_tx_timeout		= bcmgenet_timeout,
	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
	.ndo_do_ioctl		= bcmgenet_ioctl,
	.ndo_set_features	= bcmgenet_set_features,
};
/* Array of GENET hardware parameters/characteristics */
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
	[GENET_V1] = {
		.tx_queues = 0,
		.rx_queues = 0,
		.bds_cnt = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.hfb_offset = 0x1000,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x3000,
		.words_per_bd = 2,
	},
	[GENET_V2] = {
		.tx_queues = 4,
		.rx_queues = 4,
		.bds_cnt = 32,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x1000,
		.hfb_reg_offset = 0x2000,
		.rdma_offset = 0x3000,
		.tdma_offset = 0x4000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT,
	},
	[GENET_V3] = {
		.tx_queues = 4,
		.rx_queues = 4,
		.bds_cnt = 32,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x10000,
		.tdma_offset = 0x11000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	},
	[GENET_V4] = {
		.tx_queues = 4,
		.rx_queues = 4,
		.bds_cnt = 32,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		/* V4 descriptors carry a third word for the upper address bits */
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	},
};
/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
	struct bcmgenet_hw_params *params;
	u32 reg;
	u8 major;

	if (GENET_IS_V4(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v4;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V4;
	} else if (GENET_IS_V3(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V3;
	} else if (GENET_IS_V2(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V2;
	} else if (GENET_IS_V1(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V1;
	}

	/* enum genet_version starts at 1 */
	priv->hw_params = &bcmgenet_hw_params[priv->version];
	params = priv->hw_params;

	/* Read GENET HW version */
	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
	major = (reg >> 24 & 0x0f);
	if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	if (major != priv->version) {
		dev_err(&priv->pdev->dev,
			"GENET version mismatch, got: %d, configured for: %d\n",
			major, priv->version);
	}

	/* Print the GENET core version */
	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
		 major, (reg >> 16) & 0x0f, reg & 0xffff);

	/* Store the integrated PHY revision for the MDIO probing function
	 * to pass this information to the PHY driver. The PHY driver expects
	 * to find the PHY major revision in bits 15:8 while the GENET register
	 * stores that information in bits 7:0, account for that.
	 * E.g. a revision field of 0x0021 becomes gphy_rev 0x2100.
	 */
	priv->gphy_rev = (reg & 0xffff) << 8;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!(params->flags & GENET_HAS_40BITS))
		pr_warn("GENET does not support 40-bits PA\n");
#endif

	pr_debug("Configuration for version: %d\n"
		 "TXq: %1d, RXq: %1d, BDs: %1d\n"
		 "BP << en: %2d, BP msk: 0x%05x\n"
		 "HFB count: %2d, QTAQ msk: 0x%05x\n"
		 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
		 "RDMA: 0x%05x, TDMA: 0x%05x\n"
		 "Words/BD: %d\n",
		 priv->version,
		 params->tx_queues, params->rx_queues, params->bds_cnt,
		 params->bp_in_en_shift, params->bp_in_mask,
		 params->hfb_filter_cnt, params->qtag_mask,
		 params->tbuf_offset, params->hfb_offset,
		 params->hfb_reg_offset,
		 params->rdma_offset, params->tdma_offset,
		 params->words_per_bd);
}
static const struct of_device_id bcmgenet_match[] = {
	{ .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
	{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
	{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
	{ },
};
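
/* Illustrative device tree node this table would match; the unit
 * address, register size, interrupt specifiers and MAC address below
 * are placeholders, not taken from a real board file:
 *
 *	ethernet@f0b60000 {
 *		compatible = "brcm,genet-v4";
 *		reg = <0xf0b60000 0x10000>;
 *		interrupts = <0x0 0x10 0x0>, <0x0 0x11 0x0>;
 *		local-mac-address = [ 00 10 18 00 00 01 ];
 *		clock-names = "enet", "enet-wol";
 *	};
 *
 * of_match_node() in bcmgenet_probe() recovers the GENET_Vx enum from
 * the .data pointer to select the matching register layout.
 */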
static int bcmgenet_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id;
	struct bcmgenet_priv *priv;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	int err = -EIO;

	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */
	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1);
	if (!dev) {
		dev_err(&pdev->dev, "can't allocate net device\n");
		return -ENOMEM;
	}

	of_id = of_match_node(bcmgenet_match, dn);
	if (!of_id) {
		err = -EINVAL;
		goto err;
	}

	priv = netdev_priv(dev);
	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	priv->wol_irq = platform_get_irq(pdev, 2);
	if (!priv->irq0 || !priv->irq1) {
		dev_err(&pdev->dev, "can't find IRQs\n");
		err = -EINVAL;
		goto err;
	}

	macaddr = of_get_mac_address(dn);
	if (!macaddr) {
		dev_err(&pdev->dev, "can't find MAC address\n");
		err = -EINVAL;
		goto err;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		err = PTR_ERR(priv->base);
		goto err;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	ether_addr_copy(dev->dev_addr, macaddr);
	dev->watchdog_timeo = 2 * HZ;
	dev->ethtool_ops = &bcmgenet_ethtool_ops;
	dev->netdev_ops = &bcmgenet_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);

	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);

	/* Set hardware features */
	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = true;
	err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
			       dev->name, priv);
	if (!err)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom to account for any possible
	 * features enabling/disabling at runtime
	 */
	dev->needed_headroom += 64;

	netdev_boot_setup_check(dev);

	priv->dev = dev;
	priv->pdev = pdev;
	priv->version = (enum bcmgenet_version)of_id->data;

	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
	if (IS_ERR(priv->clk))
		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");

	if (!IS_ERR(priv->clk))
		clk_prepare_enable(priv->clk);

	bcmgenet_set_hw_params(priv);

	/* Mii wait queue */
	init_waitqueue_head(&priv->wq);
	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
	priv->rx_buf_len = RX_BUF_LENGTH;
	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);

	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
	if (IS_ERR(priv->clk_wol))
		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");

	err = reset_umac(priv);
	if (err)
		goto err_clk_disable;

	err = bcmgenet_mii_init(dev);
	if (err)
		goto err_clk_disable;

	/* Set up the number of real queues + 1 (GENET_V1 has 0 hardware
	 * queues, just the ring 16 descriptor-based TX queue)
	 */
	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);

	/* libphy will determine the link state */
	netif_carrier_off(dev);

	/* Turn off the main clock, WOL clock is handled separately */
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	err = register_netdev(dev);
	if (err)
		goto err;

	return err;

err_clk_disable:
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);
err:
	free_netdev(dev);
	return err;
}
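
/* Driver removal. unregister_netdev() may invoke bcmgenet_close(),
 * which still talks to the PHY, so the MII bus is only torn down once
 * the netdev is gone.
 */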
static int bcmgenet_remove(struct platform_device *pdev)
{
	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	unregister_netdev(priv->dev);
	bcmgenet_mii_exit(priv->dev);
	free_netdev(priv->dev);

	return 0;
}
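
/* System sleep support. Suspend tears the data path down much like
 * bcmgenet_close() does, but when Wake-on-LAN is armed the WOL block
 * stays powered from the slow "enet-wol" clock; resume rebuilds the
 * UniMAC, PHY and DMA state from scratch.
 */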
#ifdef CONFIG_PM_SLEEP
static int bcmgenet_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	if (!netif_running(dev))
		return 0;

	bcmgenet_netif_stop(dev);

	phy_suspend(priv->phydev);

	netif_device_detach(dev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA must be disabled before this */
	umac_enable_set(priv, CMD_TX_EN, false);

	/* Reclaim any pending TX buffers and free the rings */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
	if (device_may_wakeup(d) && priv->wolopts) {
		bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
		clk_prepare_enable(priv->clk_wol);
	}

	/* Turn off the clocks */
	clk_disable_unprepare(priv->clk);

	return 0;
}
static int bcmgenet_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	int ret;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	/* Turn on the clock */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto out_clk_disable;

	/* From a WOL-enabled suspend, switch back to the regular clock */
	if (priv->wolopts)
		clk_disable_unprepare(priv->clk_wol);

	phy_init_hw(priv->phydev);
	/* Speed settings must be restored */
	bcmgenet_mii_config(priv->dev);

	/* Disable the ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (phy_is_internal(priv->phydev)) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	if (priv->wolopts)
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto out_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	netif_device_attach(dev);

	phy_resume(priv->phydev);

	bcmgenet_netif_start(dev);

	return 0;

out_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */
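
/* SIMPLE_DEV_PM_OPS() wraps the callbacks in SET_SYSTEM_SLEEP_PM_OPS(),
 * which expands to nothing when CONFIG_PM_SLEEP is disabled, so the
 * #ifdef'd functions above are safely left unreferenced in that case.
 */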
static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
static struct platform_driver bcmgenet_driver = {
	.probe	= bcmgenet_probe,
	.remove	= bcmgenet_remove,
	.driver	= {
		.name	= "bcmgenet",
		.owner	= THIS_MODULE,
		.of_match_table = bcmgenet_match,
		.pm	= &bcmgenet_pm_ops,
	},
};
module_platform_driver(bcmgenet_driver);
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
MODULE_ALIAS("platform:bcmgenet");
MODULE_LICENSE("GPL");