 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>

#include <asm/cacheflush.h>
static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_init(struct net_device *ndev);

#define DRIVER_NAME "fec"
#define FEC_ENET_GET_QUEUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
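/* On the AVB-capable FECs, hardware ring 1 carries AVB class A traffic,
 * ring 2 class B and ring 0 best effort, so this macro rotates queue
 * index 0/1/2 onto rings 1/2/0 to give the class queues priority.
 */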
/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE (1 << 5)
#define FEC_ENET_RSEM_V 0x84
#define FEC_ENET_RSFL_V 16
#define FEC_ENET_RAEM_V 0x8
#define FEC_ENET_RAFL_V 0x8
#define FEC_ENET_OPD_V 0xFFF0
static struct platform_device_id fec_devtype[] = {
		/* keep it for coldfire */
		.driver_data = FEC_QUIRK_USE_GASKET,
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
		.name = "mvf600-fec",
		.driver_data = FEC_QUIRK_ENET_MAC,
		.name = "imx6sx-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
				FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE,
MODULE_DEVICE_TABLE(platform, fec_devtype);
	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
	IMX27_FEC,	/* runs on i.mx27/35/51 */
static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
MODULE_DEVICE_TABLE(of, fec_dt_ids);
static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#if defined(CONFIG_M5272)
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
#if defined(CONFIG_NETtel)
#define FEC_FLASHMAC 0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define FEC_FLASHMAC 0xf0006000
#elif defined(CONFIG_CANCam)
#define FEC_FLASHMAC 0xf0020000
#elif defined(CONFIG_M5272C3)
#define FEC_FLASHMAC (0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC 0xffc0406b
#define FEC_FLASHMAC 0
#endif /* CONFIG_M5272 */
/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
#define PKT_MAXBUF_SIZE 1522
#define PKT_MINBUF_SIZE 64
#define PKT_MAXBLR_SIZE 1536
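/* 1522 is a VLAN-tagged Ethernet frame including FCS; the receive buffer
 * limit is rounded up to 1536, the next multiple of 16, which the
 * controller's maximum receive buffer size register requires.
 */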
/* FEC receive acceleration */
#define FEC_RACC_IPDIS (1 << 1)
#define FEC_RACC_PRODIS (1 << 2)
#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
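/* IPDIS/PRODIS make the controller discard frames with a bad IPv4 header
 * or protocol (TCP/UDP/ICMP) checksum; both are set when receive checksum
 * offload is enabled.
 */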
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
#define OPT_FRAME_SIZE 0
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST (1 << 30)
#define FEC_MMFR_OP_READ (2 << 28)
#define FEC_MMFR_OP_WRITE (1 << 28)
#define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
#define FEC_MMFR_TA (2 << 16)
#define FEC_MMFR_DATA(v) (v & 0xffff)
/* FEC ECR bits definition */
#define FEC_ECR_MAGICEN (1 << 2)
#define FEC_ECR_SLEEP (1 << 3)

#define FEC_MII_TIMEOUT 30000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

#define FEC_PAUSE_FLAG_AUTONEG 0x1
#define FEC_PAUSE_FLAG_ENABLE 0x2
#define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0)
#define FEC_WOL_FLAG_ENABLE (0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2)

#define COPYBREAK_DEFAULT 256

#define TSO_HEADER_SIZE 128
/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS 100
#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
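/* worst case: each TSO segment consumes one descriptor for its rebuilt
 * header and at least one for payload, hence the factor of two, plus
 * room for the remaining page fragments of the skb
 */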
#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
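/* true when a Tx buffer address lies inside the DMA region pre-allocated
 * for TSO headers; those buffers belong to the driver and must not be
 * unmapped when the descriptor is reclaimed
 */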
struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
				      struct fec_enet_private *fep,
	struct bufdesc *new_bd = bdp + 1;
	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
	struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
	struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
	struct bufdesc_ex *ex_base;
	struct bufdesc *base;

	if (bdp >= txq->tx_bd_base) {
		base = txq->tx_bd_base;
		ring_size = txq->tx_ring_size;
		ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
		base = rxq->rx_bd_base;
		ring_size = rxq->rx_ring_size;
		ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;

		return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
			ex_base : ex_new_bd);

	return (new_bd >= (base + ring_size)) ?
struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
				      struct fec_enet_private *fep,
	struct bufdesc *new_bd = bdp - 1;
	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
	struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
	struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
	struct bufdesc_ex *ex_base;
	struct bufdesc *base;

	if (bdp >= txq->tx_bd_base) {
		base = txq->tx_bd_base;
		ring_size = txq->tx_ring_size;
		ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
		base = rxq->rx_bd_base;
		ring_size = rxq->rx_ring_size;
		ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;

		return (struct bufdesc *)((ex_new_bd < ex_base) ?
			(ex_new_bd + ring_size) : ex_new_bd);

	return (new_bd < base) ? (new_bd + ring_size) : new_bd;
static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
				 struct fec_enet_private *fep)
	return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;

static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
					struct fec_enet_priv_tx_q *txq)
	entries = ((const char *)txq->dirty_tx -
		   (const char *)txq->cur_tx) / fep->bufdesc_size - 1;
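	/* a non-positive count means the free region wraps past the end of
	 * the ring, so add the ring size back in
	 */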
	return entries > 0 ? entries : entries + txq->tx_ring_size;
static void swap_buffer(void *bufaddr, int len)
	unsigned int *buf = bufaddr;

	for (i = 0; i < len; i += 4, buf++)

static void swap_buffer2(void *dst_buf, void *src_buf, int len)
	unsigned int *src = src_buf;
	unsigned int *dst = dst_buf;

	for (i = 0; i < len; i += 4, src++, dst++)
static void fec_dump(struct net_device *ndev)
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;

	netdev_info(ndev, "TX ring dump\n");
	pr_info("Nr     SC     addr       len  SKB\n");

	txq = fep->tx_queue[0];
	bdp = txq->tx_bd_base;

		pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
			bdp == txq->cur_tx ? 'S' : ' ',
			bdp == txq->dirty_tx ? 'H' : ' ',
			bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
			txq->tx_skbuff[index]);
		bdp = fec_enet_get_nextdesc(bdp, fep, 0);
	} while (bdp != txq->tx_bd_base);
static inline bool is_ipv4_pkt(struct sk_buff *skb)
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;

fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
	/* Only run for packets requiring a checksum. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)

	if (unlikely(skb_cow_head(skb, 0)))

	if (is_ipv4_pkt(skb))
		ip_hdr(skb)->check = 0;
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
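	/* zeroing the checksum field lets the hardware insert the final sum
	 * itself (BD_ENET_TX_PINS/IINS are set on the descriptor later)
	 */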
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
			     struct net_device *ndev)
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp = txq->cur_tx;
	struct bufdesc_ex *ebdp;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned short queue = skb_get_queue_mapping(skb);
	unsigned short status;
	unsigned int estatus = 0;
	skb_frag_t *this_frag;

	for (frag = 0; frag < nr_frags; frag++) {
		this_frag = &skb_shinfo(skb)->frags[frag];
		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
		ebdp = (struct bufdesc_ex *)bdp;

		status = bdp->cbd_sc;
		status &= ~BD_ENET_TX_STATS;
		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
		frag_len = skb_shinfo(skb)->frags[frag].size;

		/* Handle the last BD specially */
		if (frag == nr_frags - 1) {
			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
			if (fep->bufdesc_ex) {
				estatus |= BD_ENET_TX_INT;
				if (unlikely(skb_shinfo(skb)->tx_flags &
					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
					estatus |= BD_ENET_TX_TS;

		if (fep->bufdesc_ex) {
			if (fep->quirks & FEC_QUIRK_HAS_AVB)
				estatus |= FEC_TX_BD_FTYPE(queue);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
			ebdp->cbd_esc = estatus;

		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;

		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
		if (((unsigned long) bufaddr) & fep->tx_align ||
			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
			bufaddr = txq->tx_bounce[index];

			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
				swap_buffer(bufaddr, frag_len);

		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
		if (dma_mapping_error(&fep->pdev->dev, addr)) {
			dev_kfree_skb_any(skb);
			netdev_err(ndev, "Tx DMA memory map failed\n");
			goto dma_mapping_error;

		bdp->cbd_bufaddr = addr;
		bdp->cbd_datlen = frag_len;
		bdp->cbd_sc = status;

	for (i = 0; i < frag; i++) {
		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				 bdp->cbd_datlen, DMA_TO_DEVICE);
static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb, struct net_device *ndev)
	struct fec_enet_private *fep = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct bufdesc *bdp, *last_bdp;
	unsigned short status;
	unsigned short buflen;
	unsigned short queue;
	unsigned int estatus = 0;

	entries_free = fec_enet_get_free_txdesc_num(fep, txq);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		dev_kfree_skb_any(skb);
		netdev_err(ndev, "NOT enough BD for SG!\n");

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);

	/* Fill in a Tx ring entry */
	status = bdp->cbd_sc;
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	buflen = skb_headlen(skb);

	queue = skb_get_queue_mapping(skb);
	index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
	if (((unsigned long) bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, buflen);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, buflen);

	/* Push the data cache so the CPM does not get stale memory data. */
	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		netdev_err(ndev, "Tx DMA memory map failed\n");

		ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);

		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
		if (fep->bufdesc_ex) {
			estatus = BD_ENET_TX_INT;
			if (unlikely(skb_shinfo(skb)->tx_flags &
				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
				estatus |= BD_ENET_TX_TS;

	if (fep->bufdesc_ex) {

		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(queue);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

		ebdp->cbd_esc = estatus;

	last_bdp = txq->cur_tx;
	index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;

	bdp->cbd_datlen = buflen;
	bdp->cbd_bufaddr = addr;

	/* Send it on its way. Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);

	skb_tx_timestamp(skb);

	/* Trigger transmission start */
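	/* any value written to TDAR (X_DES_ACTIVE) asks the controller to
	 * rescan the Tx descriptor ring; the value itself is ignored
	 */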
	writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
			  struct net_device *ndev,
			  struct bufdesc *bdp, int index, char *data,
			  int size, bool last_tcp, bool is_last)
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	unsigned short queue = skb_get_queue_mapping(skb);
	unsigned short status;
	unsigned int estatus = 0;

	status = bdp->cbd_sc;
	status &= ~BD_ENET_TX_STATS;

	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	if (((unsigned long) data) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], data, size);
		data = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, size);

	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_BUSY;

	bdp->cbd_datlen = size;
	bdp->cbd_bufaddr = addr;

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(queue);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_esc = estatus;

	/* Handle the last BD specially */
		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
		status |= BD_ENET_TX_INTR;
			ebdp->cbd_esc |= BD_ENET_TX_INT;

	bdp->cbd_sc = status;
fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
			 struct sk_buff *skb, struct net_device *ndev,
			 struct bufdesc *bdp, int index)
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	unsigned short queue = skb_get_queue_mapping(skb);
	unsigned long dmabuf;
	unsigned short status;
	unsigned int estatus = 0;

	status = bdp->cbd_sc;
	status &= ~BD_ENET_TX_STATS;
	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
	if (((unsigned long)bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, hdr_len);

		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
					hdr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
			dev_kfree_skb_any(skb);
			netdev_err(ndev, "Tx DMA memory map failed\n");
			return NETDEV_TX_BUSY;

	bdp->cbd_bufaddr = dmabuf;
	bdp->cbd_datlen = hdr_len;

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(queue);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_esc = estatus;

	bdp->cbd_sc = status;
static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
				   struct net_device *ndev)
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len, data_left;
	struct bufdesc *bdp = txq->cur_tx;
	unsigned short queue = skb_get_queue_mapping(skb);
	unsigned int index = 0;

	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
		dev_kfree_skb_any(skb);
		netdev_err(ndev, "NOT enough BD for TSO!\n");

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {

		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
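		/* tso_build_hdr() copies the original headers and patches the
		 * per-segment fields (IP id/len, TCP seq and flags) into this
		 * slot of the tso_hdrs region
		 */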
		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);

		while (data_left > 0) {
			size = min_t(int, tso.size, data_left);
			bdp = fec_enet_get_nextdesc(bdp, fep, queue);
			index = fec_enet_get_bd_index(txq->tx_bd_base,
			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,

			tso_build_data(skb, &tso, size);

		bdp = fec_enet_get_nextdesc(bdp, fep, queue);

	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;

	skb_tx_timestamp(skb);

	/* Trigger transmission start */
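	/* ERR007885 workaround: on affected cores a TDAR write can be lost
	 * while the controller is busy clearing it, so only rearm the ring
	 * once TDAR reads back inactive (sampled a few times to bridge the
	 * race window)
	 */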
	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
		writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));

	/* TODO: Release all used data descriptors for TSO */
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned short queue;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;

	queue = skb_get_queue_mapping(skb);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(ndev, queue);

		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
		ret = fec_enet_txq_submit_skb(txq, skb, ndev);

	entries_free = fec_enet_get_free_txdesc_num(fep, txq);
	if (entries_free <= txq->tx_stop_threshold)
		netif_tx_stop_queue(nq);
/* Init RX & TX buffer descriptors
 */
static void fec_enet_bd_init(struct net_device *dev)
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;

	for (q = 0; q < fep->num_rx_queues; q++) {
		/* Initialize the receive buffer descriptors. */
		rxq = fep->rx_queue[q];
		bdp = rxq->rx_bd_base;

		for (i = 0; i < rxq->rx_ring_size; i++) {

			/* Initialize the BD for every fragment in the page. */
			if (bdp->cbd_bufaddr)
				bdp->cbd_sc = BD_ENET_RX_EMPTY;
			bdp = fec_enet_get_nextdesc(bdp, fep, q);

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, fep, q);
		bdp->cbd_sc |= BD_SC_WRAP;

		rxq->cur_rx = rxq->rx_bd_base;

	for (q = 0; q < fep->num_tx_queues; q++) {
		/* ...and the same for transmit */
		txq = fep->tx_queue[q];
		bdp = txq->tx_bd_base;

		for (i = 0; i < txq->tx_ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			if (txq->tx_skbuff[i]) {
				dev_kfree_skb_any(txq->tx_skbuff[i]);
				txq->tx_skbuff[i] = NULL;
			bdp->cbd_bufaddr = 0;
			bdp = fec_enet_get_nextdesc(bdp, fep, q);

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, fep, q);
		bdp->cbd_sc |= BD_SC_WRAP;
static void fec_enet_active_rxring(struct net_device *ndev)
	struct fec_enet_private *fep = netdev_priv(ndev);

	for (i = 0; i < fep->num_rx_queues; i++)
		writel(0, fep->hwp + FEC_R_DES_ACTIVE(i));

static void fec_enet_enable_ring(struct net_device *ndev)
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;

	for (i = 0; i < fep->num_rx_queues; i++) {
		rxq = fep->rx_queue[i];
		writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i));
		writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));

			writel(RCMR_MATCHEN | RCMR_CMP(i),
			       fep->hwp + FEC_RCMR(i));

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];
		writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i));

			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
			       fep->hwp + FEC_DMA_CFG(i));

static void fec_enet_reset_skb(struct net_device *ndev)
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];

		for (j = 0; j < txq->tx_ring_size; j++) {
			if (txq->tx_skbuff[j]) {
				dev_kfree_skb_any(txq->tx_skbuff[j]);
				txq->tx_skbuff[j] = NULL;
 * This function is called to start or restart the FEC during a link
 * change, transmit timeout, or to reconfigure the FEC. The network
 * packet processing for this device must be stopped before this call.
 */
fec_restart(struct net_device *ndev)
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
	u32 ecntl = 0x2; /* ETHEREN */

	/* Whack a reset. We should wait for this.
	 * For the i.MX6SX SoC, ENET sits on the AXI bus, so we disable the
	 * MAC instead of resetting the MAC itself.
	 */
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(0, fep->hwp + FEC_ECNTRL);
		writel(1, fep->hwp + FEC_ECNTRL);

	/*
	 * enet-mac reset will reset the MAC address registers too,
	 * so we need to reconfigure them.
	 */
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);

	/* Clear any outstanding interrupt. */
	writel(0xffffffff, fep->hwp + FEC_IEVENT);

	fec_enet_bd_init(ndev);

	fec_enet_enable_ring(ndev);

	/* Reset tx SKB buffers. */
	fec_enet_reset_skb(ndev);

	/* Enable MII mode */
	if (fep->full_duplex == DUPLEX_FULL) {
		writel(0x04, fep->hwp + FEC_X_CNTRL);
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#if !defined(CONFIG_M5272)
	/* set RX checksum */
	val = readl(fep->hwp + FEC_RACC);
	if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
		val |= FEC_RACC_OPTIONS;
		val &= ~FEC_RACC_OPTIONS;
	writel(val, fep->hwp + FEC_RACC);
	 * The phy interface and speed need to be configured
	 * differently on enet-mac.
	 */
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)

		/* 1G, 100M or 10M */
		if (fep->phy_dev->speed == SPEED_1000)
		else if (fep->phy_dev->speed == SPEED_100)

#ifdef FEC_MIIGSK_ENR
	if (fep->quirks & FEC_QUIRK_USE_GASKET) {
		/* disable the gasket and wait */
		writel(0, fep->hwp + FEC_MIIGSK_ENR);
		while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)

		/*
		 * configure the gasket:
		 * RMII, 50 MHz, no loopback, no echo
		 * MII, 25 MHz, no loopback, no echo
		 */
		cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
			? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
		if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
			cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
		writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

		/* re-enable the gasket */
		writel(2, fep->hwp + FEC_MIIGSK_ENR);
#if !defined(CONFIG_M5272)
	/* enable pause frame */
	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
	     fep->phy_dev && fep->phy_dev->pause)) {
		rcntl |= FEC_ENET_FCE;

		/* set FIFO threshold parameter to reduce overrun */
		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);

		rcntl &= ~FEC_ENET_FCE;
#endif /* !defined(CONFIG_M5272) */
	writel(rcntl, fep->hwp + FEC_R_CNTRL);

	/* Setup multicast filter. */
	set_multicast_list(ndev);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);

	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* enable ENET endian swap */
		/* enable ENET store and forward mode */
		writel(1 << 8, fep->hwp + FEC_X_WMRK);

	if (fep->bufdesc_ex)

#ifndef CONFIG_M5272
	/* Enable the MIB statistic event counters */
	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);

	/* And last, enable the transmit and receive processing */
	writel(ecntl, fep->hwp + FEC_ECNTRL);
	fec_enet_active_rxring(ndev);

	if (fep->bufdesc_ex)
		fec_ptp_start_cyclecounter(ndev);

	/* Enable interrupts we wish to service */
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);

	/* Init the interrupt coalescing */
	fec_enet_itr_coal_init(ndev);
fec_stop(struct net_device *ndev)
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);

	/* We cannot expect a graceful transmit stop without link !!! */
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			netdev_err(ndev, "Graceful transmit stop did not complete!\n");

	/* Whack a reset. We should wait for this.
	 * For the i.MX6SX SoC, ENET sits on the AXI bus, so we disable the
	 * MAC instead of resetting the MAC itself.
	 */
	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB) {
			writel(0, fep->hwp + FEC_ECNTRL);
			writel(1, fep->hwp + FEC_ECNTRL);
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
		writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
		val = readl(fep->hwp + FEC_ECNTRL);
		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
		writel(val, fep->hwp + FEC_ECNTRL);

		if (pdata && pdata->sleep_mode_enable)
			pdata->sleep_mode_enable(true);

	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	/* We have to keep ENET enabled to keep the MII interrupt working */
	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
	    !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		writel(2, fep->hwp + FEC_ECNTRL);
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
fec_timeout(struct net_device *ndev)
	struct fec_enet_private *fep = netdev_priv(ndev);

	ndev->stats.tx_errors++;

	schedule_work(&fep->tx_timeout_work);

static void fec_enet_timeout_work(struct work_struct *work)
	struct fec_enet_private *fep =
		container_of(work, struct fec_enet_private, tx_timeout_work);
	struct net_device *ndev = fep->netdev;

	if (netif_device_present(ndev) || netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_wake_queue(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
		  struct skb_shared_hwtstamps *hwtstamps)
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	ns = timecounter_cyc2time(&fep->tc, ts);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);

fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
	struct fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	fep = netdev_priv(ndev);

	queue_id = FEC_ENET_GET_QUEUE(queue_id);

	txq = fep->tx_queue[queue_id];
	nq = netdev_get_tx_queue(ndev, queue_id);
	bdp = txq->dirty_tx;

	/* get next bdp of dirty_tx */
	bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {

		/* current queue is empty */
		if (bdp == txq->cur_tx)

		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);

		skb = txq->tx_skbuff[index];
		txq->tx_skbuff[index] = NULL;
		if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
					 bdp->cbd_datlen, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;
			bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);

		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			      BD_ENET_TX_RL | BD_ENET_TX_UN |
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
			struct skb_shared_hwtstamps shhwtstamps;
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
			skb_tstamp_tx(skb, &shhwtstamps);

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);

		txq->dirty_tx = bdp;

		/* Update pointer to next buffer descriptor to be transmitted */
		bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (netif_queue_stopped(ndev)) {
			entries_free = fec_enet_get_free_txdesc_num(fep, txq);
			if (entries_free >= txq->tx_wake_threshold)
				netif_tx_wake_queue(nq);

	/* ERR006538: Keep the transmitter going */
	if (bdp != txq->cur_tx &&
	    readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0)
		writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id));
fec_enet_tx(struct net_device *ndev)
	struct fec_enet_private *fep = netdev_priv(ndev);

	/* First process the class A queue, then class B and the best-effort queue */
	for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
		clear_bit(queue_id, &fep->work_tx);
		fec_enet_tx_queue(ndev, queue_id);
fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
	struct fec_enet_private *fep = netdev_priv(ndev);

	off = ((unsigned long)skb->data) & fep->rx_align;
		skb_reserve(skb, fep->rx_align + 1 - off);

	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
					  FEC_ENET_RX_FRSIZE - fep->rx_align,
	if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
		if (net_ratelimit())
			netdev_err(ndev, "Rx DMA memory map failed\n");

static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
			       struct bufdesc *bdp, u32 length, bool swap)
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sk_buff *new_skb;

	if (length > fep->rx_copybreak)

	new_skb = netdev_alloc_skb(ndev, length);

	dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_RX_FRSIZE - fep->rx_align,
		memcpy(new_skb->data, (*skb)->data, length);
		swap_buffer2(new_skb->data, (*skb)->data, length);
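	/* the original DMA buffer stays mapped and is recycled straight back
	 * to the ring; for short frames copying beats a full unmap/alloc/map
	 * cycle
	 */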
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb_new = NULL;
	struct sk_buff *skb;
	int pkt_received = 0;
	struct bufdesc_ex *ebdp = NULL;
	bool vlan_packet_rcvd = false;
	bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
	queue_id = FEC_ENET_GET_QUEUE(queue_id);
	rxq = fep->rx_queue[queue_id];

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			netdev_err(ndev, "rcv is not +last\n");

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			      BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			ndev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				ndev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer. So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			ndev->stats.rx_errors++;
			ndev->stats.rx_frame_errors++;
			goto rx_processing_done;

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		ndev->stats.rx_bytes += pkt_len;

		index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
		skb = rxq->rx_skbuff[index];

		/* The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
		if (!is_copybreak) {
			skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
			if (unlikely(!skb_new)) {
				ndev->stats.rx_dropped++;
				goto rx_processing_done;
			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
					 FEC_ENET_RX_FRSIZE - fep->rx_align,

		prefetch(skb->data - NET_IP_ALIGN);
		skb_put(skb, pkt_len - 4);
		if (!is_copybreak && need_swap)
			swap_buffer(data, pkt_len);

		/* Extract the enhanced buffer descriptor */
		if (fep->bufdesc_ex)
			ebdp = (struct bufdesc_ex *)bdp;

		/* If this is a VLAN packet remove the VLAN Tag */
		vlan_packet_rcvd = false;
		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
			/* Push and remove the vlan tag */
			struct vlan_hdr *vlan_header =
				(struct vlan_hdr *) (data + ETH_HLEN);
			vlan_tag = ntohs(vlan_header->h_vlan_TCI);

			vlan_packet_rcvd = true;

			skb_copy_to_linear_data_offset(skb, VLAN_HLEN,
						       data, (2 * ETH_ALEN));
			skb_pull(skb, VLAN_HLEN);

		skb->protocol = eth_type_trans(skb, ndev);

		/* Get receive timestamp from the skb */
		if (fep->hwts_rx_en && fep->bufdesc_ex)
			fec_enet_hwtstamp(fep, ebdp->ts,
					  skb_hwtstamps(skb));

		if (fep->bufdesc_ex &&
		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
			if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
				/* don't check it */
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				skb_checksum_none_assert(skb);

		/* Handle received VLAN packets */
		if (vlan_packet_rcvd)
			__vlan_hwaccel_put_tag(skb,

		napi_gro_receive(&fep->napi, skb);

			dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
						   FEC_ENET_RX_FRSIZE - fep->rx_align,
			rxq->rx_skbuff[index] = skb_new;
			fec_enet_new_rxbdp(ndev, bdp, skb_new);

		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = BD_ENET_RX_INT;

		/* Update BD pointer to next entry */
		bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);

		/* Doing this here will keep the FEC running while we process
		 * incoming frames. On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id));

	return pkt_received;
fec_enet_rx(struct net_device *ndev, int budget)
	int pkt_received = 0;
	struct fec_enet_private *fep = netdev_priv(ndev);

	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
		clear_bit(queue_id, &fep->work_rx);
		pkt_received += fec_enet_rx_queue(ndev,
						  budget - pkt_received, queue_id);
	return pkt_received;
fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
	if (int_events == 0)

	if (int_events & FEC_ENET_RXF)
		fep->work_rx |= (1 << 2);
	if (int_events & FEC_ENET_RXF_1)
		fep->work_rx |= (1 << 0);
	if (int_events & FEC_ENET_RXF_2)
		fep->work_rx |= (1 << 1);

	if (int_events & FEC_ENET_TXF)
		fep->work_tx |= (1 << 2);
	if (int_events & FEC_ENET_TXF_1)
		fep->work_tx |= (1 << 0);
	if (int_events & FEC_ENET_TXF_2)
		fep->work_tx |= (1 << 1);
fec_enet_interrupt(int irq, void *dev_id)
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	irqreturn_t ret = IRQ_NONE;

	int_events = readl(fep->hwp + FEC_IEVENT);
	writel(int_events, fep->hwp + FEC_IEVENT);
	fec_enet_collect_events(fep, int_events);

	if (fep->work_tx || fep->work_rx) {

		if (napi_schedule_prep(&fep->napi)) {
			/* Disable the NAPI interrupts */
			writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
			__napi_schedule(&fep->napi);

	if (int_events & FEC_ENET_MII) {
		complete(&fep->mdio_done);

		fec_ptp_check_pps_event(fep);

static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
	struct net_device *ndev = napi->dev;
	struct fec_enet_private *fep = netdev_priv(ndev);

	pkts = fec_enet_rx(ndev, budget);

	if (pkts < budget) {
		napi_complete(napi);
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
/* ------------------------------------------------------------------------- */
static void fec_get_mac(struct net_device *ndev)
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
	unsigned char *iap, tmpaddr[ETH_ALEN];
	 * try to get the MAC address in the following order:
	 *
	 * 1) module parameter via kernel command line, in the form
	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
	 * 2) from device tree data
	 */
	if (!is_valid_ether_addr(iap)) {
		struct device_node *np = fep->pdev->dev.of_node;
			const char *mac = of_get_mac_address(np);
				iap = (unsigned char *) mac;

	 * 3) from flash or fuse (via platform data)
	 */
	if (!is_valid_ether_addr(iap)) {
			iap = (unsigned char *)FEC_FLASHMAC;
			iap = (unsigned char *)&pdata->mac;

	 * 4) FEC mac registers set by bootloader
	 */
	if (!is_valid_ether_addr(iap)) {
		*((__be32 *) &tmpaddr[0]) =
			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
		*((__be16 *) &tmpaddr[4]) =
			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);

	 * 5) random mac address
	 */
	if (!is_valid_ether_addr(iap)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
		eth_hw_addr_random(ndev);
		netdev_info(ndev, "Using random MAC address: %pM\n",

	memcpy(ndev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using macaddr */
		ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
/* ------------------------------------------------------------------------- */

static void fec_enet_adjust_link(struct net_device *ndev)
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = fep->phy_dev;
	int status_change = 0;

	/* Prevent a state halted on mii error */
	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
		phy_dev->state = PHY_RESUMING;

	 * If the netdev is down, or is going down, we're not interested
	 * in link state events, so just mark our idea of the link as down
	 * and ignore the event.
	 */
	if (!netif_running(ndev) || !netif_device_present(ndev)) {
	} else if (phy_dev->link) {
			fep->link = phy_dev->link;

		if (fep->full_duplex != phy_dev->duplex) {
			fep->full_duplex = phy_dev->duplex;

		if (phy_dev->speed != fep->speed) {
			fep->speed = phy_dev->speed;

		/* if any of the above changed restart the FEC */
		if (status_change) {
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
			netif_wake_queue(ndev);
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);

			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
			fep->link = phy_dev->link;

		phy_print_status(phy_dev);
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a read op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
	       FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
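	/* the MMFR write shifts out a complete clause-22 management frame;
	 * the MII event interrupt completes fep->mdio_done when it is done
	 */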
	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
						usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		netdev_err(fep->netdev, "MDIO read timeout\n");

	return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));

static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a write op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
	       FEC_MMFR_TA | FEC_MMFR_DATA(value),
	       fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
						usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		netdev_err(fep->netdev, "MDIO write timeout\n");
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
	struct fec_enet_private *fep = netdev_priv(ndev);

	ret = clk_prepare_enable(fep->clk_ahb);
	ret = clk_prepare_enable(fep->clk_ipg);
		goto failed_clk_ipg;
	if (fep->clk_enet_out) {
		ret = clk_prepare_enable(fep->clk_enet_out);
			goto failed_clk_enet_out;

		mutex_lock(&fep->ptp_clk_mutex);
		ret = clk_prepare_enable(fep->clk_ptp);
			mutex_unlock(&fep->ptp_clk_mutex);
			goto failed_clk_ptp;
		fep->ptp_clk_on = true;
		mutex_unlock(&fep->ptp_clk_mutex);

	ret = clk_prepare_enable(fep->clk_ref);
		goto failed_clk_ref;

	clk_disable_unprepare(fep->clk_ahb);
	clk_disable_unprepare(fep->clk_ipg);
	if (fep->clk_enet_out)
		clk_disable_unprepare(fep->clk_enet_out);
		mutex_lock(&fep->ptp_clk_mutex);
		clk_disable_unprepare(fep->clk_ptp);
		fep->ptp_clk_on = false;
		mutex_unlock(&fep->ptp_clk_mutex);
		clk_disable_unprepare(fep->clk_ref);

	clk_disable_unprepare(fep->clk_ref);
	if (fep->clk_enet_out)
		clk_disable_unprepare(fep->clk_enet_out);
failed_clk_enet_out:
	clk_disable_unprepare(fep->clk_ipg);
	clk_disable_unprepare(fep->clk_ahb);
static int fec_enet_mii_probe(struct net_device *ndev)
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = NULL;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	char phy_name[MII_BUS_ID_SIZE + 3];
	int dev_id = fep->dev_id;

	fep->phy_dev = NULL;

	if (fep->phy_node) {
		phy_dev = of_phy_connect(ndev, fep->phy_node,
					 &fec_enet_adjust_link, 0,
					 fep->phy_interface);
		/* check for attached phy */
		for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
			if ((fep->mii_bus->phy_mask & (1 << phy_id)))
			if (fep->mii_bus->phy_map[phy_id] == NULL)
			if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
			strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);

		if (phy_id >= PHY_MAX_ADDR) {
			netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
			strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);

		snprintf(phy_name, sizeof(phy_name),
			 PHY_ID_FMT, mdio_bus_id, phy_id);
		phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
				      fep->phy_interface);

	if (IS_ERR(phy_dev)) {
		netdev_err(ndev, "could not attach to PHY\n");
		return PTR_ERR(phy_dev);

	/* mask with MAC supported features */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
		phy_dev->supported &= PHY_GBIT_FEATURES;
		phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
#if !defined(CONFIG_M5272)
		phy_dev->supported |= SUPPORTED_Pause;
		phy_dev->supported &= PHY_BASIC_FEATURES;

	phy_dev->advertising = phy_dev->supported;

	fep->phy_dev = phy_dev;
	fep->full_duplex = 0;

	netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		    fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
static int fec_enet_mii_init(struct platform_device *pdev)
	static struct mii_bus *fec0_mii_bus;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *node;
	int err = -ENXIO, i;
	/*
	 * The dual fec interfaces are not equivalent on enet-mac.
	 * Here are the differences:
	 *
	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
	 *  - fec0 acts as the 1588 time master while fec1 is slave
	 *  - external PHYs can only be configured by fec0
	 *
	 * That is to say fec1 cannot work independently. It only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both PHYs are attached to the
	 * fec0 MDIO interface in board designs, and need to be configured by
	if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
		/* fec1 uses fec0 mii_bus */
		if (mii_cnt && fec0_mii_bus) {
			fep->mii_bus = fec0_mii_bus;

	fep->mii_timeout = 0;

	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
	 * Reference Manual has an error on this, and gets fixed on i.MX6Q
	 */
	fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
	if (fep->quirks & FEC_QUIRK_ENET_MAC)
	fep->phy_speed <<= 1;
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
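	/* e.g. assuming a 66 MHz ipg clock, DIV_ROUND_UP() yields 14, for an
	 * MDC of about 66 MHz / (14 * 2) = 2.36 MHz on FEC, just inside the
	 * 2.5 MHz IEEE 802.3 limit
	 */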
	fep->mii_bus = mdiobus_alloc();
	if (fep->mii_bus == NULL) {

	fep->mii_bus->name = "fec_enet_mii_bus";
	fep->mii_bus->read = fec_enet_mdio_read;
	fep->mii_bus->write = fec_enet_mdio_write;
	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, fep->dev_id + 1);
	fep->mii_bus->priv = fep;
	fep->mii_bus->parent = &pdev->dev;

	fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!fep->mii_bus->irq) {
		goto err_out_free_mdiobus;

	for (i = 0; i < PHY_MAX_ADDR; i++)
		fep->mii_bus->irq[i] = PHY_POLL;

	node = of_get_child_by_name(pdev->dev.of_node, "mdio");
		err = of_mdiobus_register(fep->mii_bus, node);
		err = mdiobus_register(fep->mii_bus);
		goto err_out_free_mdio_irq;

	/* save fec0 mii_bus */
	if (fep->quirks & FEC_QUIRK_ENET_MAC)
		fec0_mii_bus = fep->mii_bus;

err_out_free_mdio_irq:
	kfree(fep->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(fep->mii_bus);
static void fec_enet_mii_remove(struct fec_enet_private *fep)
	if (--mii_cnt == 0) {
		mdiobus_unregister(fep->mii_bus);
		kfree(fep->mii_bus->irq);
		mdiobus_free(fep->mii_bus);
static int fec_enet_get_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = fep->phy_dev;

	return phy_ethtool_gset(phydev, cmd);

static int fec_enet_set_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = fep->phy_dev;

	return phy_ethtool_sset(phydev, cmd);

static void fec_enet_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
	struct fec_enet_private *fep = netdev_priv(ndev);

	strlcpy(info->driver, fep->pdev->dev.driver->name,
		sizeof(info->driver));
	strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
	strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
static int fec_enet_get_ts_info(struct net_device *ndev,
				struct ethtool_ts_info *info)
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->bufdesc_ex) {
		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
					SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE |
					SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;
			info->phc_index = ptp_clock_index(fep->ptp_clock);
			info->phc_index = -1;

		info->tx_types = (1 << HWTSTAMP_TX_OFF) |
				 (1 << HWTSTAMP_TX_ON);

		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
				   (1 << HWTSTAMP_FILTER_ALL);

		return ethtool_op_get_ts_info(ndev, info);
#if !defined(CONFIG_M5272)

static void fec_enet_get_pauseparam(struct net_device *ndev,
				    struct ethtool_pauseparam *pause)
	struct fec_enet_private *fep = netdev_priv(ndev);

	pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
	pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
	pause->rx_pause = pause->tx_pause;

static int fec_enet_set_pauseparam(struct net_device *ndev,
				   struct ethtool_pauseparam *pause)
	struct fec_enet_private *fep = netdev_priv(ndev);
	if (pause->tx_pause != pause->rx_pause) {
			   "hardware only supports enabling/disabling both tx and rx");
	fep->pause_flag = 0;

	/* tx pause must be same as rx pause */
	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;

	if (pause->rx_pause || pause->autoneg) {
		fep->phy_dev->supported |= ADVERTISED_Pause;
		fep->phy_dev->advertising |= ADVERTISED_Pause;
		fep->phy_dev->supported &= ~ADVERTISED_Pause;
		fep->phy_dev->advertising &= ~ADVERTISED_Pause;

	if (pause->autoneg) {
		if (netif_running(ndev))
		phy_start_aneg(fep->phy_dev);
	if (netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_wake_queue(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
static const struct fec_stat {
	char name[ETH_GSTRING_LEN];
	{ "tx_dropped", RMON_T_DROP },
	{ "tx_packets", RMON_T_PACKETS },
	{ "tx_broadcast", RMON_T_BC_PKT },
	{ "tx_multicast", RMON_T_MC_PKT },
	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
	{ "tx_undersize", RMON_T_UNDERSIZE },
	{ "tx_oversize", RMON_T_OVERSIZE },
	{ "tx_fragment", RMON_T_FRAG },
	{ "tx_jabber", RMON_T_JAB },
	{ "tx_collision", RMON_T_COL },
	{ "tx_64byte", RMON_T_P64 },
	{ "tx_65to127byte", RMON_T_P65TO127 },
	{ "tx_128to255byte", RMON_T_P128TO255 },
	{ "tx_256to511byte", RMON_T_P256TO511 },
	{ "tx_512to1023byte", RMON_T_P512TO1023 },
	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
	{ "tx_octets", RMON_T_OCTETS },

	{ "IEEE_tx_drop", IEEE_T_DROP },
	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
	{ "IEEE_tx_1col", IEEE_T_1COL },
	{ "IEEE_tx_mcol", IEEE_T_MCOL },
	{ "IEEE_tx_def", IEEE_T_DEF },
	{ "IEEE_tx_lcol", IEEE_T_LCOL },
	{ "IEEE_tx_excol", IEEE_T_EXCOL },
	{ "IEEE_tx_macerr", IEEE_T_MACERR },
	{ "IEEE_tx_cserr", IEEE_T_CSERR },
	{ "IEEE_tx_sqe", IEEE_T_SQE },
	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },

	{ "rx_packets", RMON_R_PACKETS },
	{ "rx_broadcast", RMON_R_BC_PKT },
	{ "rx_multicast", RMON_R_MC_PKT },
	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
	{ "rx_undersize", RMON_R_UNDERSIZE },
	{ "rx_oversize", RMON_R_OVERSIZE },
	{ "rx_fragment", RMON_R_FRAG },
	{ "rx_jabber", RMON_R_JAB },
	{ "rx_64byte", RMON_R_P64 },
	{ "rx_65to127byte", RMON_R_P65TO127 },
	{ "rx_128to255byte", RMON_R_P128TO255 },
	{ "rx_256to511byte", RMON_R_P256TO511 },
	{ "rx_512to1023byte", RMON_R_P512TO1023 },
	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
	{ "rx_octets", RMON_R_OCTETS },

	{ "IEEE_rx_drop", IEEE_R_DROP },
	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
	{ "IEEE_rx_crc", IEEE_R_CRC },
	{ "IEEE_rx_align", IEEE_R_ALIGN },
	{ "IEEE_rx_macerr", IEEE_R_MACERR },
	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
static void fec_enet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats, u64 *data)
	struct fec_enet_private *fep = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
		data[i] = readl(fep->hwp + fec_stats[i].offset);

static void fec_enet_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
	switch (stringset) {
		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       fec_stats[i].name, ETH_GSTRING_LEN);

static int fec_enet_get_sset_count(struct net_device *dev, int sset)
		return ARRAY_SIZE(fec_stats);

#endif /* !defined(CONFIG_M5272) */
static int fec_enet_nway_reset(struct net_device *dev)
	struct fec_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phy_dev;

	return genphy_restart_aneg(phydev);
/* ITR clock source is enet system clock (clk_ahb).
 * TCTT unit is cycle_ns * 64 cycle
 * So, the ICTT value = X us / (cycle_ns * 64)
 */
static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
	struct fec_enet_private *fep = netdev_priv(ndev);
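	/* e.g. assuming a 66 MHz clk_ahb: 66000000 / 64000 = 1031 ITR ticks
	 * per millisecond, so 1000 us converts to roughly 1031 units
	 */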
	return us * (fep->itr_clk_rate / 64000) / 1000;
/* Set threshold for interrupt coalescing */
static void fec_enet_itr_coal_set(struct net_device *ndev)
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!(fep->quirks & FEC_QUIRK_HAS_AVB))

	/* Must be greater than zero to avoid unpredictable behavior */
	if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
	    !fep->tx_time_itr || !fep->tx_pkts_itr)

	/* Select enet system clock as Interrupt Coalescing
	 * timer Clock Source
	 */
	rx_itr = FEC_ITR_CLK_SEL;
	tx_itr = FEC_ITR_CLK_SEL;

	/* set ICFT and ICTT */
	rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
	rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
	tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
	tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));

	rx_itr |= FEC_ITR_EN;
	tx_itr |= FEC_ITR_EN;

	writel(tx_itr, fep->hwp + FEC_TXIC0);
	writel(rx_itr, fep->hwp + FEC_RXIC0);
	writel(tx_itr, fep->hwp + FEC_TXIC1);
	writel(rx_itr, fep->hwp + FEC_RXIC1);
	writel(tx_itr, fep->hwp + FEC_TXIC2);
	writel(rx_itr, fep->hwp + FEC_RXIC2);
static int
fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
		return -EOPNOTSUPP;

	ec->rx_coalesce_usecs = fep->rx_time_itr;
	ec->rx_max_coalesced_frames = fep->rx_pkts_itr;

	ec->tx_coalesce_usecs = fep->tx_time_itr;
	ec->tx_max_coalesced_frames = fep->tx_pkts_itr;

	return 0;
}
static int
fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int cycle;

	if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
		return -EOPNOTSUPP;

	if (ec->rx_max_coalesced_frames > 255) {
		pr_err("Rx coalesced frames exceed hardware limitation\n");
		return -EINVAL;
	}

	if (ec->tx_max_coalesced_frames > 255) {
		pr_err("Tx coalesced frames exceed hardware limitation\n");
		return -EINVAL;
	}

	/* Validate the requested (not the currently programmed) delays */
	cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
	if (cycle > 0xFFFF) {
		pr_err("Rx coalesced usec exceeds hardware limitation\n");
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
	if (cycle > 0xFFFF) {
		pr_err("Tx coalesced usec exceeds hardware limitation\n");
		return -EINVAL;
	}

	fep->rx_time_itr = ec->rx_coalesce_usecs;
	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;

	fep->tx_time_itr = ec->tx_coalesce_usecs;
	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;

	fec_enet_itr_coal_set(ndev);

	return 0;
}
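/* Userspace usage example (interface name assumed to be eth0):
 *
 *	ethtool -C eth0 rx-usecs 100 rx-frames 32 tx-usecs 100 tx-frames 32
 *
 * rx-usecs/rx-frames map onto rx_time_itr/rx_pkts_itr above, and
 * likewise for the Tx parameters.
 */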
static void fec_enet_itr_coal_init(struct net_device *ndev)
{
	struct ethtool_coalesce ec;

	ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	fec_enet_set_coalesce(ndev, &ec);
}
static int fec_enet_get_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = fep->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int fec_enet_set_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				const void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		fep->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
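/* Userspace usage example (needs an ethtool version with tunable
 * support; interface name assumed):
 *
 *	ethtool --set-tunable eth0 rx-copybreak 256
 *	ethtool --get-tunable eth0 rx-copybreak
 */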
static void
fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
	} else {
		wol->supported = wol->wolopts = 0;
	}
}
static int
fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
		return -EINVAL;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
	if (device_may_wakeup(&ndev->dev)) {
		fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
		if (fep->irq[0] > 0)
			enable_irq_wake(fep->irq[0]);
	} else {
		fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
		if (fep->irq[0] > 0)
			disable_irq_wake(fep->irq[0]);
	}

	return 0;
}
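/* Userspace usage example (interface name assumed):
 *
 *	ethtool -s eth0 wol g
 *
 * This only succeeds when the device tree carries "fsl,magic-packet"
 * (parsed in fec_probe()), which sets FEC_WOL_HAS_MAGIC_PACKET.
 */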
static const struct ethtool_ops fec_enet_ethtool_ops = {
	.get_settings		= fec_enet_get_settings,
	.set_settings		= fec_enet_set_settings,
	.get_drvinfo		= fec_enet_get_drvinfo,
	.nway_reset		= fec_enet_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= fec_enet_get_coalesce,
	.set_coalesce		= fec_enet_set_coalesce,
#ifndef CONFIG_M5272
	.get_pauseparam		= fec_enet_get_pauseparam,
	.set_pauseparam		= fec_enet_set_pauseparam,
	.get_strings		= fec_enet_get_strings,
	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
	.get_sset_count		= fec_enet_get_sset_count,
#endif
	.get_ts_info		= fec_enet_get_ts_info,
	.get_tunable		= fec_enet_get_tunable,
	.set_tunable		= fec_enet_set_tunable,
	.get_wol		= fec_enet_get_wol,
	.set_wol		= fec_enet_set_wol,
};
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = fep->phy_dev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	if (fep->bufdesc_ex) {
		if (cmd == SIOCSHWTSTAMP)
			return fec_ptp_set(ndev, rq);
		if (cmd == SIOCGHWTSTAMP)
			return fec_ptp_get(ndev, rq);
	}

	return phy_mii_ioctl(phydev, rq, cmd);
}
static void fec_enet_free_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		rxq = fep->rx_queue[q];
		bdp = rxq->rx_bd_base;
		for (i = 0; i < rxq->rx_ring_size; i++) {
			skb = rxq->rx_skbuff[i];
			rxq->rx_skbuff[i] = NULL;
			if (skb) {
				dma_unmap_single(&fep->pdev->dev,
						 bdp->cbd_bufaddr,
						 FEC_ENET_RX_FRSIZE - fep->rx_align,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
			bdp = fec_enet_get_nextdesc(bdp, fep, q);
		}
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		txq = fep->tx_queue[q];
		bdp = txq->tx_bd_base;
		for (i = 0; i < txq->tx_ring_size; i++) {
			kfree(txq->tx_bounce[i]);
			txq->tx_bounce[i] = NULL;
			skb = txq->tx_skbuff[i];
			txq->tx_skbuff[i] = NULL;
			dev_kfree_skb(skb);
		}
	}
}
static void fec_enet_free_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++)
		if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
			txq = fep->tx_queue[i];
			dma_free_coherent(NULL,
					  txq->tx_ring_size * TSO_HEADER_SIZE,
					  txq->tso_hdrs,
					  txq->tso_hdrs_dma);
		}

	/* kfree(NULL) is a no-op, so the pointers need no prior checks */
	for (i = 0; i < fep->num_rx_queues; i++)
		kfree(fep->rx_queue[i]);
	for (i = 0; i < fep->num_tx_queues; i++)
		kfree(fep->tx_queue[i]);
}
static int fec_enet_alloc_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	int ret = 0;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->tx_queue[i] = txq;
		txq->tx_ring_size = TX_RING_SIZE;
		fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size;

		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
		txq->tx_wake_threshold =
			(txq->tx_ring_size - txq->tx_stop_threshold) / 2;

		txq->tso_hdrs = dma_alloc_coherent(NULL,
					txq->tx_ring_size * TSO_HEADER_SIZE,
					&txq->tso_hdrs_dma,
					GFP_KERNEL);
		if (!txq->tso_hdrs) {
			ret = -ENOMEM;
			goto alloc_failed;
		}
	}

	for (i = 0; i < fep->num_rx_queues; i++) {
		fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
					   GFP_KERNEL);
		if (!fep->rx_queue[i]) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE;
		fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size;
	}
	return ret;

alloc_failed:
	fec_enet_free_queue(ndev);
	return ret;
}
static int
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc *bdp;
	struct fec_enet_priv_rx_q *rxq;

	rxq = fep->rx_queue[queue];
	bdp = rxq->rx_bd_base;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
		if (!skb)
			goto err_alloc;

		if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
			dev_kfree_skb(skb);
			goto err_alloc;
		}

		rxq->rx_skbuff[i] = skb;
		bdp->cbd_sc = BD_ENET_RX_EMPTY;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = BD_ENET_RX_INT;
		}

		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, fep, queue);
	bdp->cbd_sc |= BD_SC_WRAP;
	return 0;

 err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}
static int
fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;

	txq = fep->tx_queue[queue];
	bdp = txq->tx_bd_base;
	for (i = 0; i < txq->tx_ring_size; i++) {
		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
		if (!txq->tx_bounce[i])
			goto err_alloc;

		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = BD_ENET_TX_INT;
		}

		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, fep, queue);
	bdp->cbd_sc |= BD_SC_WRAP;

	return 0;

 err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}
static int fec_enet_alloc_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		if (fec_enet_alloc_rxq_buffers(ndev, i))
			return -ENOMEM;

	for (i = 0; i < fep->num_tx_queues; i++)
		if (fec_enet_alloc_txq_buffers(ndev, i))
			return -ENOMEM;
	return 0;
}

static int
fec_enet_open(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	pinctrl_pm_select_default_state(&fep->pdev->dev);
	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		return ret;

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

	ret = fec_enet_alloc_buffers(ndev);
	if (ret)
		goto err_enet_alloc;

	/* Probe and connect to PHY when opening the interface */
	ret = fec_enet_mii_probe(ndev);
	if (ret)
		goto err_enet_mii_probe;

	napi_enable(&fep->napi);
	phy_start(fep->phy_dev);
	netif_tx_start_all_queues(ndev);

	device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
				 FEC_WOL_FLAG_ENABLE);

	return 0;

err_enet_mii_probe:
	fec_enet_free_buffers(ndev);
err_enet_alloc:
	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	return ret;
}

static int
fec_enet_close(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	phy_stop(fep->phy_dev);

	if (netif_device_present(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_disable(ndev);
		fec_stop(ndev);
	}

	phy_disconnect(fep->phy_dev);
	fep->phy_dev = NULL;

	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	fec_enet_free_buffers(ndev);

	return 0;
}
/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define HASH_BITS	6		/* #bits in hash */
#define CRC32_POLY	0xEDB88320
static void set_multicast_list(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	unsigned int i, bit, data, crc, tmp;
	unsigned char hash;

	if (ndev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (ndev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Clear filter and add the addresses in hash register
	 */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

	netdev_for_each_mc_addr(ha, ndev) {
		/* calculate crc32 value of mac address */
		crc = 0xffffffff;

		for (i = 0; i < ndev->addr_len; i++) {
			data = ha->addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
			}
		}

		/* only upper 6 bits (HASH_BITS) are used
		 * which point to a specific bit in the hash registers
		 */
		hash = (crc >> (32 - HASH_BITS)) & 0x3f;

		if (hash > 31) {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
			tmp |= 1 << (hash - 32);
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		} else {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
			tmp |= 1 << hash;
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		}
	}
}
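/* Note: the bit-serial loop above is the standard little-endian CRC-32;
 * it should be equivalent to ether_crc_le(ndev->addr_len, ha->addr) from
 * <linux/crc32.h>, but is kept open-coded here.
 */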
/* Set a MAC change in hardware. */
static int
fec_set_mac_address(struct net_device *ndev, void *p)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sockaddr *addr = p;

	if (addr) {
		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;
		memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	}

	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
		(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
		fep->hwp + FEC_ADDR_LOW);
	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
		fep->hwp + FEC_ADDR_HIGH);
	return 0;
}
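/* Illustrative example with a made-up station address 00:04:9f:01:02:03:
 * the two writes above program FEC_ADDR_LOW = 0x00049f01 and
 * FEC_ADDR_HIGH = 0x02030000, i.e. the address bytes are packed most
 * significant byte first.
 */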
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fec_poll_controller - FEC Poll controller function
 * @dev: The FEC network adapter
 *
 * Polled functionality used by netconsole and others in non interrupt mode
 *
 */
static void fec_poll_controller(struct net_device *dev)
{
	int i;
	struct fec_enet_private *fep = netdev_priv(dev);

	for (i = 0; i < FEC_IRQ_NUM; i++) {
		if (fep->irq[i] > 0) {
			disable_irq(fep->irq[i]);
			fec_enet_interrupt(fep->irq[i], dev);
			enable_irq(fep->irq[i]);
		}
	}
}
#endif
#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM
static inline void fec_enet_set_netdev_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	netdev->features = features;

	/* Receive checksum has been changed */
	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
		else
			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
	}
}
static int fec_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(netdev);
		fec_stop(netdev);
		fec_enet_set_netdev_features(netdev, features);
		fec_restart(netdev);
		netif_tx_wake_all_queues(netdev);
		netif_tx_unlock_bh(netdev);
		napi_enable(&fep->napi);
	} else {
		fec_enet_set_netdev_features(netdev, features);
	}

	return 0;
}
static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_do_ioctl		= fec_enet_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fec_poll_controller,
#endif
	.ndo_set_features	= fec_set_features,
};
 /*
  * XXX:  We need to clean up on failure exits here.
  *
  */
static int fec_enet_init(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *cbd_base;
	dma_addr_t bd_dma;
	int bd_size;
	unsigned int i;

#if defined(CONFIG_ARM)
	fep->rx_align = 0xf;
	fep->tx_align = 0xf;
#else
	fep->rx_align = 0x3;
	fep->tx_align = 0x3;
#endif

	fec_enet_alloc_queue(ndev);

	if (fep->bufdesc_ex)
		fep->bufdesc_size = sizeof(struct bufdesc_ex);
	else
		fep->bufdesc_size = sizeof(struct bufdesc);
	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) *
			fep->bufdesc_size;

	/* Allocate memory for buffer descriptors. */
	cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma,
				      GFP_KERNEL);
	if (!cbd_base)
		return -ENOMEM;

	memset(cbd_base, 0, bd_size);

	/* Get the Ethernet address */
	fec_get_mac(ndev);
	/* make sure MAC we just acquired is programmed into the hw */
	fec_set_mac_address(ndev, NULL);

	/* Set receive and transmit descriptor base. */
	for (i = 0; i < fep->num_rx_queues; i++) {
		rxq = fep->rx_queue[i];
		rxq->index = i;
		rxq->rx_bd_base = (struct bufdesc *)cbd_base;
		rxq->bd_dma = bd_dma;
		if (fep->bufdesc_ex) {
			bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size;
			cbd_base = (struct bufdesc *)
				(((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size);
		} else {
			bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size;
			cbd_base += rxq->rx_ring_size;
		}
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];
		txq->index = i;
		txq->tx_bd_base = (struct bufdesc *)cbd_base;
		txq->bd_dma = bd_dma;
		if (fep->bufdesc_ex) {
			bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size;
			cbd_base = (struct bufdesc *)
				(((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size);
		} else {
			bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size;
			cbd_base += txq->tx_ring_size;
		}
	}

	/* The FEC Ethernet specific entries in the device structure */
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->netdev_ops = &fec_netdev_ops;
	ndev->ethtool_ops = &fec_enet_ethtool_ops;

	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);

	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
		/* enable hw VLAN support */
		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
		ndev->gso_max_segs = FEC_MAX_TSO_SEGS;

		/* enable hw accelerator */
		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
	}

	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		fep->tx_align = 0;
		fep->rx_align = 0x3f;
	}

	ndev->hw_features = ndev->features;

	fec_restart(ndev);

	return 0;
}
#ifdef CONFIG_OF
static void fec_reset_phy(struct platform_device *pdev)
{
	int err, phy_reset;
	int msec = 1;
	struct device_node *np = pdev->dev.of_node;

	if (!np)
		return;

	of_property_read_u32(np, "phy-reset-duration", &msec);
	/* A sane reset duration should not be longer than 1s */
	if (msec > 1000)
		msec = 1;

	phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
	if (!gpio_is_valid(phy_reset))
		return;

	err = devm_gpio_request_one(&pdev->dev, phy_reset,
				    GPIOF_OUT_INIT_LOW, "phy-reset");
	if (err) {
		dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
		return;
	}
	msleep(msec);
	gpio_set_value(phy_reset, 1);
}
#else /* CONFIG_OF */
static void fec_reset_phy(struct platform_device *pdev)
{
	/*
	 * In case of platform probe, the reset has been done
	 * by machine code.
	 */
}
#endif /* CONFIG_OF */
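/* Example device-tree fragment (illustrative; the GPIO line and timing
 * are board specific):
 *
 *	&fec {
 *		phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
 *		phy-reset-duration = <10>;	// in milliseconds
 *	};
 */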
static void
fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
{
	struct device_node *np = pdev->dev.of_node;
	int err;

	*num_tx = *num_rx = 1;

	if (!np || !of_device_is_available(np))
		return;

	/* parse the num of tx and rx queues */
	err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
	if (err)
		*num_tx = 1;

	err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
	if (err)
		*num_rx = 1;

	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
			 *num_tx);
		*num_tx = 1;
		return;
	}

	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
			 *num_rx);
		*num_rx = 1;
		return;
	}
}
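/* Example device-tree fragment (illustrative; only parts with multiqueue
 * support, such as i.MX6SX, accept more than one queue per direction):
 *
 *	fsl,num-tx-queues = <3>;
 *	fsl,num-rx-queues = <3>;
 *
 * Missing or invalid values fall back to a single queue.
 */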
static int
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	struct net_device *ndev;
	int i, irq, ret = 0;
	struct resource *r;
	const struct of_device_id *of_id;
	static int dev_id;
	struct device_node *np = pdev->dev.of_node, *phy_node;
	int num_tx_qs;
	int num_rx_qs;

	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);

	/* Init network device */
	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private),
				  num_tx_qs, num_rx_qs);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);

	of_id = of_match_device(fec_dt_ids, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;
	fep->quirks = pdev->id_entry->driver_data;

	fep->num_rx_queues = num_rx_qs;
	fep->num_tx_queues = num_tx_qs;

#if !defined(CONFIG_M5272)
	/* default enable pause frame auto negotiation */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fep->hwp = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(fep->hwp)) {
		ret = PTR_ERR(fep->hwp);
		goto failed_ioremap;
	}

	fep->pdev = pdev;
	fep->dev_id = dev_id++;

	platform_set_drvdata(pdev, ndev);

	if (of_get_property(np, "fsl,magic-packet", NULL))
		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;

	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(np)) {
		ret = of_phy_register_fixed_link(np);
		if (ret < 0) {
			dev_err(&pdev->dev,
				"broken fixed-link specification\n");
			goto failed_phy;
		}
		phy_node = of_node_get(np);
	}
	fep->phy_node = phy_node;

	ret = of_get_phy_mode(pdev->dev.of_node);
	if (ret < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata)
			fep->phy_interface = pdata->phy;
		else
			fep->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		fep->phy_interface = ret;
	}

	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fep->clk_ipg)) {
		ret = PTR_ERR(fep->clk_ipg);
		goto failed_clk;
	}

	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(fep->clk_ahb)) {
		ret = PTR_ERR(fep->clk_ahb);
		goto failed_clk;
	}

	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);

	/* enet_out is optional, depends on board */
	fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
	if (IS_ERR(fep->clk_enet_out))
		fep->clk_enet_out = NULL;

	fep->ptp_clk_on = false;
	mutex_init(&fep->ptp_clk_mutex);

	/* clk_ref is optional, depends on board */
	fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
	if (IS_ERR(fep->clk_ref))
		fep->clk_ref = NULL;

	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
	if (IS_ERR(fep->clk_ptp)) {
		fep->clk_ptp = NULL;
		fep->bufdesc_ex = false;
	}

	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto failed_clk;

	fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
	if (!IS_ERR(fep->reg_phy)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable phy regulator: %d\n", ret);
			goto failed_regulator;
		}
	} else {
		fep->reg_phy = NULL;
	}

	fec_reset_phy(pdev);

	if (fep->bufdesc_ex)
		fec_ptp_init(pdev);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	for (i = 0; i < FEC_IRQ_NUM; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			if (i)
				break;
			ret = irq;
			goto failed_irq;
		}
		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
				       0, pdev->name, ndev);
		if (ret)
			goto failed_irq;

		fep->irq[i] = irq;
	}

	init_completion(&fep->mdio_done);
	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);
	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&pdev->dev);

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	device_init_wakeup(&ndev->dev, fep->wol_flag &
			   FEC_WOL_HAS_MAGIC_PACKET);

	if (fep->bufdesc_ex && fep->ptp_clock)
		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);

	fep->rx_copybreak = COPYBREAK_DEFAULT;
	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
	return 0;

failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
failed_init:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
failed_regulator:
	fec_enet_clk_enable(ndev, false);
failed_clk:
failed_phy:
	of_node_put(phy_node);
failed_ioremap:
	free_netdev(ndev);

	return ret;
}

static int
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	cancel_delayed_work_sync(&fep->time_keep);
	cancel_work_sync(&fep->tx_timeout_work);
	unregister_netdev(ndev);
	fec_enet_mii_remove(fep);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	if (fep->ptp_clock)
		ptp_clock_unregister(fep->ptp_clock);
	fec_enet_clk_enable(ndev, false);
	of_node_put(fep->phy_node);
	free_netdev(ndev);

	return 0;
}
static int __maybe_unused fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	rtnl_lock();
	if (netif_running(ndev)) {
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
		phy_stop(fep->phy_dev);
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_device_detach(ndev);
		netif_tx_unlock_bh(ndev);
		fec_stop(ndev);
		fec_enet_clk_enable(ndev, false);
		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	}
	rtnl_unlock();

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
		regulator_disable(fep->reg_phy);

	/* The SoC supplies the PHY clock and may control the PHY regulator;
	 * disabling either one takes the PHY link down.
	 */
	if (fep->clk_enet_out || fep->reg_phy)
		fep->link = 0;

	return 0;
}
static int __maybe_unused fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	int ret;
	int val;

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret)
			return ret;
	}

	rtnl_lock();
	if (netif_running(ndev)) {
		ret = fec_enet_clk_enable(ndev, true);
		if (ret) {
			rtnl_unlock();
			goto failed_clk;
		}
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
			if (pdata && pdata->sleep_mode_enable)
				pdata->sleep_mode_enable(false);
			val = readl(fep->hwp + FEC_ECNTRL);
			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
			writel(val, fep->hwp + FEC_ECNTRL);
			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
		} else {
			pinctrl_pm_select_default_state(&fep->pdev->dev);
		}
		fec_restart(ndev);
		netif_tx_lock_bh(ndev);
		netif_device_attach(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
		phy_start(fep->phy_dev);
	}
	rtnl_unlock();

	return 0;

failed_clk:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	return ret;
}
static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);

static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &fec_pm_ops,
		.of_match_table = fec_dt_ids,
	},
	.id_table = fec_devtype,
	.probe	= fec_probe,
	.remove	= fec_drv_remove,
};

module_platform_driver(fec_driver);

MODULE_ALIAS("platform:"DRIVER_NAME);
MODULE_LICENSE("GPL");