/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through of_device. Configuration information
 *  is therefore conveyed through an OF-style device tree.
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit. This method will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */
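
/* Illustrative sketch (not part of the driver): one way to walk the BD
 * ring described above using the wrap bit.  process_buffer() and
 * refill_buffer() are hypothetical helpers; the driver's real receive
 * path is gfar_clean_rx_ring() elsewhere in this file.
 *
 *	struct rxbd8 *bdp = rx_queue->cur_rx;
 *
 *	while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY)) {
 *		process_buffer(bdp);	(pass the filled skb up the stack)
 *		refill_buffer(bdp);	(attach a fresh skb, mark EMPTY)
 *		if (be16_to_cpu(bdp->status) & RXBD_WRAP)
 *			bdp = rx_queue->rx_bd_base;
 *		else
 *			bdp++;
 *	}
 *	rx_queue->cur_rx = bdp;
 */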

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT      (1*HZ)

const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
static struct sk_buff *gfar_new_skb(struct net_device *dev,
                                    dma_addr_t *bufaddr);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static noinline void gfar_update_link_state(struct gfar_private *priv);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
                               int amount_pull, struct napi_struct *napi);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
                                  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
                            dma_addr_t buf)
{
        u32 lstatus;

        bdp->bufPtr = cpu_to_be32(buf);

        lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
        if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
                lstatus |= BD_LFLAG(RXBD_WRAP);

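        /* Make sure the updated bufPtr is visible before the lstatus
         * write below hands the (now empty) descriptor to the hardware.
         */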
        gfar_wmb();

        bdp->lstatus = cpu_to_be32(lstatus);
}

static int gfar_init_bds(struct net_device *ndev)
{
        struct gfar_private *priv = netdev_priv(ndev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;
        struct txbd8 *txbdp;
        struct rxbd8 *rxbdp;
        u32 __iomem *rfbptr;
        int i, j;
        dma_addr_t bufaddr;

        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                /* Initialize some variables in our dev structure */
                tx_queue->num_txbdfree = tx_queue->tx_ring_size;
                tx_queue->dirty_tx = tx_queue->tx_bd_base;
                tx_queue->cur_tx = tx_queue->tx_bd_base;
                tx_queue->skb_curtx = 0;
                tx_queue->skb_dirtytx = 0;

                /* Initialize Transmit Descriptor Ring */
                txbdp = tx_queue->tx_bd_base;
                for (j = 0; j < tx_queue->tx_ring_size; j++) {
                        txbdp->lstatus = 0;
                        txbdp->bufPtr = 0;
                        txbdp++;
                }

                /* Set the last descriptor in the ring to indicate wrap */
                txbdp--;
                txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
                                            TXBD_WRAP);
        }

        rfbptr = &regs->rfbptr0;
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                rx_queue->cur_rx = rx_queue->rx_bd_base;
                rx_queue->skb_currx = 0;
                rxbdp = rx_queue->rx_bd_base;

                for (j = 0; j < rx_queue->rx_ring_size; j++) {
                        struct sk_buff *skb = rx_queue->rx_skbuff[j];

                        if (skb) {
                                bufaddr = be32_to_cpu(rxbdp->bufPtr);
                        } else {
                                skb = gfar_new_skb(ndev, &bufaddr);
                                if (!skb) {
                                        netdev_err(ndev, "Can't allocate RX buffers\n");
                                        return -ENOMEM;
                                }
                                rx_queue->rx_skbuff[j] = skb;
                        }

                        gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
                        rxbdp++;
                }

                rx_queue->rfbptr = rfbptr;
                rfbptr += 2;
        }

        return 0;
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
        void *vaddr;
        dma_addr_t addr;
        int i, j, k;
        struct gfar_private *priv = netdev_priv(ndev);
        struct device *dev = priv->dev;
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;

        priv->total_tx_ring_size = 0;
        for (i = 0; i < priv->num_tx_queues; i++)
                priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

        priv->total_rx_ring_size = 0;
        for (i = 0; i < priv->num_rx_queues; i++)
                priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

        /* Allocate memory for the buffer descriptors */
        vaddr = dma_alloc_coherent(dev,
                                   (priv->total_tx_ring_size *
                                    sizeof(struct txbd8)) +
                                   (priv->total_rx_ring_size *
                                    sizeof(struct rxbd8)),
                                   &addr, GFP_KERNEL);
        if (!vaddr)
                return -ENOMEM;

        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                tx_queue->tx_bd_base = vaddr;
                tx_queue->tx_bd_dma_base = addr;
                tx_queue->dev = ndev;
                /* enet DMA only understands physical addresses */
                addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
                vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
        }

        /* Start the rx descriptor ring where the tx ring leaves off */
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                rx_queue->rx_bd_base = vaddr;
                rx_queue->rx_bd_dma_base = addr;
                rx_queue->dev = ndev;
                addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
                vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
        }

        /* Setup the skbuff rings */
        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                tx_queue->tx_skbuff =
                        kmalloc_array(tx_queue->tx_ring_size,
                                      sizeof(*tx_queue->tx_skbuff),
                                      GFP_KERNEL);
                if (!tx_queue->tx_skbuff)
                        goto cleanup;

                for (k = 0; k < tx_queue->tx_ring_size; k++)
                        tx_queue->tx_skbuff[k] = NULL;
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                rx_queue->rx_skbuff =
                        kmalloc_array(rx_queue->rx_ring_size,
                                      sizeof(*rx_queue->rx_skbuff),
                                      GFP_KERNEL);
                if (!rx_queue->rx_skbuff)
                        goto cleanup;

                for (j = 0; j < rx_queue->rx_ring_size; j++)
                        rx_queue->rx_skbuff[j] = NULL;
        }

        if (gfar_init_bds(ndev))
                goto cleanup;

        return 0;

cleanup:
        free_skb_resources(priv);
        return -ENOMEM;
}

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 __iomem *baddr;
        int i;

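        /* The per-queue tbase/rbase registers are laid out at 8-byte
         * strides, hence the step of two u32s per queue below.
         */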
        baddr = &regs->tbase0;
        for (i = 0; i < priv->num_tx_queues; i++) {
                gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
                baddr += 2;
        }

        baddr = &regs->rbase0;
        for (i = 0; i < priv->num_rx_queues; i++) {
                gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
                baddr += 2;
        }
}

static void gfar_init_rqprm(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 __iomem *baddr;
        int i;

        baddr = &regs->rqprm0;
        for (i = 0; i < priv->num_rx_queues; i++) {
                gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
                           (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
                baddr++;
        }
}

static void gfar_rx_buff_size_config(struct gfar_private *priv)
{
        int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;

        /* set this when rx hw offload (TOE) functions are being used */
        priv->uses_rxfcb = 0;

        if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
                priv->uses_rxfcb = 1;

        if (priv->hwts_rx_en)
                priv->uses_rxfcb = 1;

        if (priv->uses_rxfcb)
                frame_size += GMAC_FCB_LEN;

        frame_size += priv->padding;

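        /* Round up to the next INCREMENTAL_BUFFER_SIZE boundary; note a
         * full increment is added even if frame_size is already aligned
         * (e.g. 1518 -> 1536 for an assumed 512-byte increment).
         */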
        frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
                     INCREMENTAL_BUFFER_SIZE;

        priv->rx_buffer_size = frame_size;
}

static void gfar_mac_rx_config(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 rctrl = 0;

        if (priv->rx_filer_enable) {
                rctrl |= RCTRL_FILREN;
                /* Program the RIR0 reg with the required distribution */
                if (priv->poll_mode == GFAR_SQ_POLLING)
                        gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
                else /* GFAR_MQ_POLLING */
                        gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
        }

        /* Restore PROMISC mode */
        if (priv->ndev->flags & IFF_PROMISC)
                rctrl |= RCTRL_PROM;

        if (priv->ndev->features & NETIF_F_RXCSUM)
                rctrl |= RCTRL_CHECKSUMMING;

        if (priv->extended_hash)
                rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

        if (priv->padding) {
                rctrl &= ~RCTRL_PAL_MASK;
                rctrl |= RCTRL_PADDING(priv->padding);
        }

        /* Enable HW time stamping if requested from user space */
        if (priv->hwts_rx_en)
                rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

        if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
                rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

        /* Clear the LFC bit */
        gfar_write(&regs->rctrl, rctrl);
        /* Init flow control threshold values */
        gfar_init_rqprm(priv);
        gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
        rctrl |= RCTRL_LFC;

        /* Init rctrl based on our settings */
        gfar_write(&regs->rctrl, rctrl);
}

static void gfar_mac_tx_config(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tctrl = 0;

        if (priv->ndev->features & NETIF_F_IP_CSUM)
                tctrl |= TCTRL_INIT_CSUM;

        if (priv->prio_sched_en)
                tctrl |= TCTRL_TXSCHED_PRIO;
        else {
                tctrl |= TCTRL_TXSCHED_WRRS;
                gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
                gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
        }

        if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
                tctrl |= TCTRL_VLINS;

        gfar_write(&regs->tctrl, tctrl);
}

static void gfar_configure_coalescing(struct gfar_private *priv,
                               unsigned long tx_mask, unsigned long rx_mask)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 __iomem *baddr;

        if (priv->mode == MQ_MG_MODE) {
                int i = 0;

                baddr = &regs->txic0;
                for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
                        gfar_write(baddr + i, 0);
                        if (likely(priv->tx_queue[i]->txcoalescing))
                                gfar_write(baddr + i, priv->tx_queue[i]->txic);
                }

                baddr = &regs->rxic0;
                for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
                        gfar_write(baddr + i, 0);
                        if (likely(priv->rx_queue[i]->rxcoalescing))
                                gfar_write(baddr + i, priv->rx_queue[i]->rxic);
                }
        } else {
                /* Backward-compatible case -- even if we enable
                 * multiple queues, there's only a single register to program
                 */
                gfar_write(&regs->txic, 0);
                if (likely(priv->tx_queue[0]->txcoalescing))
                        gfar_write(&regs->txic, priv->tx_queue[0]->txic);

                gfar_write(&regs->rxic, 0);
                if (unlikely(priv->rx_queue[0]->rxcoalescing))
                        gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
        }
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
        gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
        unsigned long tx_packets = 0, tx_bytes = 0;
        int i;

        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_packets += priv->rx_queue[i]->stats.rx_packets;
                rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
                rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
        }

        dev->stats.rx_packets = rx_packets;
        dev->stats.rx_bytes   = rx_bytes;
        dev->stats.rx_dropped = rx_dropped;

        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
                tx_packets += priv->tx_queue[i]->stats.tx_packets;
        }

        dev->stats.tx_bytes   = tx_bytes;
        dev->stats.tx_packets = tx_packets;

        return &dev->stats;
}

static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
        eth_mac_addr(dev, p);

        gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

        return 0;
}

static const struct net_device_ops gfar_netdev_ops = {
        .ndo_open = gfar_enet_open,
        .ndo_start_xmit = gfar_start_xmit,
        .ndo_stop = gfar_close,
        .ndo_change_mtu = gfar_change_mtu,
        .ndo_set_features = gfar_set_features,
        .ndo_set_rx_mode = gfar_set_multi,
        .ndo_tx_timeout = gfar_timeout,
        .ndo_do_ioctl = gfar_ioctl,
        .ndo_get_stats = gfar_get_stats,
        .ndo_set_mac_address = gfar_set_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = gfar_netpoll,
#endif
};

static void gfar_ints_disable(struct gfar_private *priv)
{
        int i;
        for (i = 0; i < priv->num_grps; i++) {
                struct gfar __iomem *regs = priv->gfargrp[i].regs;
                /* Clear IEVENT */
                gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

                /* Initialize IMASK */
                gfar_write(&regs->imask, IMASK_INIT_CLEAR);
        }
}

static void gfar_ints_enable(struct gfar_private *priv)
{
        int i;
        for (i = 0; i < priv->num_grps; i++) {
                struct gfar __iomem *regs = priv->gfargrp[i].regs;
                /* Unmask the interrupts we look for */
                gfar_write(&regs->imask, IMASK_DEFAULT);
        }
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_queues; i++) {
                priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
                                            GFP_KERNEL);
                if (!priv->tx_queue[i])
                        return -ENOMEM;

                priv->tx_queue[i]->tx_skbuff = NULL;
                priv->tx_queue[i]->qindex = i;
                priv->tx_queue[i]->dev = priv->ndev;
                spin_lock_init(&(priv->tx_queue[i]->txlock));
        }
        return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_rx_queues; i++) {
                priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
                                            GFP_KERNEL);
                if (!priv->rx_queue[i])
                        return -ENOMEM;

                priv->rx_queue[i]->rx_skbuff = NULL;
                priv->rx_queue[i]->qindex = i;
                priv->rx_queue[i]->dev = priv->ndev;
        }
        return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_queues; i++)
                kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_rx_queues; i++)
                kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < MAXGROUPS; i++)
                if (priv->gfargrp[i].regs)
                        iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
        int i, j;

        for (i = 0; i < priv->num_grps; i++)
                for (j = 0; j < GFAR_NUM_IRQS; j++) {
                        kfree(priv->gfargrp[i].irqinfo[j]);
                        priv->gfargrp[i].irqinfo[j] = NULL;
                }

        free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_grps; i++) {
                napi_disable(&priv->gfargrp[i].napi_rx);
                napi_disable(&priv->gfargrp[i].napi_tx);
        }
}

static void enable_napi(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_grps; i++) {
                napi_enable(&priv->gfargrp[i].napi_rx);
                napi_enable(&priv->gfargrp[i].napi_tx);
        }
}

static int gfar_parse_group(struct device_node *np,
                            struct gfar_private *priv, const char *model)
{
        struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
        int i;

        for (i = 0; i < GFAR_NUM_IRQS; i++) {
                grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
                                          GFP_KERNEL);
                if (!grp->irqinfo[i])
                        return -ENOMEM;
        }

        grp->regs = of_iomap(np, 0);
        if (!grp->regs)
                return -ENOMEM;

        gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

        /* If we aren't the FEC, we have multiple interrupts */
        if (model && strcasecmp(model, "FEC")) {
                gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
                gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
                if (gfar_irq(grp, TX)->irq == NO_IRQ ||
                    gfar_irq(grp, RX)->irq == NO_IRQ ||
                    gfar_irq(grp, ER)->irq == NO_IRQ)
                        return -EINVAL;
        }

        grp->priv = priv;
        spin_lock_init(&grp->grplock);
        if (priv->mode == MQ_MG_MODE) {
                u32 rxq_mask, txq_mask;
                int ret;

                grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
                grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);

                ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
                if (!ret) {
                        grp->rx_bit_map = rxq_mask ?
                                rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
                }

                ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
                if (!ret) {
                        grp->tx_bit_map = txq_mask ?
                                txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
                }

                if (priv->poll_mode == GFAR_SQ_POLLING) {
                        /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
                        grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
                        grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
                }
        } else {
                grp->rx_bit_map = 0xFF;
                grp->tx_bit_map = 0xFF;
        }

        /* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit()
         * parses from right to left, so we need to reverse the 8 bits
         * to get the queue index
         */
        grp->rx_bit_map = bitrev8(grp->rx_bit_map);
        grp->tx_bit_map = bitrev8(grp->tx_bit_map);
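        /* e.g. a map of 0x80 (only q0 set) becomes 0x01, so
         * for_each_set_bit() reports q0 at bit position 0
         */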

        /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
         * also assign queues to groups
         */
        for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
                if (!grp->rx_queue)
                        grp->rx_queue = priv->rx_queue[i];
                grp->num_rx_queues++;
                grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
                priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
                priv->rx_queue[i]->grp = grp;
        }

        for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
                if (!grp->tx_queue)
                        grp->tx_queue = priv->tx_queue[i];
                grp->num_tx_queues++;
                grp->tstat |= (TSTAT_CLEAR_THALT >> i);
                priv->tqueue |= (TQUEUE_EN0 >> i);
                priv->tx_queue[i]->grp = grp;
        }

        priv->num_grps++;

        return 0;
}

static int gfar_of_group_count(struct device_node *np)
{
        struct device_node *child;
        int num = 0;

        for_each_available_child_of_node(np, child)
                if (!of_node_cmp(child->name, "queue-group"))
                        num++;

        return num;
}

static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
        const char *model;
        const char *ctype;
        const void *mac_addr;
        int err = 0, i;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
        struct device_node *np = ofdev->dev.of_node;
        struct device_node *child = NULL;
        struct property *stash;
        u32 stash_len = 0;
        u32 stash_idx = 0;
        unsigned int num_tx_qs, num_rx_qs;
        unsigned short mode, poll_mode;

        if (!np)
                return -ENODEV;

        if (of_device_is_compatible(np, "fsl,etsec2")) {
                mode = MQ_MG_MODE;
                poll_mode = GFAR_SQ_POLLING;
        } else {
                mode = SQ_SG_MODE;
                poll_mode = GFAR_SQ_POLLING;
        }

        if (mode == SQ_SG_MODE) {
                num_tx_qs = 1;
                num_rx_qs = 1;
        } else { /* MQ_MG_MODE */
                /* get the actual number of supported groups */
                unsigned int num_grps = gfar_of_group_count(np);

                if (num_grps == 0 || num_grps > MAXGROUPS) {
                        dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
                                num_grps);
                        pr_err("Cannot do alloc_etherdev, aborting\n");
                        return -EINVAL;
                }

                if (poll_mode == GFAR_SQ_POLLING) {
                        num_tx_qs = num_grps; /* one txq per int group */
                        num_rx_qs = num_grps; /* one rxq per int group */
                } else { /* GFAR_MQ_POLLING */
                        u32 tx_queues, rx_queues;
                        int ret;

                        /* parse the num of HW tx and rx queues */
                        ret = of_property_read_u32(np, "fsl,num_tx_queues",
                                                   &tx_queues);
                        num_tx_qs = ret ? 1 : tx_queues;

                        ret = of_property_read_u32(np, "fsl,num_rx_queues",
                                                   &rx_queues);
                        num_rx_qs = ret ? 1 : rx_queues;
                }
        }

        if (num_tx_qs > MAX_TX_QS) {
                pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
                       num_tx_qs, MAX_TX_QS);
                pr_err("Cannot do alloc_etherdev, aborting\n");
                return -EINVAL;
        }

        if (num_rx_qs > MAX_RX_QS) {
                pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
                       num_rx_qs, MAX_RX_QS);
                pr_err("Cannot do alloc_etherdev, aborting\n");
                return -EINVAL;
        }

        *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
        dev = *pdev;
        if (!dev)
                return -ENOMEM;

        priv = netdev_priv(dev);
        priv->ndev = dev;

        priv->mode = mode;
        priv->poll_mode = poll_mode;

        priv->num_tx_queues = num_tx_qs;
        netif_set_real_num_rx_queues(dev, num_rx_qs);
        priv->num_rx_queues = num_rx_qs;

        err = gfar_alloc_tx_queues(priv);
        if (err)
                goto tx_alloc_failed;

        err = gfar_alloc_rx_queues(priv);
        if (err)
                goto rx_alloc_failed;

        err = of_property_read_string(np, "model", &model);
        if (err) {
                pr_err("Device model property missing, aborting\n");
                goto rx_alloc_failed;
        }

        /* Init Rx queue filer rule set linked list */
        INIT_LIST_HEAD(&priv->rx_list.list);
        priv->rx_list.count = 0;
        mutex_init(&priv->rx_queue_access);

        for (i = 0; i < MAXGROUPS; i++)
                priv->gfargrp[i].regs = NULL;

        /* Parse and initialize group specific information */
        if (priv->mode == MQ_MG_MODE) {
                for_each_available_child_of_node(np, child) {
                        if (of_node_cmp(child->name, "queue-group"))
                                continue;

                        err = gfar_parse_group(child, priv, model);
                        if (err)
                                goto err_grp_init;
                }
        } else { /* SQ_SG_MODE */
                err = gfar_parse_group(np, priv, model);
                if (err)
                        goto err_grp_init;
        }

        stash = of_find_property(np, "bd-stash", NULL);

        if (stash) {
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
                priv->bd_stash_en = 1;
        }

        err = of_property_read_u32(np, "rx-stash-len", &stash_len);

        if (err == 0)
                priv->rx_stash_size = stash_len;

        err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);

        if (err == 0)
                priv->rx_stash_index = stash_idx;

        if (stash_len || stash_idx)
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

        mac_addr = of_get_mac_address(np);

        if (mac_addr)
                memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

        if (model && !strcasecmp(model, "TSEC"))
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
                                      FSL_GIANFAR_DEV_HAS_COALESCE |
                                      FSL_GIANFAR_DEV_HAS_RMON |
                                      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

        if (model && !strcasecmp(model, "eTSEC"))
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
                                      FSL_GIANFAR_DEV_HAS_COALESCE |
                                      FSL_GIANFAR_DEV_HAS_RMON |
                                      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
                                      FSL_GIANFAR_DEV_HAS_CSUM |
                                      FSL_GIANFAR_DEV_HAS_VLAN |
                                      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
                                      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
                                      FSL_GIANFAR_DEV_HAS_TIMER;

        err = of_property_read_string(np, "phy-connection-type", &ctype);

        /* We only care about rgmii-id.  The rest are autodetected */
        if (err == 0 && !strcmp(ctype, "rgmii-id"))
                priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
        else
                priv->interface = PHY_INTERFACE_MODE_MII;

        if (of_find_property(np, "fsl,magic-packet", NULL))
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

        priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

        /* In the case of a fixed PHY, the DT node associated
         * to the PHY is the Ethernet MAC DT node.
         */
        if (!priv->phy_node && of_phy_is_fixed_link(np)) {
                err = of_phy_register_fixed_link(np);
                if (err)
                        goto err_grp_init;

                priv->phy_node = of_node_get(np);
        }

        /* Find the TBI PHY.  If it's not there, we don't support SGMII */
        priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

        return 0;

err_grp_init:
        unmap_group_regs(priv);
rx_alloc_failed:
        gfar_free_rx_queues(priv);
tx_alloc_failed:
        gfar_free_tx_queues(priv);
        free_gfar_dev(priv);
        return err;
}
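
/* Sketch of the device-tree properties parsed by gfar_of_init() above.
 * The node name and values are hypothetical; the property names are the
 * ones queried in the code:
 *
 *	ethernet@24000 {
 *		compatible = "fsl,etsec2";
 *		model = "eTSEC";
 *		fsl,num_tx_queues = <8>;
 *		fsl,num_rx_queues = <8>;
 *		bd-stash;
 *		rx-stash-len = <96>;
 *		rx-stash-idx = <0>;
 *		phy-connection-type = "rgmii-id";
 *		phy-handle = <&phy0>;
 *		queue-group { ... };
 *	};
 */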

static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
        struct hwtstamp_config config;
        struct gfar_private *priv = netdev_priv(netdev);

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        /* reserved for future extensions */
        if (config.flags)
                return -EINVAL;

        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
                priv->hwts_tx_en = 0;
                break;
        case HWTSTAMP_TX_ON:
                if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
                        return -ERANGE;
                priv->hwts_tx_en = 1;
                break;
        default:
                return -ERANGE;
        }

        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                if (priv->hwts_rx_en) {
                        priv->hwts_rx_en = 0;
                        reset_gfar(netdev);
                }
                break;
        default:
                if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
                        return -ERANGE;
                if (!priv->hwts_rx_en) {
                        priv->hwts_rx_en = 1;
                        reset_gfar(netdev);
                }
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        }

        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
}
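
/* Illustrative user-space sketch (not part of the driver) exercising the
 * SIOCSHWTSTAMP path handled above; error handling omitted.  The driver
 * coerces any non-NONE rx filter to HWTSTAMP_FILTER_ALL, as seen above.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *
 *	int enable_rx_tstamps(int sock, const char *ifname)
 *	{
 *		struct hwtstamp_config cfg = { 0 };
 *		struct ifreq ifr = { 0 };
 *
 *		cfg.tx_type = HWTSTAMP_TX_OFF;
 *		cfg.rx_filter = HWTSTAMP_FILTER_ALL;
 *		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
 *		ifr.ifr_data = (void *)&cfg;
 *		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
 *	}
 */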

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
        struct hwtstamp_config config;
        struct gfar_private *priv = netdev_priv(netdev);

        config.flags = 0;
        config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
        config.rx_filter = (priv->hwts_rx_en ?
                            HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
}

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct gfar_private *priv = netdev_priv(dev);

        if (!netif_running(dev))
                return -EINVAL;

        if (cmd == SIOCSHWTSTAMP)
                return gfar_hwtstamp_set(dev, rq);
        if (cmd == SIOCGHWTSTAMP)
                return gfar_hwtstamp_get(dev, rq);

        if (!priv->phydev)
                return -ENODEV;

        return phy_mii_ioctl(priv->phydev, rq, cmd);
}

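/* Install a four-entry filer cluster matching @class, working downward
 * from @rqfar; returns the index of the lowest entry written so the
 * next cluster can be placed directly below it.
 */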
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
                                   u32 class)
{
        u32 rqfpr = FPR_FILER_MASK;
        u32 rqfcr = 0x0;

        rqfar--;
        rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar--;
        rqfcr = RQFCR_CMP_NOMATCH;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar--;
        rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
        rqfpr = class;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar--;
        rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
        rqfpr = class;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
        int i = 0x0;
        u32 rqfar = MAX_FILER_IDX;
        u32 rqfcr = 0x0;
        u32 rqfpr = FPR_FILER_MASK;

        /* Default rule */
        rqfcr = RQFCR_CMP_MATCH;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

        /* cur_filer_idx indicates the first non-masked rule */
        priv->cur_filer_idx = rqfar;

        /* Rest are masked rules */
        rqfcr = RQFCR_CMP_NOMATCH;
        for (i = 0; i < rqfar; i++) {
                priv->ftp_rqfcr[i] = rqfcr;
                priv->ftp_rqfpr[i] = rqfpr;
                gfar_write_filer(priv, i, rqfcr, rqfpr);
        }
}

#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
        unsigned int pvr = mfspr(SPRN_PVR);
        unsigned int svr = mfspr(SPRN_SVR);
        unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
        unsigned int rev = svr & 0xffff;

        /* MPC8313 Rev 2.0 and higher; All MPC837x */
        if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
            (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
                priv->errata |= GFAR_ERRATA_74;

        /* MPC8313 and MPC837x all rev */
        if ((pvr == 0x80850010 && mod == 0x80b0) ||
            (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
                priv->errata |= GFAR_ERRATA_76;

        /* MPC8313 Rev < 2.0 */
        if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
                priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
        unsigned int svr = mfspr(SPRN_SVR);

        if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
                priv->errata |= GFAR_ERRATA_12;
        if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
            ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
                priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif

static void gfar_detect_errata(struct gfar_private *priv)
{
        struct device *dev = &priv->ofdev->dev;

        /* no plans to fix */
        priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
        if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
                __gfar_detect_errata_85xx(priv);
        else /* non-mpc85xx parts, i.e. e300 core based */
                __gfar_detect_errata_83xx(priv);
#endif

        if (priv->errata)
                dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
                         priv->errata);
}

void gfar_mac_reset(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;

        /* Reset MAC layer */
        gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

        /* We need to delay at least 3 TX clocks */
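        /* (3 us is comfortably above three periods of even the slowest
         * TX clock -- 2.5 MHz for 10 Mbit MII, an assumed worst case.)
         */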
        udelay(3);

        /* the soft reset bit is not self-resetting, so we need to
         * clear it before resuming normal operation
         */
        gfar_write(&regs->maccfg1, 0);

        udelay(3);

        /* Compute rx_buff_size based on config flags */
        gfar_rx_buff_size_config(priv);

        /* Initialize the max receive frame/buffer lengths */
        gfar_write(&regs->maxfrm, priv->rx_buffer_size);
        gfar_write(&regs->mrblr, priv->rx_buffer_size);

        /* Initialize the Minimum Frame Length Register */
        gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);

        /* Initialize MACCFG2. */
        tempval = MACCFG2_INIT_SETTINGS;

        /* If the mtu is larger than the max size for standard
         * Ethernet frames (i.e., a jumbo frame), then set maccfg2
         * to allow huge frames, and to check the length
         */
        if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
            gfar_has_errata(priv, GFAR_ERRATA_74))
                tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;

        gfar_write(&regs->maccfg2, tempval);

        /* Clear mac addr hash registers */
        gfar_write(&regs->igaddr0, 0);
        gfar_write(&regs->igaddr1, 0);
        gfar_write(&regs->igaddr2, 0);
        gfar_write(&regs->igaddr3, 0);
        gfar_write(&regs->igaddr4, 0);
        gfar_write(&regs->igaddr5, 0);
        gfar_write(&regs->igaddr6, 0);
        gfar_write(&regs->igaddr7, 0);

        gfar_write(&regs->gaddr0, 0);
        gfar_write(&regs->gaddr1, 0);
        gfar_write(&regs->gaddr2, 0);
        gfar_write(&regs->gaddr3, 0);
        gfar_write(&regs->gaddr4, 0);
        gfar_write(&regs->gaddr5, 0);
        gfar_write(&regs->gaddr6, 0);
        gfar_write(&regs->gaddr7, 0);

        if (priv->extended_hash)
                gfar_clear_exact_match(priv->ndev);

        gfar_mac_rx_config(priv);

        gfar_mac_tx_config(priv);

        gfar_set_mac_address(priv->ndev);

        gfar_set_multi(priv->ndev);

        /* clear ievent and imask before configuring coalescing */
        gfar_ints_disable(priv);

        /* Configure the coalescing support */
        gfar_configure_coalescing_all(priv);
}

static void gfar_hw_init(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 attrs;

        /* Stop the DMA engine now, in case it was running before
         * (The firmware could have used it, and left it running).
         */
        gfar_halt(priv);

        gfar_mac_reset(priv);

        /* Zero out the rmon mib registers if it has them */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
                memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

                /* Mask off the CAM interrupts */
                gfar_write(&regs->rmon.cam1, 0xffffffff);
                gfar_write(&regs->rmon.cam2, 0xffffffff);
        }

        /* Initialize ECNTRL */
        gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

        /* Set the extraction length and index */
        attrs = ATTRELI_EL(priv->rx_stash_size) |
                ATTRELI_EI(priv->rx_stash_index);

        gfar_write(&regs->attreli, attrs);

        /* Start with defaults, and add stashing
         * depending on driver parameters
         */
        attrs = ATTR_INIT_SETTINGS;

        if (priv->bd_stash_en)
                attrs |= ATTR_BDSTASH;

        if (priv->rx_stash_size != 0)
                attrs |= ATTR_BUFSTASH;

        gfar_write(&regs->attr, attrs);

        /* FIFO configs */
        gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
        gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
        gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);

        /* Program the interrupt steering regs, only for MG devices */
        if (priv->num_grps > 1)
                gfar_write_isrg(priv);
}

static void gfar_init_addr_hash_table(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
                priv->extended_hash = 1;
                priv->hash_width = 9;
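                /* (16 registers x 32 bits = 512 bins, hence 9 hash bits) */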

                priv->hash_regs[0] = &regs->igaddr0;
                priv->hash_regs[1] = &regs->igaddr1;
                priv->hash_regs[2] = &regs->igaddr2;
                priv->hash_regs[3] = &regs->igaddr3;
                priv->hash_regs[4] = &regs->igaddr4;
                priv->hash_regs[5] = &regs->igaddr5;
                priv->hash_regs[6] = &regs->igaddr6;
                priv->hash_regs[7] = &regs->igaddr7;
                priv->hash_regs[8] = &regs->gaddr0;
                priv->hash_regs[9] = &regs->gaddr1;
                priv->hash_regs[10] = &regs->gaddr2;
                priv->hash_regs[11] = &regs->gaddr3;
                priv->hash_regs[12] = &regs->gaddr4;
                priv->hash_regs[13] = &regs->gaddr5;
                priv->hash_regs[14] = &regs->gaddr6;
                priv->hash_regs[15] = &regs->gaddr7;

        } else {
                priv->extended_hash = 0;
                priv->hash_width = 8;

                priv->hash_regs[0] = &regs->gaddr0;
                priv->hash_regs[1] = &regs->gaddr1;
                priv->hash_regs[2] = &regs->gaddr2;
                priv->hash_regs[3] = &regs->gaddr3;
                priv->hash_regs[4] = &regs->gaddr4;
                priv->hash_regs[5] = &regs->gaddr5;
                priv->hash_regs[6] = &regs->gaddr6;
                priv->hash_regs[7] = &regs->gaddr7;
        }
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start
 */
static int gfar_probe(struct platform_device *ofdev)
{
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
        int err = 0, i;

        err = gfar_of_init(ofdev, &dev);

        if (err)
                return err;

        priv = netdev_priv(dev);
        priv->ndev = dev;
        priv->ofdev = ofdev;
        priv->dev = &ofdev->dev;
        SET_NETDEV_DEV(dev, &ofdev->dev);

        INIT_WORK(&priv->reset_task, gfar_reset_task);

        platform_set_drvdata(ofdev, priv);

        gfar_detect_errata(priv);

        /* Set the dev->base_addr to the gfar reg region */
        dev->base_addr = (unsigned long) priv->gfargrp[0].regs;

        /* Fill in the dev structure */
        dev->watchdog_timeo = TX_TIMEOUT;
        dev->mtu = 1500;
        dev->netdev_ops = &gfar_netdev_ops;
        dev->ethtool_ops = &gfar_ethtool_ops;

        /* Register NAPI for each interrupt group */
        for (i = 0; i < priv->num_grps; i++) {
                if (priv->poll_mode == GFAR_SQ_POLLING) {
                        netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
                                       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
                        netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
                                       gfar_poll_tx_sq, 2);
                } else {
                        netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
                                       gfar_poll_rx, GFAR_DEV_WEIGHT);
                        netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
                                       gfar_poll_tx, 2);
                }
        }

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
                dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
                                   NETIF_F_RXCSUM;
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
                                 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
        }

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
                dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
                                    NETIF_F_HW_VLAN_CTAG_RX;
                dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
        }

        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

        gfar_init_addr_hash_table(priv);

        /* Insert receive time stamps into padding alignment bytes */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
                priv->padding = 8;

        if (dev->features & NETIF_F_IP_CSUM ||
            priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
                dev->needed_headroom = GMAC_FCB_LEN;

        priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

        /* Initializing some of the rx/tx queue level parameters */
        for (i = 0; i < priv->num_tx_queues; i++) {
                priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
                priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
                priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
                priv->tx_queue[i]->txic = DEFAULT_TXIC;
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
                priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
                priv->rx_queue[i]->rxic = DEFAULT_RXIC;
        }

        /* always enable rx filer */
        priv->rx_filer_enable = 1;
        /* Enable most messages by default */
        priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
        /* use priority h/w tx queue scheduling for single queue devices */
1439         if (priv->num_tx_queues == 1)
1440                 priv->prio_sched_en = 1;
1441
1442         set_bit(GFAR_DOWN, &priv->state);
1443
1444         gfar_hw_init(priv);
1445
1446         /* Carrier starts down, phylib will bring it up */
1447         netif_carrier_off(dev);
1448
1449         err = register_netdev(dev);
1450
1451         if (err) {
1452                 pr_err("%s: Cannot register net device, aborting\n", dev->name);
1453                 goto register_fail;
1454         }
1455
1456         device_set_wakeup_capable(&dev->dev, priv->device_flags &
1457                                   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1458
1459         /* fill out IRQ number and name fields */
1460         for (i = 0; i < priv->num_grps; i++) {
1461                 struct gfar_priv_grp *grp = &priv->gfargrp[i];
1462                 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1463                         sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
1464                                 dev->name, "_g", '0' + i, "_tx");
1465                         sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
1466                                 dev->name, "_g", '0' + i, "_rx");
1467                         sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
1468                                 dev->name, "_g", '0' + i, "_er");
1469                 } else
1470                         strcpy(gfar_irq(grp, TX)->name, dev->name);
1471         }
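
        /* The composed names come out like "eth0_g0_tx", "eth0_g0_rx" and
         * "eth0_g0_er" per group (illustrative; the actual interface name
         * varies), keeping /proc/interrupts readable on multi-group devices.
         */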
1472
1473         /* Initialize the filer table */
1474         gfar_init_filer_table(priv);
1475
1476         /* Print out the device info */
1477         netdev_info(dev, "mac: %pM\n", dev->dev_addr);
1478
1479         /* Even more device info helps when determining which kernel
1480          * provided which set of benchmarks.
1481          */
1482         netdev_info(dev, "Running with NAPI enabled\n");
1483         for (i = 0; i < priv->num_rx_queues; i++)
1484                 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1485                             i, priv->rx_queue[i]->rx_ring_size);
1486         for (i = 0; i < priv->num_tx_queues; i++)
1487                 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1488                             i, priv->tx_queue[i]->tx_ring_size);
1489
1490         return 0;
1491
1492 register_fail:
1493         unmap_group_regs(priv);
1494         gfar_free_rx_queues(priv);
1495         gfar_free_tx_queues(priv);
1496         of_node_put(priv->phy_node);
1497         of_node_put(priv->tbi_node);
1498         free_gfar_dev(priv);
1499         return err;
1500 }
1501
1502 static int gfar_remove(struct platform_device *ofdev)
1503 {
1504         struct gfar_private *priv = platform_get_drvdata(ofdev);
1505
1506         of_node_put(priv->phy_node);
1507         of_node_put(priv->tbi_node);
1508
1509         unregister_netdev(priv->ndev);
1510         unmap_group_regs(priv);
1511         gfar_free_rx_queues(priv);
1512         gfar_free_tx_queues(priv);
1513         free_gfar_dev(priv);
1514
1515         return 0;
1516 }
1517
1518 #ifdef CONFIG_PM
1519
1520 static int gfar_suspend(struct device *dev)
1521 {
1522         struct gfar_private *priv = dev_get_drvdata(dev);
1523         struct net_device *ndev = priv->ndev;
1524         struct gfar __iomem *regs = priv->gfargrp[0].regs;
1525         u32 tempval;
1526         int magic_packet = priv->wol_en &&
1527                            (priv->device_flags &
1528                             FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1529
1530         if (!netif_running(ndev))
1531                 return 0;
1532
1533         disable_napi(priv);
1534         netif_tx_lock(ndev);
1535         netif_device_detach(ndev);
1536         netif_tx_unlock(ndev);
1537
1538         gfar_halt(priv);
1539
1540         if (magic_packet) {
1541                 /* Enable interrupt on Magic Packet */
1542                 gfar_write(&regs->imask, IMASK_MAG);
1543
1544                 /* Enable Magic Packet mode */
1545                 tempval = gfar_read(&regs->maccfg2);
1546                 tempval |= MACCFG2_MPEN;
1547                 gfar_write(&regs->maccfg2, tempval);
1548
1549                 /* re-enable the Rx block */
1550                 tempval = gfar_read(&regs->maccfg1);
1551                 tempval |= MACCFG1_RX_EN;
1552                 gfar_write(&regs->maccfg1, tempval);
1553
1554         } else {
1555                 phy_stop(priv->phydev);
1556         }
1557
1558         return 0;
1559 }
1560
1561 static int gfar_resume(struct device *dev)
1562 {
1563         struct gfar_private *priv = dev_get_drvdata(dev);
1564         struct net_device *ndev = priv->ndev;
1565         struct gfar __iomem *regs = priv->gfargrp[0].regs;
1566         u32 tempval;
1567         int magic_packet = priv->wol_en &&
1568                            (priv->device_flags &
1569                             FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1570
1571         if (!netif_running(ndev))
1572                 return 0;
1573
1574         if (magic_packet) {
1575                 /* Disable Magic Packet mode */
1576                 tempval = gfar_read(&regs->maccfg2);
1577                 tempval &= ~MACCFG2_MPEN;
1578                 gfar_write(&regs->maccfg2, tempval);
1579         } else {
1580                 phy_start(priv->phydev);
1581         }
1582
1583         gfar_start(priv);
1584
1585         netif_device_attach(ndev);
1586         enable_napi(priv);
1587
1588         return 0;
1589 }
1590
1591 static int gfar_restore(struct device *dev)
1592 {
1593         struct gfar_private *priv = dev_get_drvdata(dev);
1594         struct net_device *ndev = priv->ndev;
1595
1596         if (!netif_running(ndev)) {
1597                 netif_device_attach(ndev);
1598
1599                 return 0;
1600         }
1601
1602         if (gfar_init_bds(ndev)) {
1603                 free_skb_resources(priv);
1604                 return -ENOMEM;
1605         }
1606
1607         gfar_mac_reset(priv);
1608
1609         gfar_init_tx_rx_base(priv);
1610
1611         gfar_start(priv);
1612
1613         priv->oldlink = 0;
1614         priv->oldspeed = 0;
1615         priv->oldduplex = -1;
1616
1617         if (priv->phydev)
1618                 phy_start(priv->phydev);
1619
1620         netif_device_attach(ndev);
1621         enable_napi(priv);
1622
1623         return 0;
1624 }
1625
1626 static const struct dev_pm_ops gfar_pm_ops = {
1627         .suspend = gfar_suspend,
1628         .resume = gfar_resume,
1629         .freeze = gfar_suspend,
1630         .thaw = gfar_resume,
1631         .restore = gfar_restore,
1632 };
1633
1634 #define GFAR_PM_OPS (&gfar_pm_ops)
1635
1636 #else
1637
1638 #define GFAR_PM_OPS NULL
1639
1640 #endif
1641
1642 /* Reads the controller's registers to determine what interface
1643  * connects it to the PHY.
1644  */
1645 static phy_interface_t gfar_get_interface(struct net_device *dev)
1646 {
1647         struct gfar_private *priv = netdev_priv(dev);
1648         struct gfar __iomem *regs = priv->gfargrp[0].regs;
1649         u32 ecntrl;
1650
1651         ecntrl = gfar_read(&regs->ecntrl);
1652
1653         if (ecntrl & ECNTRL_SGMII_MODE)
1654                 return PHY_INTERFACE_MODE_SGMII;
1655
1656         if (ecntrl & ECNTRL_TBI_MODE) {
1657                 if (ecntrl & ECNTRL_REDUCED_MODE)
1658                         return PHY_INTERFACE_MODE_RTBI;
1659                 else
1660                         return PHY_INTERFACE_MODE_TBI;
1661         }
1662
1663         if (ecntrl & ECNTRL_REDUCED_MODE) {
1664                 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
1665                         return PHY_INTERFACE_MODE_RMII;
1666                 } else {
1668                         phy_interface_t interface = priv->interface;
1669
1670                         /* This isn't autodetected right now, so it must
1671                          * be set by the device tree or platform code.
1672                          */
1673                         if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1674                                 return PHY_INTERFACE_MODE_RGMII_ID;
1675
1676                         return PHY_INTERFACE_MODE_RGMII;
1677                 }
1678         }
1679
1680         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1681                 return PHY_INTERFACE_MODE_GMII;
1682
1683         return PHY_INTERFACE_MODE_MII;
1684 }
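
/* Decode summary for the checks above (first match wins):
 *   SGMII_MODE                       -> SGMII
 *   TBI_MODE + REDUCED_MODE          -> RTBI
 *   TBI_MODE                         -> TBI
 *   REDUCED_MODE + REDUCED_MII_MODE  -> RMII
 *   REDUCED_MODE                     -> RGMII (or RGMII_ID via priv->interface)
 *   FSL_GIANFAR_DEV_HAS_GIGABIT      -> GMII
 *   otherwise                        -> MII
 */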
1685
1687 /* Initializes driver's PHY state, and attaches to the PHY.
1688  * Returns 0 on success.
1689  */
1690 static int init_phy(struct net_device *dev)
1691 {
1692         struct gfar_private *priv = netdev_priv(dev);
1693         uint gigabit_support =
1694                 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
1695                 GFAR_SUPPORTED_GBIT : 0;
1696         phy_interface_t interface;
1697
1698         priv->oldlink = 0;
1699         priv->oldspeed = 0;
1700         priv->oldduplex = -1;
1701
1702         interface = gfar_get_interface(dev);
1703
1704         priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1705                                       interface);
1706         if (!priv->phydev) {
1707                 dev_err(&dev->dev, "could not attach to PHY\n");
1708                 return -ENODEV;
1709         }
1710
1711         if (interface == PHY_INTERFACE_MODE_SGMII)
1712                 gfar_configure_serdes(dev);
1713
1714         /* Remove any features not supported by the controller */
1715         priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1716         priv->phydev->advertising = priv->phydev->supported;
1717
1718         /* Add support for flow control, but don't advertise it by default */
1719         priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1720
1721         return 0;
1722 }
1723
1724 /* Initialize TBI PHY interface for communicating with the
1725  * SERDES lynx PHY on the chip.  We communicate with this PHY
1726  * through the MDIO bus on each controller, treating it as a
1727  * "normal" PHY at the address found in the TBIPA register.  We assume
1728  * that the TBIPA register is valid.  Either the MDIO bus code will set
1729  * it to a value that doesn't conflict with other PHYs on the bus, or the
1730  * value doesn't matter, as there are no other PHYs on the bus.
1731  */
1732 static void gfar_configure_serdes(struct net_device *dev)
1733 {
1734         struct gfar_private *priv = netdev_priv(dev);
1735         struct phy_device *tbiphy;
1736
1737         if (!priv->tbi_node) {
1738                 dev_warn(&dev->dev,
1739                          "error: SGMII mode requires that the device tree specify a tbi-handle\n");
1740                 return;
1741         }
1742
1743         tbiphy = of_phy_find_device(priv->tbi_node);
1744         if (!tbiphy) {
1745                 dev_err(&dev->dev, "error: Could not get TBI device\n");
1746                 return;
1747         }
1748
1749         /* If the link is already up, we must already be ok, and don't need to
1750          * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
1751          * everything for us?  Resetting it takes the link down and requires
1752          * several seconds for it to come back.
1753          */
1754         if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
1755                 return;
1756
1757         /* Single clk mode, mii mode off (for serdes communication) */
1758         phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1759
1760         phy_write(tbiphy, MII_ADVERTISE,
1761                   ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1762                   ADVERTISE_1000XPSE_ASYM);
1763
1764         phy_write(tbiphy, MII_BMCR,
1765                   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1766                   BMCR_SPEED1000);
1767 }
1768
1769 static int __gfar_is_rx_idle(struct gfar_private *priv)
1770 {
1771         u32 res;
1772
1773         /* Normally TSEC should not hang on GRS commands, so we should
1774          * actually wait for IEVENT_GRSC flag.
1775          */
1776         if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1777                 return 0;
1778
1779         /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1780          * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1781          * and the Rx can be safely reset.
1782          */
1783         res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1784         res &= 0x7f807f80;
1785         if ((res & 0xffff) == (res >> 16))
1786                 return 1;
1787
1788         return 0;
1789 }
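
/* The mirrored-field comparison above, factored into a generic helper as a
 * minimal sketch (gfar_mirrored_fields_match is an assumed name for
 * illustration, not a driver API):
 */
static inline bool gfar_mirrored_fields_match(u32 val, u32 field_mask)
{
        u32 lo = (val & field_mask) & 0xffff;   /* low copy of the field */
        u32 hi = (val & field_mask) >> 16;      /* high copy of the field */

        /* e.g. gfar_mirrored_fields_match(res, 0x7f807f80) */
        return lo == hi;
}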
1790
1791 /* Halt the receive and transmit queues */
1792 static void gfar_halt_nodisable(struct gfar_private *priv)
1793 {
1794         struct gfar __iomem *regs = priv->gfargrp[0].regs;
1795         u32 tempval;
1796         unsigned int timeout;
1797         int stopped;
1798
1799         gfar_ints_disable(priv);
1800
1801         if (gfar_is_dma_stopped(priv))
1802                 return;
1803
1804         /* Stop the DMA, and wait for it to stop */
1805         tempval = gfar_read(&regs->dmactrl);
1806         tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1807         gfar_write(&regs->dmactrl, tempval);
1808
1809 retry:
1810         timeout = 1000;
1811         while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1812                 cpu_relax();
1813                 timeout--;
1814         }
1815
1816         if (!timeout)
1817                 stopped = gfar_is_dma_stopped(priv);
1818
1819         if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1820             !__gfar_is_rx_idle(priv))
1821                 goto retry;
1822 }
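
/* The retry loop above is the usual bounded-spin idiom; in generic form it
 * would look like the sketch below (gfar_spin_until_true and the function
 * pointer signature are assumptions for illustration, not driver API):
 */
static bool gfar_spin_until_true(bool (*cond)(struct gfar_private *priv),
                                 struct gfar_private *priv,
                                 unsigned int max_loops)
{
        while (max_loops--) {
                if (cond(priv))
                        return true;
                cpu_relax();
        }
        /* one final check after the spin budget expires */
        return cond(priv);
}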
1823
1824 /* Halt the receive and transmit queues */
1825 void gfar_halt(struct gfar_private *priv)
1826 {
1827         struct gfar __iomem *regs = priv->gfargrp[0].regs;
1828         u32 tempval;
1829
1830         /* Disable the Rx/Tx hw queues */
1831         gfar_write(&regs->rqueue, 0);
1832         gfar_write(&regs->tqueue, 0);
1833
1834         mdelay(10);
1835
1836         gfar_halt_nodisable(priv);
1837
1838         /* Disable Rx/Tx DMA */
1839         tempval = gfar_read(&regs->maccfg1);
1840         tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1841         gfar_write(&regs->maccfg1, tempval);
1842 }
1843
1844 void stop_gfar(struct net_device *dev)
1845 {
1846         struct gfar_private *priv = netdev_priv(dev);
1847
1848         netif_tx_stop_all_queues(dev);
1849
1850         smp_mb__before_atomic();
1851         set_bit(GFAR_DOWN, &priv->state);
1852         smp_mb__after_atomic();
1853
1854         disable_napi(priv);
1855
1856         /* disable ints and gracefully shut down Rx/Tx DMA */
1857         gfar_halt(priv);
1858
1859         phy_stop(priv->phydev);
1860
1861         free_skb_resources(priv);
1862 }
1863
1864 static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1865 {
1866         struct txbd8 *txbdp;
1867         struct gfar_private *priv = netdev_priv(tx_queue->dev);
1868         int i, j;
1869
1870         txbdp = tx_queue->tx_bd_base;
1871
1872         for (i = 0; i < tx_queue->tx_ring_size; i++) {
1873                 if (!tx_queue->tx_skbuff[i])
1874                         continue;
1875
1876                 dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
1877                                  be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
1878                 txbdp->lstatus = 0;
1879                 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1880                      j++) {
1881                         txbdp++;
1882                         dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
1883                                        be16_to_cpu(txbdp->length),
1884                                        DMA_TO_DEVICE);
1885                 }
1886                 txbdp++;
1887                 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1888                 tx_queue->tx_skbuff[i] = NULL;
1889         }
1890         kfree(tx_queue->tx_skbuff);
1891         tx_queue->tx_skbuff = NULL;
1892 }
1893
1894 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1895 {
1896         struct rxbd8 *rxbdp;
1897         struct gfar_private *priv = netdev_priv(rx_queue->dev);
1898         int i;
1899
1900         rxbdp = rx_queue->rx_bd_base;
1901
1902         for (i = 0; i < rx_queue->rx_ring_size; i++) {
1903                 if (rx_queue->rx_skbuff[i]) {
1904                         dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
1905                                          priv->rx_buffer_size,
1906                                          DMA_FROM_DEVICE);
1907                         dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1908                         rx_queue->rx_skbuff[i] = NULL;
1909                 }
1910                 rxbdp->lstatus = 0;
1911                 rxbdp->bufPtr = 0;
1912                 rxbdp++;
1913         }
1914         kfree(rx_queue->rx_skbuff);
1915         rx_queue->rx_skbuff = NULL;
1916 }
1917
1918 /* If there are any tx skbs or rx skbs still around, free them.
1919  * Then free tx_skbuff and rx_skbuff
1920  */
1921 static void free_skb_resources(struct gfar_private *priv)
1922 {
1923         struct gfar_priv_tx_q *tx_queue = NULL;
1924         struct gfar_priv_rx_q *rx_queue = NULL;
1925         int i;
1926
1927         /* Go through all the buffer descriptors and free their data buffers */
1928         for (i = 0; i < priv->num_tx_queues; i++) {
1929                 struct netdev_queue *txq;
1930
1931                 tx_queue = priv->tx_queue[i];
1932                 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1933                 if (tx_queue->tx_skbuff)
1934                         free_skb_tx_queue(tx_queue);
1935                 netdev_tx_reset_queue(txq);
1936         }
1937
1938         for (i = 0; i < priv->num_rx_queues; i++) {
1939                 rx_queue = priv->rx_queue[i];
1940                 if (rx_queue->rx_skbuff)
1941                         free_skb_rx_queue(rx_queue);
1942         }
1943
1944         dma_free_coherent(priv->dev,
1945                           sizeof(struct txbd8) * priv->total_tx_ring_size +
1946                           sizeof(struct rxbd8) * priv->total_rx_ring_size,
1947                           priv->tx_queue[0]->tx_bd_base,
1948                           priv->tx_queue[0]->tx_bd_dma_base);
1949 }
1950
1951 void gfar_start(struct gfar_private *priv)
1952 {
1953         struct gfar __iomem *regs = priv->gfargrp[0].regs;
1954         u32 tempval;
1955         int i = 0;
1956
1957         /* Enable Rx/Tx hw queues */
1958         gfar_write(&regs->rqueue, priv->rqueue);
1959         gfar_write(&regs->tqueue, priv->tqueue);
1960
1961         /* Initialize DMACTRL to have WWR and WOP */
1962         tempval = gfar_read(&regs->dmactrl);
1963         tempval |= DMACTRL_INIT_SETTINGS;
1964         gfar_write(&regs->dmactrl, tempval);
1965
1966         /* Make sure we aren't stopped */
1967         tempval = gfar_read(&regs->dmactrl);
1968         tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1969         gfar_write(&regs->dmactrl, tempval);
1970
1971         for (i = 0; i < priv->num_grps; i++) {
1972                 regs = priv->gfargrp[i].regs;
1973                 /* Clear THLT/RHLT, so that the DMA starts polling now */
1974                 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1975                 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1976         }
1977
1978         /* Enable Rx/Tx DMA */
1979         tempval = gfar_read(&regs->maccfg1);
1980         tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1981         gfar_write(&regs->maccfg1, tempval);
1982
1983         gfar_ints_enable(priv);
1984
1985         priv->ndev->trans_start = jiffies; /* prevent tx timeout */
1986 }
1987
1988 static void free_grp_irqs(struct gfar_priv_grp *grp)
1989 {
1990         free_irq(gfar_irq(grp, TX)->irq, grp);
1991         free_irq(gfar_irq(grp, RX)->irq, grp);
1992         free_irq(gfar_irq(grp, ER)->irq, grp);
1993 }
1994
1995 static int register_grp_irqs(struct gfar_priv_grp *grp)
1996 {
1997         struct gfar_private *priv = grp->priv;
1998         struct net_device *dev = priv->ndev;
1999         int err;
2000
2001         /* If the device has multiple interrupts, register for
2002          * them.  Otherwise, only register for the single combined line.
2003          */
2004         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2005                 /* Install our interrupt handlers for Error,
2006                  * Transmit, and Receive
2007                  */
2008                 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error,
2009                                   IRQF_NO_SUSPEND,
2010                                   gfar_irq(grp, ER)->name, grp);
2011                 if (err < 0) {
2012                         netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2013                                   gfar_irq(grp, ER)->irq);
2014
2015                         goto err_irq_fail;
2016                 }
2017                 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
2018                                   gfar_irq(grp, TX)->name, grp);
2019                 if (err < 0) {
2020                         netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2021                                   gfar_irq(grp, TX)->irq);
2022                         goto tx_irq_fail;
2023                 }
2024                 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
2025                                   gfar_irq(grp, RX)->name, grp);
2026                 if (err < 0) {
2027                         netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2028                                   gfar_irq(grp, RX)->irq);
2029                         goto rx_irq_fail;
2030                 }
2031         } else {
2032                 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt,
2033                                   IRQF_NO_SUSPEND,
2034                                   gfar_irq(grp, TX)->name, grp);
2035                 if (err < 0) {
2036                         netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2037                                   gfar_irq(grp, TX)->irq);
2038                         goto err_irq_fail;
2039                 }
2040         }
2041
2042         return 0;
2043
2044 rx_irq_fail:
2045         free_irq(gfar_irq(grp, TX)->irq, grp);
2046 tx_irq_fail:
2047         free_irq(gfar_irq(grp, ER)->irq, grp);
2048 err_irq_fail:
2049         return err;
2051 }
2052
2053 static void gfar_free_irq(struct gfar_private *priv)
2054 {
2055         int i;
2056
2057         /* Free the IRQs */
2058         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2059                 for (i = 0; i < priv->num_grps; i++)
2060                         free_grp_irqs(&priv->gfargrp[i]);
2061         } else {
2062                 for (i = 0; i < priv->num_grps; i++)
2063                         free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2064                                  &priv->gfargrp[i]);
2065         }
2066 }
2067
2068 static int gfar_request_irq(struct gfar_private *priv)
2069 {
2070         int err, i, j;
2071
2072         for (i = 0; i < priv->num_grps; i++) {
2073                 err = register_grp_irqs(&priv->gfargrp[i]);
2074                 if (err) {
2075                         for (j = 0; j < i; j++)
2076                                 free_grp_irqs(&priv->gfargrp[j]);
2077                         return err;
2078                 }
2079         }
2080
2081         return 0;
2082 }
2083
2084 /* Bring the controller up and running */
2085 int startup_gfar(struct net_device *ndev)
2086 {
2087         struct gfar_private *priv = netdev_priv(ndev);
2088         int err;
2089
2090         gfar_mac_reset(priv);
2091
2092         err = gfar_alloc_skb_resources(ndev);
2093         if (err)
2094                 return err;
2095
2096         gfar_init_tx_rx_base(priv);
2097
2098         smp_mb__before_atomic();
2099         clear_bit(GFAR_DOWN, &priv->state);
2100         smp_mb__after_atomic();
2101
2102         /* Start Rx/Tx DMA and enable the interrupts */
2103         gfar_start(priv);
2104
2105         phy_start(priv->phydev);
2106
2107         enable_napi(priv);
2108
2109         netif_tx_wake_all_queues(ndev);
2110
2111         return 0;
2112 }
2113
2114 /* Called when something needs to use the ethernet device
2115  * Returns 0 for success.
2116  */
2117 static int gfar_enet_open(struct net_device *dev)
2118 {
2119         struct gfar_private *priv = netdev_priv(dev);
2120         int err;
2121
2122         err = init_phy(dev);
2123         if (err)
2124                 return err;
2125
2126         err = gfar_request_irq(priv);
2127         if (err)
2128                 return err;
2129
2130         err = startup_gfar(dev);
2131         if (err)
2132                 return err;
2133
2134         return err;
2135 }
2136
2137 static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
2138 {
2139         struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
2140
2141         memset(fcb, 0, GMAC_FCB_LEN);
2142
2143         return fcb;
2144 }
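
/* Callers must guarantee GMAC_FCB_LEN bytes of headroom before calling
 * gfar_add_fcb(); gfar_start_xmit() below ensures this, reallocating the
 * skb via skb_realloc_headroom() when needed.
 */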
2145
2146 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
2147                                     int fcb_length)
2148 {
2149         /* If we're here, it's an IP packet with a TCP or UDP
2150          * payload.  We set it up for hardware checksumming, using the
2151          * pseudo-header checksum the stack has already provided
2152          */
2153         u8 flags = TXFCB_DEFAULT;
2154
2155         /* Tell the controller what the protocol is
2156          * And provide the already calculated phcs
2157          */
2158         if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
2159                 flags |= TXFCB_UDP;
2160                 fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
2161         } else
2162                 fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
2163
2164         /* l3os is the distance between the start of the
2165          * frame (skb->data) and the start of the IP hdr.
2166          * l4os is the distance between the start of the
2167          * l3 hdr and the l4 hdr
2168          */
2169         fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
2170         fcb->l4os = skb_network_header_len(skb);
2171
2172         fcb->flags = flags;
2173 }
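
/* Worked example: for an untagged IPv4/TCP frame with only the 8-byte FCB
 * pushed (fcb_length == GMAC_FCB_LEN), skb_network_offset() equals
 * GMAC_FCB_LEN + ETH_HLEN, so l3os = ETH_HLEN (14) and l4os is the IP
 * header length (20 without options).
 */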
2174
2175 inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2176 {
2177         fcb->flags |= TXFCB_VLN;
2178         fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
2179 }
2180
2181 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2182                                       struct txbd8 *base, int ring_size)
2183 {
2184         struct txbd8 *new_bd = bdp + stride;
2185
2186         return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2187 }
2188
2189 static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2190                                       int ring_size)
2191 {
2192         return skip_txbd(bdp, 1, base, ring_size);
2193 }
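
/* The same wrap arithmetic in index form.  The BD rings are sized to
 * powers of two (see the TX_RING_MOD_MASK usage below), so a mask works
 * too; this is a sketch with an assumed name, not a driver API:
 */
static inline unsigned int skip_txbd_index(unsigned int idx, int stride,
                                           unsigned int ring_size)
{
        /* ring_size must be a power of two for the mask to be correct */
        return (idx + stride) & (ring_size - 1);
}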
2194
2195 /* eTSEC12: csum generation not supported for some fcb offsets */
2196 static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2197                                        unsigned long fcb_addr)
2198 {
2199         return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2200                (fcb_addr % 0x20) > 0x18);
2201 }
2202
2203 /* eTSEC76: csum generation for frames larger than 2500 may
2204  * cause excess delays before start of transmission
2205  */
2206 static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2207                                        unsigned int len)
2208 {
2209         return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2210                (len > 2500));
2211 }
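
/* Worked examples: an FCB whose address ends in 0x19..0x1f falls in the
 * last seven bytes of a 32-byte window and trips eTSEC12, while a
 * 3000-byte frame trips eTSEC76; either case makes gfar_start_xmit()
 * below fall back to software checksumming.
 */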
2212
2213 /* This is called by the kernel when a frame is ready for transmission.
2214  * It is pointed to by the dev->hard_start_xmit function pointer
2215  */
2216 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2217 {
2218         struct gfar_private *priv = netdev_priv(dev);
2219         struct gfar_priv_tx_q *tx_queue = NULL;
2220         struct netdev_queue *txq;
2221         struct gfar __iomem *regs = NULL;
2222         struct txfcb *fcb = NULL;
2223         struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2224         u32 lstatus;
2225         int i, rq = 0;
2226         int do_tstamp, do_csum, do_vlan;
2227         u32 bufaddr;
2228         unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
2229
2230         rq = skb->queue_mapping;
2231         tx_queue = priv->tx_queue[rq];
2232         txq = netdev_get_tx_queue(dev, rq);
2233         base = tx_queue->tx_bd_base;
2234         regs = tx_queue->grp->regs;
2235
2236         do_csum = (skb->ip_summed == CHECKSUM_PARTIAL);
2237         do_vlan = skb_vlan_tag_present(skb);
2238         do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2239                     priv->hwts_tx_en;
2240
2241         if (do_csum || do_vlan)
2242                 fcb_len = GMAC_FCB_LEN;
2243
2244         /* check if time stamp should be generated */
2245         if (unlikely(do_tstamp))
2246                 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2247
2248         /* make space for additional header when fcb is needed */
2249         if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
2250                 struct sk_buff *skb_new;
2251
2252                 skb_new = skb_realloc_headroom(skb, fcb_len);
2253                 if (!skb_new) {
2254                         dev->stats.tx_errors++;
2255                         dev_kfree_skb_any(skb);
2256                         return NETDEV_TX_OK;
2257                 }
2258
2259                 if (skb->sk)
2260                         skb_set_owner_w(skb_new, skb->sk);
2261                 dev_consume_skb_any(skb);
2262                 skb = skb_new;
2263         }
2264
2265         /* total number of fragments in the SKB */
2266         nr_frags = skb_shinfo(skb)->nr_frags;
2267
2268         /* calculate the required number of TxBDs for this skb */
2269         if (unlikely(do_tstamp))
2270                 nr_txbds = nr_frags + 2;
2271         else
2272                 nr_txbds = nr_frags + 1;
2273
2274         /* check if there is space to queue this packet */
2275         if (nr_txbds > tx_queue->num_txbdfree) {
2276                 /* no space, stop the queue */
2277                 netif_tx_stop_queue(txq);
2278                 dev->stats.tx_fifo_errors++;
2279                 return NETDEV_TX_BUSY;
2280         }
2281
2282         /* Update transmit stats */
2283         bytes_sent = skb->len;
2284         tx_queue->stats.tx_bytes += bytes_sent;
2285         /* keep Tx bytes on wire for BQL accounting */
2286         GFAR_CB(skb)->bytes_sent = bytes_sent;
2287         tx_queue->stats.tx_packets++;
2288
2289         txbdp = txbdp_start = tx_queue->cur_tx;
2290         lstatus = be32_to_cpu(txbdp->lstatus);
2291
2292         /* Time stamp insertion requires one additional TxBD */
2293         if (unlikely(do_tstamp))
2294                 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2295                                                  tx_queue->tx_ring_size);
2296
2297         if (nr_frags == 0) {
2298                 if (unlikely(do_tstamp)) {
2299                         u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
2300
2301                         lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2302                         txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
2303                 } else {
2304                         lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2305                 }
2306         } else {
2307                 /* Place the fragment addresses and lengths into the TxBDs */
2308                 for (i = 0; i < nr_frags; i++) {
2309                         unsigned int frag_len;
2310                         /* Point at the next BD, wrapping as needed */
2311                         txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2312
2313                         frag_len = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2314
2315                         lstatus = be32_to_cpu(txbdp->lstatus) | frag_len |
2316                                   BD_LFLAG(TXBD_READY);
2317
2318                         /* Handle the last BD specially */
2319                         if (i == nr_frags - 1)
2320                                 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2321
2322                         bufaddr = skb_frag_dma_map(priv->dev,
2323                                                    &skb_shinfo(skb)->frags[i],
2324                                                    0,
2325                                                    frag_len,
2326                                                    DMA_TO_DEVICE);
2327                         if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
2328                                 goto dma_map_err;
2329
2330                         /* set the TxBD length and buffer pointer */
2331                         txbdp->bufPtr = cpu_to_be32(bufaddr);
2332                         txbdp->lstatus = cpu_to_be32(lstatus);
2333                 }
2334
2335                 lstatus = be32_to_cpu(txbdp_start->lstatus);
2336         }
2337
2338         /* Add TxPAL between FCB and frame if required */
2339         if (unlikely(do_tstamp)) {
2340                 skb_push(skb, GMAC_TXPAL_LEN);
2341                 memset(skb->data, 0, GMAC_TXPAL_LEN);
2342         }
2343
2344         /* Add TxFCB if required */
2345         if (fcb_len) {
2346                 fcb = gfar_add_fcb(skb);
2347                 lstatus |= BD_LFLAG(TXBD_TOE);
2348         }
2349
2350         /* Set up checksumming */
2351         if (do_csum) {
2352                 gfar_tx_checksum(skb, fcb, fcb_len);
2353
2354                 if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
2355                     unlikely(gfar_csum_errata_76(priv, skb->len))) {
2356                         __skb_pull(skb, GMAC_FCB_LEN);
2357                         skb_checksum_help(skb);
2358                         if (do_vlan || do_tstamp) {
2359                                 /* put back a new fcb for vlan/tstamp TOE */
2360                                 fcb = gfar_add_fcb(skb);
2361                         } else {
2362                                 /* Tx TOE not used */
2363                                 lstatus &= ~(BD_LFLAG(TXBD_TOE));
2364                                 fcb = NULL;
2365                         }
2366                 }
2367         }
2368
2369         if (do_vlan)
2370                 gfar_tx_vlan(skb, fcb);
2371
2372         /* Setup tx hardware time stamping if requested */
2373         if (unlikely(do_tstamp)) {
2374                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2375                 fcb->ptp = 1;
2376         }
2377
2378         bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
2379                                  DMA_TO_DEVICE);
2380         if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
2381                 goto dma_map_err;
2382
2383         txbdp_start->bufPtr = cpu_to_be32(bufaddr);
2384
2385         /* If time stamping is requested, one additional TxBD must be set up. The
2386          * first TxBD points to the FCB and must have a data length of
2387          * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2388          * the full frame length.
2389          */
2390         if (unlikely(do_tstamp)) {
2391                 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
2392
2393                 bufaddr = be32_to_cpu(txbdp_start->bufPtr);
2394                 bufaddr += fcb_len;
2395                 lstatus_ts |= BD_LFLAG(TXBD_READY) |
2396                               (skb_headlen(skb) - fcb_len);
2397
2398                 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
2399                 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
2400                 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2401         } else {
2402                 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2403         }
2404
2405         netdev_tx_sent_queue(txq, bytes_sent);
2406
2407         gfar_wmb();
2408
2409         txbdp_start->lstatus = cpu_to_be32(lstatus);
2410
2411         gfar_wmb(); /* force lstatus write before tx_skbuff */
2412
2413         tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2414
2415         /* Update the current skb pointer to the next entry we will use
2416          * (wrapping if necessary)
2417          */
2418         tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2419                               TX_RING_MOD_MASK(tx_queue->tx_ring_size);
2420
2421         tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2422
2423         /* We can work in parallel with gfar_clean_tx_ring(), except
2424          * when modifying num_txbdfree. Note that we didn't grab the lock
2425          * when we were reading the num_txbdfree and checking for available
2426          * space, that's because outside of this function it can only grow.
2427          */
2428         spin_lock_bh(&tx_queue->txlock);
2429         /* reduce TxBD free count */
2430         tx_queue->num_txbdfree -= (nr_txbds);
2431         spin_unlock_bh(&tx_queue->txlock);
2432
2433         /* If the next BD still needs to be cleaned up, then the bds
2434          * are full.  We need to tell the kernel to stop sending us stuff.
2435          */
2436         if (!tx_queue->num_txbdfree) {
2437                 netif_tx_stop_queue(txq);
2438
2439                 dev->stats.tx_fifo_errors++;
2440         }
2441
2442         /* Tell the DMA to go go go */
2443         gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2444
2445         return NETDEV_TX_OK;
2446
2447 dma_map_err:
2448         txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
2449         if (do_tstamp)
2450                 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2451         for (i = 0; i < nr_frags; i++) {
2452                 lstatus = be32_to_cpu(txbdp->lstatus);
2453                 if (!(lstatus & BD_LFLAG(TXBD_READY)))
2454                         break;
2455
2456                 lstatus &= ~BD_LFLAG(TXBD_READY);
2457                 txbdp->lstatus = cpu_to_be32(lstatus);
2458                 bufaddr = be32_to_cpu(txbdp->bufPtr);
2459                 dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
2460                                DMA_TO_DEVICE);
2461                 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2462         }
2463         gfar_wmb();
2464         dev_kfree_skb_any(skb);
2465         return NETDEV_TX_OK;
2466 }
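
/* Descriptor accounting recap for the xmit path above: the linear part of
 * the skb takes one TxBD, each page fragment adds one, and hardware time
 * stamping adds one more (the first BD then carries only the FCB).  For
 * example, a two-fragment skb with time stamping consumes
 * nr_txbds = 2 + 2 = 4 descriptors.
 */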
2467
2468 /* Stops the kernel queue, and halts the controller */
2469 static int gfar_close(struct net_device *dev)
2470 {
2471         struct gfar_private *priv = netdev_priv(dev);
2472
2473         cancel_work_sync(&priv->reset_task);
2474         stop_gfar(dev);
2475
2476         /* Disconnect from the PHY */
2477         phy_disconnect(priv->phydev);
2478         priv->phydev = NULL;
2479
2480         gfar_free_irq(priv);
2481
2482         return 0;
2483 }
2484
2485 /* Changes the mac address if the controller is not running. */
2486 static int gfar_set_mac_address(struct net_device *dev)
2487 {
2488         gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2489
2490         return 0;
2491 }
2492
2493 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2494 {
2495         struct gfar_private *priv = netdev_priv(dev);
2496         int frame_size = new_mtu + ETH_HLEN;
2497
2498         if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
2499                 netif_err(priv, drv, dev, "Invalid MTU setting\n");
2500                 return -EINVAL;
2501         }
2502
2503         while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2504                 cpu_relax();
2505
2506         if (dev->flags & IFF_UP)
2507                 stop_gfar(dev);
2508
2509         dev->mtu = new_mtu;
2510
2511         if (dev->flags & IFF_UP)
2512                 startup_gfar(dev);
2513
2514         clear_bit_unlock(GFAR_RESETTING, &priv->state);
2515
2516         return 0;
2517 }
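
/* E.g. the default MTU of 1500 yields frame_size = 1514 once ETH_HLEN is
 * added; values below the 64-byte minimum frame or above JUMBO_FRAME_SIZE
 * are rejected up front, before the device is stopped and restarted.
 */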
2518
2519 void reset_gfar(struct net_device *ndev)
2520 {
2521         struct gfar_private *priv = netdev_priv(ndev);
2522
2523         while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2524                 cpu_relax();
2525
2526         stop_gfar(ndev);
2527         startup_gfar(ndev);
2528
2529         clear_bit_unlock(GFAR_RESETTING, &priv->state);
2530 }
2531
2532 /* gfar_reset_task gets scheduled when a packet has not been
2533  * transmitted after a set amount of time.
2534  * For now, assume that clearing out all the structures, and
2535  * starting over will fix the problem.
2536  */
2537 static void gfar_reset_task(struct work_struct *work)
2538 {
2539         struct gfar_private *priv = container_of(work, struct gfar_private,
2540                                                  reset_task);
2541         reset_gfar(priv->ndev);
2542 }
2543
2544 static void gfar_timeout(struct net_device *dev)
2545 {
2546         struct gfar_private *priv = netdev_priv(dev);
2547
2548         dev->stats.tx_errors++;
2549         schedule_work(&priv->reset_task);
2550 }
2551
2552 static void gfar_align_skb(struct sk_buff *skb)
2553 {
2554         /* The controller needs the data buffer aligned to the RX buffer
2555          * alignment boundary, so reserve however many bytes that takes.
2556          */
2557         skb_reserve(skb, RXBUF_ALIGNMENT -
2558                     (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2559 }
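
/* E.g. assuming a 64-byte RXBUF_ALIGNMENT, skb->data ending in 0x28 gets
 * 0x18 bytes reserved, landing it on the next 64-byte boundary.  A buffer
 * that is already aligned still skips one full alignment unit, which the
 * extra RXBUF_ALIGNMENT headroom in gfar_alloc_skb() below absorbs.
 */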
2560
2561 /* Interrupt Handler for Transmit complete */
2562 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2563 {
2564         struct net_device *dev = tx_queue->dev;
2565         struct netdev_queue *txq;
2566         struct gfar_private *priv = netdev_priv(dev);
2567         struct txbd8 *bdp, *next = NULL;
2568         struct txbd8 *lbdp = NULL;
2569         struct txbd8 *base = tx_queue->tx_bd_base;
2570         struct sk_buff *skb;
2571         int skb_dirtytx;
2572         int tx_ring_size = tx_queue->tx_ring_size;
2573         int frags = 0, nr_txbds = 0;
2574         int i;
2575         int howmany = 0;
2576         int tqi = tx_queue->qindex;
2577         unsigned int bytes_sent = 0;
2578         u32 lstatus;
2579         size_t buflen;
2580
2581         txq = netdev_get_tx_queue(dev, tqi);
2582         bdp = tx_queue->dirty_tx;
2583         skb_dirtytx = tx_queue->skb_dirtytx;
2584
2585         while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2586
2587                 frags = skb_shinfo(skb)->nr_frags;
2588
2589                 /* When time stamping, one additional TxBD must be freed.
2590                  * Also, we need to dma_unmap_single() the TxPAL.
2591                  */
2592                 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2593                         nr_txbds = frags + 2;
2594                 else
2595                         nr_txbds = frags + 1;
2596
2597                 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2598
2599                 lstatus = be32_to_cpu(lbdp->lstatus);
2600
2601                 /* Only clean completed frames */
2602                 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2603                     (lstatus & BD_LENGTH_MASK))
2604                         break;
2605
2606                 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2607                         next = next_txbd(bdp, base, tx_ring_size);
2608                         buflen = be16_to_cpu(next->length) +
2609                                  GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2610                 } else
2611                         buflen = be16_to_cpu(bdp->length);
2612
2613                 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2614                                  buflen, DMA_TO_DEVICE);
2615
2616                 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2617                         struct skb_shared_hwtstamps shhwtstamps;
2618                         u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) & ~0x7);
2619
2620                         memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2621                         shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2622                         skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2623                         skb_tstamp_tx(skb, &shhwtstamps);
2624                         gfar_clear_txbd_status(bdp);
2625                         bdp = next;
2626                 }
2627
2628                 gfar_clear_txbd_status(bdp);
2629                 bdp = next_txbd(bdp, base, tx_ring_size);
2630
2631                 for (i = 0; i < frags; i++) {
2632                         dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2633                                        be16_to_cpu(bdp->length),
2634                                        DMA_TO_DEVICE);
2635                         gfar_clear_txbd_status(bdp);
2636                         bdp = next_txbd(bdp, base, tx_ring_size);
2637                 }
2638
2639                 bytes_sent += GFAR_CB(skb)->bytes_sent;
2640
2641                 dev_kfree_skb_any(skb);
2642
2643                 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2644
2645                 skb_dirtytx = (skb_dirtytx + 1) &
2646                               TX_RING_MOD_MASK(tx_ring_size);
2647
2648                 howmany++;
2649                 spin_lock(&tx_queue->txlock);
2650                 tx_queue->num_txbdfree += nr_txbds;
2651                 spin_unlock(&tx_queue->txlock);
2652         }
2653
2654         /* If we freed a buffer, we can restart transmission, if necessary */
2655         if (tx_queue->num_txbdfree &&
2656             netif_tx_queue_stopped(txq) &&
2657             !(test_bit(GFAR_DOWN, &priv->state)))
2658                 netif_wake_subqueue(priv->ndev, tqi);
2659
2660         /* Update dirty indicators */
2661         tx_queue->skb_dirtytx = skb_dirtytx;
2662         tx_queue->dirty_tx = bdp;
2663
2664         netdev_tx_completed_queue(txq, howmany, bytes_sent);
2665 }
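
/* netdev_tx_completed_queue() above pairs with netdev_tx_sent_queue() in
 * gfar_start_xmit(): BQL sees the same byte counts on both sides because
 * GFAR_CB(skb)->bytes_sent preserves the frame length from before the
 * FCB/TxPAL push.
 */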
2666
2667 static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2668 {
2669         struct gfar_private *priv = netdev_priv(dev);
2670         struct sk_buff *skb;
2671
2672         skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2673         if (!skb)
2674                 return NULL;
2675
2676         gfar_align_skb(skb);
2677
2678         return skb;
2679 }
2680
2681 static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
2682 {
2683         struct gfar_private *priv = netdev_priv(dev);
2684         struct sk_buff *skb;
2685         dma_addr_t addr;
2686
2687         skb = gfar_alloc_skb(dev);
2688         if (!skb)
2689                 return NULL;
2690
2691         addr = dma_map_single(priv->dev, skb->data,
2692                               priv->rx_buffer_size, DMA_FROM_DEVICE);
2693         if (unlikely(dma_mapping_error(priv->dev, addr))) {
2694                 dev_kfree_skb_any(skb);
2695                 return NULL;
2696         }
2697
2698         *bufaddr = addr;
2699         return skb;
2700 }
2701
2702 static inline void count_errors(unsigned short status, struct net_device *dev)
2703 {
2704         struct gfar_private *priv = netdev_priv(dev);
2705         struct net_device_stats *stats = &dev->stats;
2706         struct gfar_extra_stats *estats = &priv->extra_stats;
2707
2708         /* If the packet was truncated, none of the other errors matter */
2709         if (status & RXBD_TRUNCATED) {
2710                 stats->rx_length_errors++;
2711
2712                 atomic64_inc(&estats->rx_trunc);
2713
2714                 return;
2715         }
2716         /* Count the errors, if there were any */
2717         if (status & (RXBD_LARGE | RXBD_SHORT)) {
2718                 stats->rx_length_errors++;
2719
2720                 if (status & RXBD_LARGE)
2721                         atomic64_inc(&estats->rx_large);
2722                 else
2723                         atomic64_inc(&estats->rx_short);
2724         }
2725         if (status & RXBD_NONOCTET) {
2726                 stats->rx_frame_errors++;
2727                 atomic64_inc(&estats->rx_nonoctet);
2728         }
2729         if (status & RXBD_CRCERR) {
2730                 atomic64_inc(&estats->rx_crcerr);
2731                 stats->rx_crc_errors++;
2732         }
2733         if (status & RXBD_OVERRUN) {
2734                 atomic64_inc(&estats->rx_overrun);
2735                 stats->rx_crc_errors++;
2736         }
2737 }
2738
2739 irqreturn_t gfar_receive(int irq, void *grp_id)
2740 {
2741         struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2742         unsigned long flags;
2743         u32 imask;
2744
2745         if (likely(napi_schedule_prep(&grp->napi_rx))) {
2746                 spin_lock_irqsave(&grp->grplock, flags);
2747                 imask = gfar_read(&grp->regs->imask);
2748                 imask &= IMASK_RX_DISABLED;
2749                 gfar_write(&grp->regs->imask, imask);
2750                 spin_unlock_irqrestore(&grp->grplock, flags);
2751                 __napi_schedule(&grp->napi_rx);
2752         } else {
2753                 /* Clear IEVENT, so interrupts aren't called again
2754                  * because of the packets that have already arrived.
2755                  */
2756                 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2757         }
2758
2759         return IRQ_HANDLED;
2760 }
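
/* The handshake used here: the hard IRQ masks its event sources under
 * grplock and schedules NAPI, while the matching poll routine (e.g.
 * gfar_poll_rx_sq() below) completes NAPI and unmasks under the same
 * lock, so the mask state and the scheduled state cannot disagree.
 */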
2761
2762 /* Interrupt Handler for Transmit complete */
2763 static irqreturn_t gfar_transmit(int irq, void *grp_id)
2764 {
2765         struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2766         unsigned long flags;
2767         u32 imask;
2768
2769         if (likely(napi_schedule_prep(&grp->napi_tx))) {
2770                 spin_lock_irqsave(&grp->grplock, flags);
2771                 imask = gfar_read(&grp->regs->imask);
2772                 imask &= IMASK_TX_DISABLED;
2773                 gfar_write(&grp->regs->imask, imask);
2774                 spin_unlock_irqrestore(&grp->grplock, flags);
2775                 __napi_schedule(&grp->napi_tx);
2776         } else {
2777                 /* Clear IEVENT, so interrupts aren't called again
2778                  * because of the packets that have already arrived.
2779                  */
2780                 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2781         }
2782
2783         return IRQ_HANDLED;
2784 }
2785
2786 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2787 {
2788         /* If valid headers were found, and valid sums
2789          * were verified, then we tell the kernel that no
2790          * checksumming is necessary.  Otherwise, the stack must verify it.
2791          */
2792         if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
2793             (RXFCB_CIP | RXFCB_CTU))
2794                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2795         else
2796                 skb_checksum_none_assert(skb);
2797 }
2798
2799 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2800 static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2801                                int amount_pull, struct napi_struct *napi)
2802 {
2803         struct gfar_private *priv = netdev_priv(dev);
2804         struct rxfcb *fcb = NULL;
2805
2806         /* the FCB, if present, sits at the very beginning of the buffer */
2807         fcb = (struct rxfcb *)skb->data;
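
        /* Buffer layout at this point: [RxFCB, 8 bytes, if uses_rxfcb]
         * [rx timestamp in the padding bytes, if hwts_rx_en]
         * [Ethernet frame]; each skb_pull() below strips one layer.
         */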
2808
2809         /* Remove the FCB from the skb
2810          * Remove the padded bytes, if there are any
2811          */
2812         if (amount_pull) {
2813                 skb_record_rx_queue(skb, fcb->rq);
2814                 skb_pull(skb, amount_pull);
2815         }
2816
2817         /* Get receive timestamp from the skb */
2818         if (priv->hwts_rx_en) {
2819                 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2820                 u64 *ns = (u64 *) skb->data;
2821
2822                 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2823                 shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2824         }
2825
2826         if (priv->padding)
2827                 skb_pull(skb, priv->padding);
2828
2829         if (dev->features & NETIF_F_RXCSUM)
2830                 gfar_rx_checksum(skb, fcb);
2831
2832         /* Tell the skb what kind of packet this is */
2833         skb->protocol = eth_type_trans(skb, dev);
2834
2835         /* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2836          * Even if vlan rx accel is disabled, on some chips
2837          * RXFCB_VLN is pseudo-randomly set.
2838          */
2839         if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2840             be16_to_cpu(fcb->flags) & RXFCB_VLN)
2841                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2842                                        be16_to_cpu(fcb->vlctl));
2843
2844         /* Send the packet up the stack */
2845         napi_gro_receive(napi, skb);
2847 }
2848
2849 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2850  * until the budget/quota has been reached. Returns the number
2851  * of frames handled
2852  */
2853 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2854 {
2855         struct net_device *dev = rx_queue->dev;
2856         struct rxbd8 *bdp, *base;
2857         struct sk_buff *skb;
2858         int pkt_len;
2859         int amount_pull;
2860         int howmany = 0;
2861         struct gfar_private *priv = netdev_priv(dev);
2862
2863         /* Get the first full descriptor */
2864         bdp = rx_queue->cur_rx;
2865         base = rx_queue->rx_bd_base;
2866
2867         amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
2868
2869         while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
2870                 struct sk_buff *newskb;
2871                 dma_addr_t bufaddr;
2872
2873                 rmb();
2874
2875                 /* Add another skb for the future */
2876                 newskb = gfar_new_skb(dev, &bufaddr);
2877
2878                 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2879
2880                 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2881                                  priv->rx_buffer_size, DMA_FROM_DEVICE);
2882
2883                 if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
2884                              be16_to_cpu(bdp->length) > priv->rx_buffer_size))
2885                         bdp->status = cpu_to_be16(RXBD_LARGE);
2886
2887                 /* We drop the frame if we failed to allocate a new buffer */
2888                 if (unlikely(!newskb ||
2889                              !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
2890                              be16_to_cpu(bdp->status) & RXBD_ERR)) {
2891                         count_errors(be16_to_cpu(bdp->status), dev);
2892
2893                         if (unlikely(!newskb)) {
2894                                 newskb = skb;
2895                                 bufaddr = be32_to_cpu(bdp->bufPtr);
2896                         } else if (skb)
2897                                 dev_kfree_skb(skb);
2898                 } else {
2899                         /* Increment the number of packets */
2900                         rx_queue->stats.rx_packets++;
2901                         howmany++;
2902
2903                         if (likely(skb)) {
2904                                 /* Remove the FCS from the packet length */
2905                                 pkt_len = be16_to_cpu(bdp->length) -
2906                                           ETH_FCS_LEN;
2907                                 skb_put(skb, pkt_len);
2908                                 rx_queue->stats.rx_bytes += pkt_len;
2909                                 skb_record_rx_queue(skb, rx_queue->qindex);
2910                                 gfar_process_frame(dev, skb, amount_pull,
2911                                                    &rx_queue->grp->napi_rx);
2912
2913                         } else {
2914                                 netif_warn(priv, rx_err, dev, "Missing skb!\n");
2915                                 rx_queue->stats.rx_dropped++;
2916                                 atomic64_inc(&priv->extra_stats.rx_skbmissing);
2917                         }
2918
2919                 }
2920
2921                 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
2922
2923                 /* Setup the new bdp */
2924                 gfar_init_rxbdp(rx_queue, bdp, bufaddr);
2925
2926                 /* Update Last Free RxBD pointer for LFC */
2927                 if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
2928                         gfar_write(rx_queue->rfbptr, (u32)bdp);
2929
2930                 /* Update to the next pointer */
2931                 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2932
2933                 /* update to point at the next skb */
2934                 rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
2935                                       RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2936         }
2937
2938         /* Update the current rxbd pointer to be the next one */
2939         rx_queue->cur_rx = bdp;
2940
2941         return howmany;
2942 }
2943
2944 static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2945 {
2946         struct gfar_priv_grp *gfargrp =
2947                 container_of(napi, struct gfar_priv_grp, napi_rx);
2948         struct gfar __iomem *regs = gfargrp->regs;
2949         struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2950         int work_done = 0;
2951
2952         /* Clear IEVENT, so the interrupt isn't raised again
2953          * for the packets that have already arrived
2954          */
2955         gfar_write(&regs->ievent, IEVENT_RX_MASK);
2956
2957         work_done = gfar_clean_rx_ring(rx_queue, budget);
2958
2959         if (work_done < budget) {
2960                 u32 imask;
2961                 napi_complete(napi);
2962                 /* Clear the halt bit in RSTAT */
2963                 gfar_write(&regs->rstat, gfargrp->rstat);
2964
2965                 spin_lock_irq(&gfargrp->grplock);
2966                 imask = gfar_read(&regs->imask);
2967                 imask |= IMASK_RX_DEFAULT;
2968                 gfar_write(&regs->imask, imask);
2969                 spin_unlock_irq(&gfargrp->grplock);
2970         }
2971
2972         return work_done;
2973 }
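
/* gfar_poll_rx_sq() above follows the standard NAPI contract: clean at
 * most 'budget' frames, and only when the poll finishes under budget call
 * napi_complete() and unmask the device interrupt.  A minimal sketch of
 * the pattern; my_clean_ring() and my_unmask_irqs() are hypothetical
 * helpers, not driver functions:
 */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = my_clean_ring(napi, budget);	/* hypothetical */

	if (work_done < budget) {
		/* ring drained: stop polling, re-arm the device interrupt */
		napi_complete(napi);
		my_unmask_irqs(napi);			/* hypothetical */
	}
	return work_done;
}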
2974
2975 static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2976 {
2977         struct gfar_priv_grp *gfargrp =
2978                 container_of(napi, struct gfar_priv_grp, napi_tx);
2979         struct gfar __iomem *regs = gfargrp->regs;
2980         struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2981         u32 imask;
2982
2983         /* Clear IEVENT, so the interrupt isn't raised again
2984          * for transmissions that have already completed
2985          */
2986         gfar_write(&regs->ievent, IEVENT_TX_MASK);
2987
2988         /* run Tx cleanup to completion */
2989         if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2990                 gfar_clean_tx_ring(tx_queue);
2991
2992         napi_complete(napi);
2993
2994         spin_lock_irq(&gfargrp->grplock);
2995         imask = gfar_read(&regs->imask);
2996         imask |= IMASK_TX_DEFAULT;
2997         gfar_write(&regs->imask, imask);
2998         spin_unlock_irq(&gfargrp->grplock);
2999
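	/* Tx cleanup was run to completion, so report zero budget consumed */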
3000         return 0;
3001 }
3002
3003 static int gfar_poll_rx(struct napi_struct *napi, int budget)
3004 {
3005         struct gfar_priv_grp *gfargrp =
3006                 container_of(napi, struct gfar_priv_grp, napi_rx);
3007         struct gfar_private *priv = gfargrp->priv;
3008         struct gfar __iomem *regs = gfargrp->regs;
3009         struct gfar_priv_rx_q *rx_queue = NULL;
3010         int work_done = 0, work_done_per_q = 0;
3011         int i, budget_per_q = 0;
3012         unsigned long rstat_rxf;
3013         int num_act_queues;
3014
3015         /* Clear IEVENT, so the interrupt isn't raised again
3016          * for the packets that have already arrived
3017          */
3018         gfar_write(&regs->ievent, IEVENT_RX_MASK);
3019
3020         rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
3021
3022         num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
3023         if (num_act_queues)
3024                 budget_per_q = budget / num_act_queues;
3025
3026         for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
3027                 /* skip queue if not active */
3028                 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
3029                         continue;
3030
3031                 rx_queue = priv->rx_queue[i];
3032                 work_done_per_q =
3033                         gfar_clean_rx_ring(rx_queue, budget_per_q);
3034                 work_done += work_done_per_q;
3035
3036                 /* finished processing this queue */
3037                 if (work_done_per_q < budget_per_q) {
3038                         /* clear active queue hw indication */
3039                         gfar_write(&regs->rstat,
3040                                    RSTAT_CLEAR_RXF0 >> i);
3041                         num_act_queues--;
3042
3043                         if (!num_act_queues)
3044                                 break;
3045                 }
3046         }
3047
3048         if (!num_act_queues) {
3049                 u32 imask;
3050                 napi_complete(napi);
3051
3052                 /* Clear the halt bit in RSTAT */
3053                 gfar_write(&regs->rstat, gfargrp->rstat);
3054
3055                 spin_lock_irq(&gfargrp->grplock);
3056                 imask = gfar_read(&regs->imask);
3057                 imask |= IMASK_RX_DEFAULT;
3058                 gfar_write(&regs->imask, imask);
3059                 spin_unlock_irq(&gfargrp->grplock);
3060         }
3061
3062         return work_done;
3063 }
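
/* Worked example for the budget split above: with budget = 64 and three
 * RSTAT_RXF bits set, budget_per_q = 64 / 3 = 21 (integer division).  A
 * queue that consumes all 21 slots keeps its RXF bit set and leaves
 * num_act_queues nonzero, so napi_complete() is skipped and the ring is
 * polled again on the next NAPI pass.
 */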
3064
3065 static int gfar_poll_tx(struct napi_struct *napi, int budget)
3066 {
3067         struct gfar_priv_grp *gfargrp =
3068                 container_of(napi, struct gfar_priv_grp, napi_tx);
3069         struct gfar_private *priv = gfargrp->priv;
3070         struct gfar __iomem *regs = gfargrp->regs;
3071         struct gfar_priv_tx_q *tx_queue = NULL;
3072         int has_tx_work = 0;
3073         int i;
3074
3075         /* Clear IEVENT, so the interrupt isn't raised again
3076          * for transmissions that have already completed
3077          */
3078         gfar_write(&regs->ievent, IEVENT_TX_MASK);
3079
3080         for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
3081                 tx_queue = priv->tx_queue[i];
3082                 /* run Tx cleanup to completion */
3083                 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
3084                         gfar_clean_tx_ring(tx_queue);
3085                         has_tx_work = 1;
3086                 }
3087         }
3088
3089         if (!has_tx_work) {
3090                 u32 imask;
3091                 napi_complete(napi);
3092
3093                 spin_lock_irq(&gfargrp->grplock);
3094                 imask = gfar_read(&regs->imask);
3095                 imask |= IMASK_TX_DEFAULT;
3096                 gfar_write(&regs->imask, imask);
3097                 spin_unlock_irq(&gfargrp->grplock);
3098         }
3099
3100         return 0;
3101 }
3102
3103
3104 #ifdef CONFIG_NET_POLL_CONTROLLER
3105 /* Polling 'interrupt' - used by things like netconsole to send skbs
3106  * without having to re-enable interrupts. It's not called while
3107  * the interrupt routine is executing.
3108  */
3109 static void gfar_netpoll(struct net_device *dev)
3110 {
3111         struct gfar_private *priv = netdev_priv(dev);
3112         int i;
3113
3114         /* If the device has multiple interrupts, run tx/rx */
3115         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3116                 for (i = 0; i < priv->num_grps; i++) {
3117                         struct gfar_priv_grp *grp = &priv->gfargrp[i];
3118
3119                         disable_irq(gfar_irq(grp, TX)->irq);
3120                         disable_irq(gfar_irq(grp, RX)->irq);
3121                         disable_irq(gfar_irq(grp, ER)->irq);
3122                         gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3123                         enable_irq(gfar_irq(grp, ER)->irq);
3124                         enable_irq(gfar_irq(grp, RX)->irq);
3125                         enable_irq(gfar_irq(grp, TX)->irq);
3126                 }
3127         } else {
3128                 for (i = 0; i < priv->num_grps; i++) {
3129                         struct gfar_priv_grp *grp = &priv->gfargrp[i];
3130
3131                         disable_irq(gfar_irq(grp, TX)->irq);
3132                         gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3133                         enable_irq(gfar_irq(grp, TX)->irq);
3134                 }
3135         }
3136 }
3137 #endif
3138
3139 /* The interrupt handler for devices with one interrupt */
3140 static irqreturn_t gfar_interrupt(int irq, void *grp_id)
3141 {
3142         struct gfar_priv_grp *gfargrp = grp_id;
3143
3144         /* Save ievent for future reference */
3145         u32 events = gfar_read(&gfargrp->regs->ievent);
3146
3147         /* Check for reception */
3148         if (events & IEVENT_RX_MASK)
3149                 gfar_receive(irq, grp_id);
3150
3151         /* Check for transmit completion */
3152         if (events & IEVENT_TX_MASK)
3153                 gfar_transmit(irq, grp_id);
3154
3155         /* Check for errors */
3156         if (events & IEVENT_ERR_MASK)
3157                 gfar_error(irq, grp_id);
3158
3159         return IRQ_HANDLED;
3160 }
3161
3162 /* Called every time the controller might need to be made
3163  * aware of new link state.  The PHY code conveys this
3164  * information through variables in the phydev structure, and this
3165  * function converts those variables into the appropriate
3166  * register values, and can bring down the device if needed.
3167  */
3168 static void adjust_link(struct net_device *dev)
3169 {
3170         struct gfar_private *priv = netdev_priv(dev);
3171         struct phy_device *phydev = priv->phydev;
3172
3173         if (unlikely(phydev->link != priv->oldlink ||
3174                      (phydev->link && (phydev->duplex != priv->oldduplex ||
3175                                        phydev->speed != priv->oldspeed))))
3176                 gfar_update_link_state(priv);
3177 }
3178
3179 /* Update the hash table based on the current list of multicast
3180  * addresses we subscribe to.  Also, change the promiscuity of
3181  * the device based on the flags (this function is called
3182  * whenever dev->flags is changed).
3183  */
3184 static void gfar_set_multi(struct net_device *dev)
3185 {
3186         struct netdev_hw_addr *ha;
3187         struct gfar_private *priv = netdev_priv(dev);
3188         struct gfar __iomem *regs = priv->gfargrp[0].regs;
3189         u32 tempval;
3190
3191         if (dev->flags & IFF_PROMISC) {
3192                 /* Set RCTRL to PROM */
3193                 tempval = gfar_read(&regs->rctrl);
3194                 tempval |= RCTRL_PROM;
3195                 gfar_write(&regs->rctrl, tempval);
3196         } else {
3197                 /* Set RCTRL to not PROM */
3198                 tempval = gfar_read(&regs->rctrl);
3199                 tempval &= ~(RCTRL_PROM);
3200                 gfar_write(&regs->rctrl, tempval);
3201         }
3202
3203         if (dev->flags & IFF_ALLMULTI) {
3204                 /* Set the hash to rx all multicast frames */
3205                 gfar_write(&regs->igaddr0, 0xffffffff);
3206                 gfar_write(&regs->igaddr1, 0xffffffff);
3207                 gfar_write(&regs->igaddr2, 0xffffffff);
3208                 gfar_write(&regs->igaddr3, 0xffffffff);
3209                 gfar_write(&regs->igaddr4, 0xffffffff);
3210                 gfar_write(&regs->igaddr5, 0xffffffff);
3211                 gfar_write(&regs->igaddr6, 0xffffffff);
3212                 gfar_write(&regs->igaddr7, 0xffffffff);
3213                 gfar_write(&regs->gaddr0, 0xffffffff);
3214                 gfar_write(&regs->gaddr1, 0xffffffff);
3215                 gfar_write(&regs->gaddr2, 0xffffffff);
3216                 gfar_write(&regs->gaddr3, 0xffffffff);
3217                 gfar_write(&regs->gaddr4, 0xffffffff);
3218                 gfar_write(&regs->gaddr5, 0xffffffff);
3219                 gfar_write(&regs->gaddr6, 0xffffffff);
3220                 gfar_write(&regs->gaddr7, 0xffffffff);
3221         } else {
3222                 int em_num;
3223                 int idx;
3224
3225                 /* zero out the hash */
3226                 gfar_write(&regs->igaddr0, 0x0);
3227                 gfar_write(&regs->igaddr1, 0x0);
3228                 gfar_write(&regs->igaddr2, 0x0);
3229                 gfar_write(&regs->igaddr3, 0x0);
3230                 gfar_write(&regs->igaddr4, 0x0);
3231                 gfar_write(&regs->igaddr5, 0x0);
3232                 gfar_write(&regs->igaddr6, 0x0);
3233                 gfar_write(&regs->igaddr7, 0x0);
3234                 gfar_write(&regs->gaddr0, 0x0);
3235                 gfar_write(&regs->gaddr1, 0x0);
3236                 gfar_write(&regs->gaddr2, 0x0);
3237                 gfar_write(&regs->gaddr3, 0x0);
3238                 gfar_write(&regs->gaddr4, 0x0);
3239                 gfar_write(&regs->gaddr5, 0x0);
3240                 gfar_write(&regs->gaddr6, 0x0);
3241                 gfar_write(&regs->gaddr7, 0x0);
3242
3243                 /* If we have extended hash tables, we need to
3244                  * clear the exact match registers to prepare for
3245                  * setting them
3246                  */
3247                 if (priv->extended_hash) {
3248                         em_num = GFAR_EM_NUM + 1;
3249                         gfar_clear_exact_match(dev);
3250                         idx = 1;
3251                 } else {
3252                         idx = 0;
3253                         em_num = 0;
3254                 }
3255
3256                 if (netdev_mc_empty(dev))
3257                         return;
3258
3259                 /* Parse the list, and set the appropriate bits */
3260                 netdev_for_each_mc_addr(ha, dev) {
3261                         if (idx < em_num) {
3262                                 gfar_set_mac_for_addr(dev, idx, ha->addr);
3263                                 idx++;
3264                         } else
3265                                 gfar_set_hash_for_addr(dev, ha->addr);
3266                 }
3267         }
3268 }
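
/* gfar_set_multi() is the driver's ndo_set_rx_mode hook, so it runs
 * whenever the core rx-filter state may have changed, e.g. after a
 * userspace "ip link set dev eth0 promisc on" (which sets IFF_PROMISC)
 * or after joining a multicast group.
 */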
3269
3270
3271 /* Clears each of the exact match registers to zero, so they
3272  * don't interfere with normal reception
3273  */
3274 static void gfar_clear_exact_match(struct net_device *dev)
3275 {
3276         int idx;
3277         static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3278
3279         for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3280                 gfar_set_mac_for_addr(dev, idx, zero_arr);
3281 }
3282
3283 /* Set the appropriate hash bit for the given addr.
3284  * The algorithm works like so:
3285  * 1) Take the Destination Address (ie the multicast address), and
3286  * do a CRC on it (little endian), and reverse the bits of the
3287  * result.
3288  * 2) Use the 8 most significant bits as a hash into a 256-entry
3289  * table.  The table is controlled through 8 32-bit registers:
3290  * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
3291  * entry 255.  This means that the 3 most significant bits of the
3292  * hash index select which gaddr register to use, and the 5 other bits
3293  * indicate which bit (assuming an IBM numbering scheme, which
3294  * for PowerPC (tm) is usually the case) in the register holds
3295  * the entry.
3296  */
3297 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3298 {
3299         u32 tempval;
3300         struct gfar_private *priv = netdev_priv(dev);
3301         u32 result = ether_crc(ETH_ALEN, addr);
3302         int width = priv->hash_width;
3303         u8 whichbit = (result >> (32 - width)) & 0x1f;
3304         u8 whichreg = result >> (32 - width + 5);
3305         u32 value = 1 << (31 - whichbit);
3306
3307         tempval = gfar_read(priv->hash_regs[whichreg]);
3308         tempval |= value;
3309         gfar_write(priv->hash_regs[whichreg], tempval);
3310 }
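
/* Worked example for the hash above (hypothetical CRC, and assuming
 * hash_width == 8): if ether_crc() returns 0xB5xxxxxx, the top byte is
 * 1011 0101b, so whichreg = 101b = 5 and whichbit = 10101b = 21; bit 21
 * in IBM numbering is host bit 31 - 21 = 10, so gaddr5 |= 1 << 10.
 */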
3311
3312
3313 /* There are multiple MAC Address register pairs on some controllers
3314  * This function sets the numth pair to a given address
3315  */
3316 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3317                                   const u8 *addr)
3318 {
3319         struct gfar_private *priv = netdev_priv(dev);
3320         struct gfar __iomem *regs = priv->gfargrp[0].regs;
3321         u32 tempval;
3322         u32 __iomem *macptr = &regs->macstnaddr1;
3323
3324         macptr += num * 2;
3325
3326         /* For a station address of 0x12345678ABCD in transmission
3327          * order (BE), MACnADDR1 is set to 0xCDAB7856 and
3328          * MACnADDR2 is set to 0x34120000.
3329          */
3330         tempval = (addr[5] << 24) | (addr[4] << 16) |
3331                   (addr[3] << 8)  |  addr[2];
3332
3333         gfar_write(macptr, tempval);
3334
3335         tempval = (addr[1] << 24) | (addr[0] << 16);
3336
3337         gfar_write(macptr+1, tempval);
3338 }
3339
3340 /* GFAR error interrupt handler */
3341 static irqreturn_t gfar_error(int irq, void *grp_id)
3342 {
3343         struct gfar_priv_grp *gfargrp = grp_id;
3344         struct gfar __iomem *regs = gfargrp->regs;
3345         struct gfar_private *priv = gfargrp->priv;
3346         struct net_device *dev = priv->ndev;
3347
3348         /* Save ievent for future reference */
3349         u32 events = gfar_read(&regs->ievent);
3350
3351         /* Clear IEVENT */
3352         gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
3353
3354         /* Magic Packet is not an error. */
3355         if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
3356             (events & IEVENT_MAG))
3357                 events &= ~IEVENT_MAG;
3358
3359         /* Log the details if Rx/Tx error messaging is enabled */
3360         if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3361                 netdev_dbg(dev,
3362                            "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3363                            events, gfar_read(&regs->imask));
3364
3365         /* Update the error counters */
3366         if (events & IEVENT_TXE) {
3367                 dev->stats.tx_errors++;
3368
3369                 if (events & IEVENT_LC)
3370                         dev->stats.tx_window_errors++;
3371                 if (events & IEVENT_CRL)
3372                         dev->stats.tx_aborted_errors++;
3373                 if (events & IEVENT_XFUN) {
3374                         netif_dbg(priv, tx_err, dev,
3375                                   "TX FIFO underrun, packet dropped\n");
3376                         dev->stats.tx_dropped++;
3377                         atomic64_inc(&priv->extra_stats.tx_underrun);
3378
3379                         schedule_work(&priv->reset_task);
3380                 }
3381                 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
3382         }
3383         if (events & IEVENT_BSY) {
3384                 dev->stats.rx_errors++;
3385                 atomic64_inc(&priv->extra_stats.rx_bsy);
3386
3387                 gfar_receive(irq, grp_id);
3388
3389                 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3390                           gfar_read(&regs->rstat));
3391         }
3392         if (events & IEVENT_BABR) {
3393                 dev->stats.rx_errors++;
3394                 atomic64_inc(&priv->extra_stats.rx_babr);
3395
3396                 netif_dbg(priv, rx_err, dev, "babbling RX error\n");
3397         }
3398         if (events & IEVENT_EBERR) {
3399                 atomic64_inc(&priv->extra_stats.eberr);
3400                 netif_dbg(priv, rx_err, dev, "bus error\n");
3401         }
3402         if (events & IEVENT_RXC)
3403                 netif_dbg(priv, rx_status, dev, "control frame\n");
3404
3405         if (events & IEVENT_BABT) {
3406                 atomic64_inc(&priv->extra_stats.tx_babt);
3407                 netif_dbg(priv, tx_err, dev, "babbling TX error\n");
3408         }
3409         return IRQ_HANDLED;
3410 }
3411
3412 static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3413 {
3414         struct phy_device *phydev = priv->phydev;
3415         u32 val = 0;
3416
3417         if (!phydev->duplex)
3418                 return val;
3419
3420         if (!priv->pause_aneg_en) {
3421                 if (priv->tx_pause_en)
3422                         val |= MACCFG1_TX_FLOW;
3423                 if (priv->rx_pause_en)
3424                         val |= MACCFG1_RX_FLOW;
3425         } else {
3426                 u16 lcl_adv, rmt_adv;
3427                 u8 flowctrl;
3428                 /* get link partner capabilities */
3429                 rmt_adv = 0;
3430                 if (phydev->pause)
3431                         rmt_adv = LPA_PAUSE_CAP;
3432                 if (phydev->asym_pause)
3433                         rmt_adv |= LPA_PAUSE_ASYM;
3434
3435                 lcl_adv = 0;
3436                 if (phydev->advertising & ADVERTISED_Pause)
3437                         lcl_adv |= ADVERTISE_PAUSE_CAP;
3438                 if (phydev->advertising & ADVERTISED_Asym_Pause)
3439                         lcl_adv |= ADVERTISE_PAUSE_ASYM;
3440
3441                 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3442                 if (flowctrl & FLOW_CTRL_TX)
3443                         val |= MACCFG1_TX_FLOW;
3444                 if (flowctrl & FLOW_CTRL_RX)
3445                         val |= MACCFG1_RX_FLOW;
3446         }
3447
3448         return val;
3449 }
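
/* Worked example for the pause autonegotiation above: when both link
 * partners advertise symmetric pause, lcl_adv contains
 * ADVERTISE_PAUSE_CAP and rmt_adv contains LPA_PAUSE_CAP, so
 * mii_resolve_flowctrl_fdx() returns FLOW_CTRL_TX | FLOW_CTRL_RX and
 * both MACCFG1_TX_FLOW and MACCFG1_RX_FLOW end up set.
 */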
3450
3451 static noinline void gfar_update_link_state(struct gfar_private *priv)
3452 {
3453         struct gfar __iomem *regs = priv->gfargrp[0].regs;
3454         struct phy_device *phydev = priv->phydev;
3455         struct gfar_priv_rx_q *rx_queue = NULL;
3456         int i;
3457         struct rxbd8 *bdp;
3458
3459         if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
3460                 return;
3461
3462         if (phydev->link) {
3463                 u32 tempval1 = gfar_read(&regs->maccfg1);
3464                 u32 tempval = gfar_read(&regs->maccfg2);
3465                 u32 ecntrl = gfar_read(&regs->ecntrl);
3466                 u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
3467
3468                 if (phydev->duplex != priv->oldduplex) {
3469                         if (!(phydev->duplex))
3470                                 tempval &= ~(MACCFG2_FULL_DUPLEX);
3471                         else
3472                                 tempval |= MACCFG2_FULL_DUPLEX;
3473
3474                         priv->oldduplex = phydev->duplex;
3475                 }
3476
3477                 if (phydev->speed != priv->oldspeed) {
3478                         switch (phydev->speed) {
3479                         case 1000:
3480                                 tempval =
3481                                     ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3482
3483                                 ecntrl &= ~(ECNTRL_R100);
3484                                 break;
3485                         case 100:
3486                         case 10:
3487                                 tempval =
3488                                     ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3489
3490                                 /* Reduced mode distinguishes
3491                                  * between 10 and 100
3492                                  */
3493                                 if (phydev->speed == SPEED_100)
3494                                         ecntrl |= ECNTRL_R100;
3495                                 else
3496                                         ecntrl &= ~(ECNTRL_R100);
3497                                 break;
3498                         default:
3499                                 netif_warn(priv, link, priv->ndev,
3500                                            "Ack!  Speed (%d) is not 10/100/1000!\n",
3501                                            phydev->speed);
3502                                 break;
3503                         }
3504
3505                         priv->oldspeed = phydev->speed;
3506                 }
3507
3508                 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3509                 tempval1 |= gfar_get_flowctrl_cfg(priv);
3510
3511                 /* Turn last free buffer recording on */
3512                 if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
3513                         for (i = 0; i < priv->num_rx_queues; i++) {
3514                                 rx_queue = priv->rx_queue[i];
3515                                 bdp = rx_queue->cur_rx;
3516                                 /* skip to previous bd */
3517                                 bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
3518                                               rx_queue->rx_bd_base,
3519                                               rx_queue->rx_ring_size);
3520
3521                                 if (rx_queue->rfbptr)
3522                                         gfar_write(rx_queue->rfbptr, (u32)bdp);
3523                         }
3524
3525                         priv->tx_actual_en = 1;
3526                 }
3527
3528                 if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
3529                         priv->tx_actual_en = 0;
3530
3531                 gfar_write(&regs->maccfg1, tempval1);
3532                 gfar_write(&regs->maccfg2, tempval);
3533                 gfar_write(&regs->ecntrl, ecntrl);
3534
3535                 if (!priv->oldlink)
3536                         priv->oldlink = 1;
3537
3538         } else if (priv->oldlink) {
3539                 priv->oldlink = 0;
3540                 priv->oldspeed = 0;
3541                 priv->oldduplex = -1;
3542         }
3543
3544         if (netif_msg_link(priv))
3545                 phy_print_status(phydev);
3546 }
3547
3548 static const struct of_device_id gfar_match[] =
3549 {
3550         {
3551                 .type = "network",
3552                 .compatible = "gianfar",
3553         },
3554         {
3555                 .compatible = "fsl,etsec2",
3556         },
3557         {},
3558 };
3559 MODULE_DEVICE_TABLE(of, gfar_match);
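
/* A minimal sketch of a device tree node the first match entry above
 * would bind to; the unit address and property values other than
 * "compatible" and "device_type" are hypothetical:
 *
 *	ethernet@24000 {
 *		device_type = "network";
 *		compatible = "gianfar";
 *		reg = <0x24000 0x1000>;
 *	};
 */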
3560
3561 /* Structure for a device driver */
3562 static struct platform_driver gfar_driver = {
3563         .driver = {
3564                 .name = "fsl-gianfar",
3565                 .pm = GFAR_PM_OPS,
3566                 .of_match_table = gfar_match,
3567         },
3568         .probe = gfar_probe,
3569         .remove = gfar_remove,
3570 };
3571
3572 module_platform_driver(gfar_driver);