/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";
static char bcm_enet_driver_version[] = "1.0";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
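/* received frames shorter than copybreak are copied into a freshly
 * allocated skb so the full-size rx buffer can be reused in place */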

/* io registers memory shared between all devices */
static void __iomem *bcm_enet_shared_base[3];

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
                               u32 val, u32 off)
{
        bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access switch registers
 */
static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(priv->base + off);
}

static inline void enetsw_writel(struct bcm_enet_priv *priv,
                                 u32 val, u32 off)
{
        bcm_writel(val, priv->base + off);
}

static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readw(priv->base + off);
}

static inline void enetsw_writew(struct bcm_enet_priv *priv,
                                 u16 val, u32 off)
{
        bcm_writew(val, priv->base + off);
}

static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readb(priv->base + off);
}

static inline void enetsw_writeb(struct bcm_enet_priv *priv,
                                 u8 val, u32 off)
{
        bcm_writeb(val, priv->base + off);
}


/* io helpers to access shared registers */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(bcm_enet_shared_base[0] + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
                                       u32 val, u32 off)
{
        bcm_writel(val, bcm_enet_shared_base[0] + off);
}

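/* the rx and tx channels each have their own copy of the dmac/dmas
 * registers; dma_chan_width is the per-channel stride */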
static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
        return bcm_readl(bcm_enet_shared_base[1] +
                bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
                                       u32 val, u32 off, int chan)
{
        bcm_writel(val, bcm_enet_shared_base[1] +
                bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
        return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
                                       u32 val, u32 off, int chan)
{
        bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
        int limit;

        /* make sure mii interrupt status is cleared */
        enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

        enet_writel(priv, data, ENET_MIIDATA_REG);
        wmb();

        /* busy wait on mii interrupt bit, with timeout */
        limit = 1000;
        do {
                if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
                        break;
                udelay(1);
        } while (limit-- > 0);

        return (limit < 0) ? 1 : 0;
}

/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
                              int regnum)
{
        u32 tmp, val;

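        /* build a clause-22 MDIO read frame: register number,
         * turnaround bits (10b), phy address and read opcode */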
        tmp = regnum << ENET_MIIDATA_REG_SHIFT;
        tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
        tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
        tmp |= ENET_MIIDATA_OP_READ_MASK;

        if (do_mdio_op(priv, tmp))
                return -1;

        val = enet_readl(priv, ENET_MIIDATA_REG);
        val &= 0xffff;
        return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
                               int regnum, u16 value)
{
        u32 tmp;

        tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
        tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
        tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
        tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
        tmp |= ENET_MIIDATA_OP_WRITE_MASK;

        (void)do_mdio_op(priv, tmp);
        return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
                                     int regnum)
{
        return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
                                      int regnum, u16 value)
{
        return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
                                  int regnum)
{
        return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
                                    int regnum, int value)
{
        bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        while (priv->rx_desc_count < priv->rx_ring_size) {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;
                dma_addr_t p;
                int desc_idx;
                u32 len_stat;

                desc_idx = priv->rx_dirty_desc;
                desc = &priv->rx_desc_cpu[desc_idx];

                if (!priv->rx_skb[desc_idx]) {
                        skb = netdev_alloc_skb(dev, priv->rx_skb_size);
                        if (!skb)
                                break;
                        priv->rx_skb[desc_idx] = skb;
                        p = dma_map_single(&priv->pdev->dev, skb->data,
                                           priv->rx_skb_size,
                                           DMA_FROM_DEVICE);
                        desc->address = p;
                }

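                /* hand the buffer back to hardware: set the length,
                 * give ownership to the DMA engine and flag the last
                 * ring entry so the engine wraps back to the start */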
                len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
                len_stat |= DMADESC_OWNER_MASK;
                if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
                        len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
                        priv->rx_dirty_desc = 0;
                } else {
                        priv->rx_dirty_desc++;
                }
                wmb();
                desc->len_stat = len_stat;

                priv->rx_desc_count++;

                /* tell dma engine we allocated one buffer */
                if (priv->dma_has_sram)
                        enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
                else
                        enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
        }

        /* If rx ring is still empty, set a timer to try allocating
         * again at a later time. */
        if (priv->rx_desc_count == 0 && netif_running(dev)) {
                dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
                priv->rx_timeout.expires = jiffies + HZ;
                add_timer(&priv->rx_timeout);
        }

        return 0;
}

/*
 * timer callback to defer rx queue refill when we're OOM
 */
static void bcm_enet_refill_rx_timer(unsigned long data)
{
        struct net_device *dev;
        struct bcm_enet_priv *priv;

        dev = (struct net_device *)data;
        priv = netdev_priv(dev);

        spin_lock(&priv->rx_lock);
        bcm_enet_refill_rx(dev);
        spin_unlock(&priv->rx_lock);
}

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
        struct bcm_enet_priv *priv;
        struct device *kdev;
        int processed;

        priv = netdev_priv(dev);
        kdev = &priv->pdev->dev;
        processed = 0;

        /* don't scan the ring further than the number of refilled
         * descriptors */
        if (budget > priv->rx_desc_count)
                budget = priv->rx_desc_count;

        do {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;
                int desc_idx;
                u32 len_stat;
                unsigned int len;

                desc_idx = priv->rx_curr_desc;
                desc = &priv->rx_desc_cpu[desc_idx];

                /* make sure we actually read the descriptor status on
                 * each loop iteration */
                rmb();

                len_stat = desc->len_stat;

                /* break if dma ownership belongs to hw */
                if (len_stat & DMADESC_OWNER_MASK)
                        break;

                processed++;
                priv->rx_curr_desc++;
                if (priv->rx_curr_desc == priv->rx_ring_size)
                        priv->rx_curr_desc = 0;
                priv->rx_desc_count--;

                /* if the packet does not have start of packet _and_
                 * end of packet flags set, then just recycle it */
                if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
                        (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
                        dev->stats.rx_dropped++;
                        continue;
                }

                /* recycle packet if it's marked as bad */
                if (!priv->enet_is_sw &&
                    unlikely(len_stat & DMADESC_ERR_MASK)) {
                        dev->stats.rx_errors++;

                        if (len_stat & DMADESC_OVSIZE_MASK)
                                dev->stats.rx_length_errors++;
                        if (len_stat & DMADESC_CRC_MASK)
                                dev->stats.rx_crc_errors++;
                        if (len_stat & DMADESC_UNDER_MASK)
                                dev->stats.rx_frame_errors++;
                        if (len_stat & DMADESC_OV_MASK)
                                dev->stats.rx_fifo_errors++;
                        continue;
                }

                /* valid packet */
                skb = priv->rx_skb[desc_idx];
                len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
                /* don't include FCS */
                len -= 4;

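                /* small packet: copy it into a fresh skb and keep the
                 * original rx buffer mapped, so the descriptor can be
                 * rearmed without a new allocation */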
                if (len < copybreak) {
                        struct sk_buff *nskb;

                        nskb = netdev_alloc_skb_ip_align(dev, len);
                        if (!nskb) {
                                /* forget packet, just rearm desc */
                                dev->stats.rx_dropped++;
                                continue;
                        }

                        dma_sync_single_for_cpu(kdev, desc->address,
                                                len, DMA_FROM_DEVICE);
                        memcpy(nskb->data, skb->data, len);
                        dma_sync_single_for_device(kdev, desc->address,
                                                   len, DMA_FROM_DEVICE);
                        skb = nskb;
                } else {
                        dma_unmap_single(&priv->pdev->dev, desc->address,
                                         priv->rx_skb_size, DMA_FROM_DEVICE);
                        priv->rx_skb[desc_idx] = NULL;
                }

                skb_put(skb, len);
                skb->protocol = eth_type_trans(skb, dev);
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += len;
                netif_receive_skb(skb);

        } while (--budget > 0);

        if (processed || !priv->rx_desc_count) {
                bcm_enet_refill_rx(dev);

                /* kick rx dma */
                enet_dmac_writel(priv, priv->dma_chan_en_mask,
                                         ENETDMAC_CHANCFG, priv->rx_chan);
        }

        return processed;
}


/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
        struct bcm_enet_priv *priv;
        int released;

        priv = netdev_priv(dev);
        released = 0;

        while (priv->tx_desc_count < priv->tx_ring_size) {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;

                /* We run in a bh and fight against start_xmit, which
                 * is called with bh disabled */
                spin_lock(&priv->tx_lock);

                desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

                if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
                        spin_unlock(&priv->tx_lock);
                        break;
                }

                /* ensure the other fields of the descriptor were not
                 * read before we checked ownership */
                rmb();

                skb = priv->tx_skb[priv->tx_dirty_desc];
                priv->tx_skb[priv->tx_dirty_desc] = NULL;
                dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
                                 DMA_TO_DEVICE);

                priv->tx_dirty_desc++;
                if (priv->tx_dirty_desc == priv->tx_ring_size)
                        priv->tx_dirty_desc = 0;
                priv->tx_desc_count++;

                spin_unlock(&priv->tx_lock);

                if (desc->len_stat & DMADESC_UNDER_MASK)
                        dev->stats.tx_errors++;

                dev_kfree_skb(skb);
                released++;
        }

        if (netif_queue_stopped(dev) && released)
                netif_wake_queue(dev);

        return released;
}

/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
        struct bcm_enet_priv *priv;
        struct net_device *dev;
        int tx_work_done, rx_work_done;

        priv = container_of(napi, struct bcm_enet_priv, napi);
        dev = priv->net_dev;

        /* ack interrupts */
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IR, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IR, priv->tx_chan);

        /* reclaim sent skb */
        tx_work_done = bcm_enet_tx_reclaim(dev, 0);

        spin_lock(&priv->rx_lock);
        rx_work_done = bcm_enet_receive_queue(dev, budget);
        spin_unlock(&priv->rx_lock);

        if (rx_work_done >= budget || tx_work_done > 0) {
                /* rx/tx queue is not yet empty/clean */
                return rx_work_done;
        }

        /* no more packets in rx/tx queue, remove device from poll
         * queue */
        napi_complete(napi);

        /* restore rx/tx interrupt */
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IRMASK, priv->tx_chan);

        return rx_work_done;
}

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
        struct net_device *dev;
        struct bcm_enet_priv *priv;
        u32 stat;

        dev = dev_id;
        priv = netdev_priv(dev);

        stat = enet_readl(priv, ENET_IR_REG);
        if (!(stat & ENET_IR_MIB))
                return IRQ_NONE;

        /* clear & mask interrupt */
        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
        enet_writel(priv, 0, ENET_IRMASK_REG);

        /* read mib registers in workqueue */
        schedule_work(&priv->mib_update_task);

        return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
        struct net_device *dev;
        struct bcm_enet_priv *priv;

        dev = dev_id;
        priv = netdev_priv(dev);

        /* mask rx/tx interrupts */
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

        napi_schedule(&priv->napi);

        return IRQ_HANDLED;
}

/*
 * tx request callback
 */
static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct bcm_enet_desc *desc;
        u32 len_stat;
        int ret;

        priv = netdev_priv(dev);

        /* lock against tx reclaim */
        spin_lock(&priv->tx_lock);

        /* make sure the tx hw queue is not full; this should not
         * happen since we stop the queue before that's the case */
        if (unlikely(!priv->tx_desc_count)) {
                netif_stop_queue(dev);
                dev_err(&priv->pdev->dev,
                        "xmit called with no tx desc available?\n");
                ret = NETDEV_TX_BUSY;
                goto out_unlock;
        }

        /* pad small packets sent on a switch device */
        if (priv->enet_is_sw && skb->len < 64) {
                int needed = 64 - skb->len;
                char *data;

                if (unlikely(skb_tailroom(skb) < needed)) {
                        struct sk_buff *nskb;

                        nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
                        if (!nskb) {
                                ret = NETDEV_TX_BUSY;
                                goto out_unlock;
                        }
                        dev_kfree_skb(skb);
                        skb = nskb;
                }
                data = skb_put(skb, needed);
                memset(data, 0, needed);
        }

        /* point to the next available desc */
        desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
        priv->tx_skb[priv->tx_curr_desc] = skb;

        /* fill descriptor */
        desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
                                       DMA_TO_DEVICE);

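        /* pack frame length and control flags into the status word:
         * first+last fragment, hardware CRC append, and ownership
         * handed to the DMA engine */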
        len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
        len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
                DMADESC_APPEND_CRC |
                DMADESC_OWNER_MASK;

        priv->tx_curr_desc++;
        if (priv->tx_curr_desc == priv->tx_ring_size) {
                priv->tx_curr_desc = 0;
                len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
        }
        priv->tx_desc_count--;

        /* dma might be already polling, make sure we update desc
         * fields in correct order */
        wmb();
        desc->len_stat = len_stat;
        wmb();

        /* kick tx dma */
        enet_dmac_writel(priv, priv->dma_chan_en_mask,
                                 ENETDMAC_CHANCFG, priv->tx_chan);

        /* stop queue if no more desc available */
        if (!priv->tx_desc_count)
                netif_stop_queue(dev);

        dev->stats.tx_bytes += skb->len;
        dev->stats.tx_packets++;
        ret = NETDEV_TX_OK;

out_unlock:
        spin_unlock(&priv->tx_lock);
        return ret;
}

/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
        struct bcm_enet_priv *priv;
        struct sockaddr *addr = p;
        u32 val;

        priv = netdev_priv(dev);
        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

        /* use perfect match register 0 to store my mac address */
        val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
                (dev->dev_addr[4] << 8) | dev->dev_addr[5];
        enet_writel(priv, val, ENET_PML_REG(0));

        val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
        val |= ENET_PMH_DATAVALID_MASK;
        enet_writel(priv, val, ENET_PMH_REG(0));

        return 0;
}

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct netdev_hw_addr *ha;
        u32 val;
        int i;

        priv = netdev_priv(dev);

        val = enet_readl(priv, ENET_RXCFG_REG);

        if (dev->flags & IFF_PROMISC)
                val |= ENET_RXCFG_PROMISC_MASK;
        else
                val &= ~ENET_RXCFG_PROMISC_MASK;

        /* only 3 perfect match registers left, the first one is used
         * for our own mac address */
        if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
                val |= ENET_RXCFG_ALLMCAST_MASK;
        else
                val &= ~ENET_RXCFG_ALLMCAST_MASK;

        /* no need to set perfect match registers if we catch all
         * multicast */
        if (val & ENET_RXCFG_ALLMCAST_MASK) {
                enet_writel(priv, val, ENET_RXCFG_REG);
                return;
        }

        i = 0;
        netdev_for_each_mc_addr(ha, dev) {
                u8 *dmi_addr;
                u32 tmp;

                if (i == 3)
                        break;
                /* update perfect match registers */
                dmi_addr = ha->addr;
                tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
                        (dmi_addr[4] << 8) | dmi_addr[5];
                enet_writel(priv, tmp, ENET_PML_REG(i + 1));

                tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
                tmp |= ENET_PMH_DATAVALID_MASK;
                enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
        }

        for (; i < 3; i++) {
                enet_writel(priv, 0, ENET_PML_REG(i + 1));
                enet_writel(priv, 0, ENET_PMH_REG(i + 1));
        }

        enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
        u32 val;

        val = enet_readl(priv, ENET_TXCTL_REG);
        if (fullduplex)
                val |= ENET_TXCTL_FD_MASK;
        else
                val &= ~ENET_TXCTL_FD_MASK;
        enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
        u32 val;

        /* rx flow control (pause frame handling) */
        val = enet_readl(priv, ENET_RXCFG_REG);
        if (rx_en)
                val |= ENET_RXCFG_ENFLOW_MASK;
        else
                val &= ~ENET_RXCFG_ENFLOW_MASK;
        enet_writel(priv, val, ENET_RXCFG_REG);

        if (!priv->dma_has_sram)
                return;

        /* tx flow control (pause frame generation) */
        val = enet_dma_readl(priv, ENETDMA_CFG_REG);
        if (tx_en)
                val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
        else
                val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
        enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct phy_device *phydev;
        int status_changed;

        priv = netdev_priv(dev);
        phydev = priv->phydev;
        status_changed = 0;

        if (priv->old_link != phydev->link) {
                status_changed = 1;
                priv->old_link = phydev->link;
        }

        /* reflect duplex change in mac configuration */
        if (phydev->link && phydev->duplex != priv->old_duplex) {
                bcm_enet_set_duplex(priv,
                                    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
                status_changed = 1;
                priv->old_duplex = phydev->duplex;
        }

        /* enable flow control if the remote advertises it (trust
         * phylib to have checked that duplex is full) */
        if (phydev->link && phydev->pause != priv->old_pause) {
                int rx_pause_en, tx_pause_en;

                if (phydev->pause) {
                        /* pause was advertised by lpa and us */
                        rx_pause_en = 1;
                        tx_pause_en = 1;
                } else if (!priv->pause_auto) {
                        /* pause setting overridden by user */
                        rx_pause_en = priv->pause_rx;
                        tx_pause_en = priv->pause_tx;
                } else {
                        rx_pause_en = 0;
                        tx_pause_en = 0;
                }

                bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
                status_changed = 1;
                priv->old_pause = phydev->pause;
        }

        if (status_changed) {
                pr_info("%s: link %s", dev->name, phydev->link ?
                        "UP" : "DOWN");
                if (phydev->link)
                        pr_cont(" - %d/%s - flow control %s", phydev->speed,
                               DUPLEX_FULL == phydev->duplex ? "full" : "half",
                               phydev->pause == 1 ? "rx&tx" : "off");

                pr_cont("\n");
        }
}

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        bcm_enet_set_duplex(priv, priv->force_duplex_full);
        bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
        netif_carrier_on(dev);

        pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
                dev->name,
                priv->force_speed_100 ? 100 : 10,
                priv->force_duplex_full ? "full" : "half",
                priv->pause_rx ? "rx" : "off",
                priv->pause_tx ? "tx" : "off");
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct sockaddr addr;
        struct device *kdev;
        struct phy_device *phydev;
        int i, ret;
        unsigned int size;
        char phy_id[MII_BUS_ID_SIZE + 3];
        void *p;
        u32 val;

        priv = netdev_priv(dev);
        kdev = &priv->pdev->dev;

        if (priv->has_phy) {
                /* connect to PHY */
                snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
                         priv->mii_bus->id, priv->phy_id);

                phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
                                     PHY_INTERFACE_MODE_MII);

                if (IS_ERR(phydev)) {
                        dev_err(kdev, "could not attach to PHY\n");
                        return PTR_ERR(phydev);
                }

                /* mask with MAC supported features */
                phydev->supported &= (SUPPORTED_10baseT_Half |
                                      SUPPORTED_10baseT_Full |
                                      SUPPORTED_100baseT_Half |
                                      SUPPORTED_100baseT_Full |
                                      SUPPORTED_Autoneg |
                                      SUPPORTED_Pause |
                                      SUPPORTED_MII);
                phydev->advertising = phydev->supported;

                if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
                        phydev->advertising |= SUPPORTED_Pause;
                else
                        phydev->advertising &= ~SUPPORTED_Pause;

                dev_info(kdev, "attached PHY at address %d [%s]\n",
                         phydev->addr, phydev->drv->name);

                priv->old_link = 0;
                priv->old_duplex = -1;
                priv->old_pause = -1;
                priv->phydev = phydev;
        }

        /* mask all interrupts and request them */
        enet_writel(priv, 0, ENET_IRMASK_REG);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

        ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
        if (ret)
                goto out_phy_disconnect;

        ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED,
                          dev->name, dev);
        if (ret)
                goto out_freeirq;

        ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
                          IRQF_DISABLED, dev->name, dev);
        if (ret)
                goto out_freeirq_rx;

        /* initialize perfect match registers */
        for (i = 0; i < 4; i++) {
                enet_writel(priv, 0, ENET_PML_REG(i));
                enet_writel(priv, 0, ENET_PMH_REG(i));
        }

        /* write device mac address */
        memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
        bcm_enet_set_mac_address(dev, &addr);

        /* allocate rx dma ring */
        size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
        p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma,
                               GFP_KERNEL | __GFP_ZERO);
        if (!p) {
                ret = -ENOMEM;
                goto out_freeirq_tx;
        }

        priv->rx_desc_alloc_size = size;
        priv->rx_desc_cpu = p;

        /* allocate tx dma ring */
        size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
        p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma,
                               GFP_KERNEL | __GFP_ZERO);
        if (!p) {
                ret = -ENOMEM;
                goto out_free_rx_ring;
        }

        priv->tx_desc_alloc_size = size;
        priv->tx_desc_cpu = p;

        priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
                               GFP_KERNEL);
        if (!priv->tx_skb) {
                ret = -ENOMEM;
                goto out_free_tx_ring;
        }

        priv->tx_desc_count = priv->tx_ring_size;
        priv->tx_dirty_desc = 0;
        priv->tx_curr_desc = 0;
        spin_lock_init(&priv->tx_lock);

        /* init & fill rx ring with skbs */
        priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
                               GFP_KERNEL);
        if (!priv->rx_skb) {
                ret = -ENOMEM;
                goto out_free_tx_skb;
        }

        priv->rx_desc_count = 0;
        priv->rx_dirty_desc = 0;
        priv->rx_curr_desc = 0;

        /* initialize flow control buffer allocation */
        if (priv->dma_has_sram)
                enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
                                ENETDMA_BUFALLOC_REG(priv->rx_chan));
        else
                enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
                                ENETDMAC_BUFALLOC, priv->rx_chan);

        if (bcm_enet_refill_rx(dev)) {
                dev_err(kdev, "cannot allocate rx skb queue\n");
                ret = -ENOMEM;
                goto out;
        }

        /* write rx & tx ring addresses */
        if (priv->dma_has_sram) {
                enet_dmas_writel(priv, priv->rx_desc_dma,
                                 ENETDMAS_RSTART_REG, priv->rx_chan);
                enet_dmas_writel(priv, priv->tx_desc_dma,
                                 ENETDMAS_RSTART_REG, priv->tx_chan);
        } else {
                enet_dmac_writel(priv, priv->rx_desc_dma,
                                ENETDMAC_RSTART, priv->rx_chan);
                enet_dmac_writel(priv, priv->tx_desc_dma,
                                ENETDMAC_RSTART, priv->tx_chan);
        }

        /* clear remaining state ram for rx & tx channel */
        if (priv->dma_has_sram) {
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
        } else {
                enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
                enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
        }

        /* set max rx/tx length */
        enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
        enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

        /* set dma maximum burst len */
        enet_dmac_writel(priv, priv->dma_maxburst,
                         ENETDMAC_MAXBURST, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_maxburst,
                         ENETDMAC_MAXBURST, priv->tx_chan);

        /* set correct transmit fifo watermark */
        enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

        /* set flow control low/high threshold to 1/3 / 2/3 */
        if (priv->dma_has_sram) {
                val = priv->rx_ring_size / 3;
                enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
                val = (priv->rx_ring_size * 2) / 3;
                enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
        } else {
                enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
                enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
                enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
        }

        /* all set, enable mac and interrupts, start dma engine and
         * kick rx dma channel */
        wmb();
        val = enet_readl(priv, ENET_CTL_REG);
        val |= ENET_CTL_ENABLE_MASK;
        enet_writel(priv, val, ENET_CTL_REG);
        enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
        enet_dmac_writel(priv, priv->dma_chan_en_mask,
                         ENETDMAC_CHANCFG, priv->rx_chan);

        /* watch "mib counters about to overflow" interrupt */
        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
        enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

        /* watch "packet transferred" interrupt in rx and tx */
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IR, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IR, priv->tx_chan);

        /* make sure we enable napi before rx interrupt */
        napi_enable(&priv->napi);

        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IRMASK, priv->tx_chan);

        if (priv->has_phy)
                phy_start(priv->phydev);
        else
                bcm_enet_adjust_link(dev);

        netif_start_queue(dev);
        return 0;

out:
        for (i = 0; i < priv->rx_ring_size; i++) {
                struct bcm_enet_desc *desc;

                if (!priv->rx_skb[i])
                        continue;

                desc = &priv->rx_desc_cpu[i];
                dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
                                 DMA_FROM_DEVICE);
                kfree_skb(priv->rx_skb[i]);
        }
        kfree(priv->rx_skb);

out_free_tx_skb:
        kfree(priv->tx_skb);

out_free_tx_ring:
        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
                          priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
                          priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
        free_irq(priv->irq_tx, dev);

out_freeirq_rx:
        free_irq(priv->irq_rx, dev);

out_freeirq:
        free_irq(dev->irq, dev);

out_phy_disconnect:
        /* only disconnect if a PHY was actually attached above;
         * priv->phydev is NULL otherwise */
        if (priv->has_phy)
                phy_disconnect(priv->phydev);

        return ret;
}

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
        int limit;
        u32 val;

        val = enet_readl(priv, ENET_CTL_REG);
        val |= ENET_CTL_DISABLE_MASK;
        enet_writel(priv, val, ENET_CTL_REG);

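        /* the disable bit is cleared by hardware once the mac has
         * actually stopped; busy-wait for that with a bounded loop */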
        limit = 1000;
        do {
                u32 val;

                val = enet_readl(priv, ENET_CTL_REG);
                if (!(val & ENET_CTL_DISABLE_MASK))
                        break;
                udelay(1);
        } while (limit--);
}

/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
        int limit;

        enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);

        limit = 1000;
        do {
                u32 val;

                val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
                if (!(val & ENETDMAC_CHANCFG_EN_MASK))
                        break;
                udelay(1);
        } while (limit--);
}

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct device *kdev;
        int i;

        priv = netdev_priv(dev);
        kdev = &priv->pdev->dev;

        netif_stop_queue(dev);
        napi_disable(&priv->napi);
        if (priv->has_phy)
                phy_stop(priv->phydev);
        del_timer_sync(&priv->rx_timeout);

        /* mask all interrupts */
        enet_writel(priv, 0, ENET_IRMASK_REG);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

        /* make sure no mib update is scheduled */
        cancel_work_sync(&priv->mib_update_task);

        /* disable dma & mac */
        bcm_enet_disable_dma(priv, priv->tx_chan);
        bcm_enet_disable_dma(priv, priv->rx_chan);
        bcm_enet_disable_mac(priv);

        /* force reclaim of all tx buffers */
        bcm_enet_tx_reclaim(dev, 1);

        /* free the rx skb ring */
        for (i = 0; i < priv->rx_ring_size; i++) {
                struct bcm_enet_desc *desc;

                if (!priv->rx_skb[i])
                        continue;

                desc = &priv->rx_desc_cpu[i];
                dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
                                 DMA_FROM_DEVICE);
                kfree_skb(priv->rx_skb[i]);
        }

        /* free remaining allocated memory */
        kfree(priv->rx_skb);
        kfree(priv->tx_skb);
        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
                          priv->rx_desc_cpu, priv->rx_desc_dma);
        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
                          priv->tx_desc_cpu, priv->tx_desc_dma);
        free_irq(priv->irq_tx, dev);
        free_irq(priv->irq_rx, dev);
        free_irq(dev->irq, dev);

        /* release phy */
        if (priv->has_phy) {
                phy_disconnect(priv->phydev);
                priv->phydev = NULL;
        }

        return 0;
}

/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
        int stat_offset;
        int mib_reg;
};

#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),             \
                     offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),          \
                     offsetof(struct net_device_stats, m)

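/* each macro expands to the (sizeof_stat, stat_offset) initializer
 * pair, so a stat can be fetched generically from either the private
 * mib block or the net_device stats */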
static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
        { "rx_packets", DEV_STAT(rx_packets), -1 },
        { "tx_packets", DEV_STAT(tx_packets), -1 },
        { "rx_bytes", DEV_STAT(rx_bytes), -1 },
        { "tx_bytes", DEV_STAT(tx_bytes), -1 },
        { "rx_errors", DEV_STAT(rx_errors), -1 },
        { "tx_errors", DEV_STAT(tx_errors), -1 },
        { "rx_dropped", DEV_STAT(rx_dropped), -1 },
        { "tx_dropped", DEV_STAT(tx_dropped), -1 },

        { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
        { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
        { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
        { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
        { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
        { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
        { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
        { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
        { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
        { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
        { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
        { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
        { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
        { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
        { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
        { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
        { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
        { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
        { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
        { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
        { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

        { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
        { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
        { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
        { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
        { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
        { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
        { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
        { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
        { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
        { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
        { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
        { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
        { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
        { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
        { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
        { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
        { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
        { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
        { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
        { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
        { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
        { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
};

#define BCM_ENET_STATS_LEN      ARRAY_SIZE(bcm_enet_gstrings_stats)

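/* counters we never report but still read, so the "mib counters about
 * to overflow" interrupt condition is fully cleared */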
static const u32 unused_mib_regs[] = {
        ETH_MIB_TX_ALL_OCTETS,
        ETH_MIB_TX_ALL_PKTS,
        ETH_MIB_RX_ALL_OCTETS,
        ETH_MIB_RX_ALL_PKTS,
};


static void bcm_enet_get_drvinfo(struct net_device *netdev,
                                 struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, bcm_enet_driver_version,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
        drvinfo->n_stats = BCM_ENET_STATS_LEN;
}

static int bcm_enet_get_sset_count(struct net_device *netdev,
                                        int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return BCM_ENET_STATS_LEN;
        default:
                return -EINVAL;
        }
}

static void bcm_enet_get_strings(struct net_device *netdev,
                                 u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                        memcpy(data + i * ETH_GSTRING_LEN,
                               bcm_enet_gstrings_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                }
                break;
        }
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
        int i;

        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                const struct bcm_enet_stats *s;
                u32 val;
                char *p;

                s = &bcm_enet_gstrings_stats[i];
                if (s->mib_reg == -1)
                        continue;

                val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
                p = (char *)priv + s->stat_offset;

                if (s->sizeof_stat == sizeof(u64))
                        *(u64 *)p += val;
                else
                        *(u32 *)p += val;
        }

        /* also empty unused mib counters to make sure mib counter
         * overflow interrupt is cleared */
        for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
                (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}

static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
        struct bcm_enet_priv *priv;

        priv = container_of(t, struct bcm_enet_priv, mib_update_task);
        mutex_lock(&priv->mib_update_lock);
        update_mib_counters(priv);
        mutex_unlock(&priv->mib_update_lock);

        /* reenable mib interrupt */
        if (netif_running(priv->net_dev))
                enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
                                       struct ethtool_stats *stats,
                                       u64 *data)
{
        struct bcm_enet_priv *priv;
        int i;

        priv = netdev_priv(netdev);

        mutex_lock(&priv->mib_update_lock);
        update_mib_counters(priv);

        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                const struct bcm_enet_stats *s;
                char *p;

                s = &bcm_enet_gstrings_stats[i];
                if (s->mib_reg == -1)
                        p = (char *)&netdev->stats;
                else
                        p = (char *)priv;
                p += s->stat_offset;
                data[i] = (s->sizeof_stat == sizeof(u64)) ?
                        *(u64 *)p : *(u32 *)p;
        }
        mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_nway_reset(struct net_device *dev)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        if (priv->has_phy) {
                if (!priv->phydev)
                        return -ENODEV;
                return genphy_restart_aneg(priv->phydev);
        }

        return -EOPNOTSUPP;
}

static int bcm_enet_get_settings(struct net_device *dev,
                                 struct ethtool_cmd *cmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        cmd->maxrxpkt = 0;
        cmd->maxtxpkt = 0;

        if (priv->has_phy) {
                if (!priv->phydev)
                        return -ENODEV;
                return phy_ethtool_gset(priv->phydev, cmd);
        } else {
                cmd->autoneg = 0;
                ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
                                            ? SPEED_100 : SPEED_10));
                cmd->duplex = (priv->force_duplex_full) ?
                        DUPLEX_FULL : DUPLEX_HALF;
                cmd->supported = ADVERTISED_10baseT_Half |
                        ADVERTISED_10baseT_Full |
                        ADVERTISED_100baseT_Half |
                        ADVERTISED_100baseT_Full;
                cmd->advertising = 0;
                cmd->port = PORT_MII;
                cmd->transceiver = XCVR_EXTERNAL;
        }
        return 0;
}

static int bcm_enet_set_settings(struct net_device *dev,
                                 struct ethtool_cmd *cmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        if (priv->has_phy) {
                if (!priv->phydev)
                        return -ENODEV;
                return phy_ethtool_sset(priv->phydev, cmd);
        } else {
                if (cmd->autoneg ||
                    (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
                    cmd->port != PORT_MII)
                        return -EINVAL;

                priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
                priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;

                if (netif_running(dev))
                        bcm_enet_adjust_link(dev);
                return 0;
        }
}

static void bcm_enet_get_ringparam(struct net_device *dev,
                                   struct ethtool_ringparam *ering)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        /* rx/tx ring is actually only limited by memory */
        ering->rx_max_pending = 8192;
        ering->tx_max_pending = 8192;
        ering->rx_pending = priv->rx_ring_size;
        ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
                                  struct ethtool_ringparam *ering)
{
        struct bcm_enet_priv *priv;
        int was_running;

        priv = netdev_priv(dev);

        was_running = 0;
        if (netif_running(dev)) {
                bcm_enet_stop(dev);
                was_running = 1;
        }

        priv->rx_ring_size = ering->rx_pending;
        priv->tx_ring_size = ering->tx_pending;

        if (was_running) {
                int err;

                err = bcm_enet_open(dev);
                if (err)
                        dev_close(dev);
                else
                        bcm_enet_set_multicast_list(dev);
        }
        return 0;
}

static void bcm_enet_get_pauseparam(struct net_device *dev,
                                    struct ethtool_pauseparam *ecmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        ecmd->autoneg = priv->pause_auto;
        ecmd->rx_pause = priv->pause_rx;
        ecmd->tx_pause = priv->pause_tx;
}

1564 static int bcm_enet_set_pauseparam(struct net_device *dev,
1565                                    struct ethtool_pauseparam *ecmd)
1566 {
1567         struct bcm_enet_priv *priv;
1568
1569         priv = netdev_priv(dev);
1570
1571         if (priv->has_phy) {
1572                 if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
                        /* asymmetric pause mode is not supported; it would
                         * actually be possible, but the integrated PHY has a
                         * read-only asym_pause bit */
1576                         return -EINVAL;
1577                 }
1578         } else {
1579                 /* no pause autoneg on direct mii connection */
1580                 if (ecmd->autoneg)
1581                         return -EINVAL;
1582         }
1583
1584         priv->pause_auto = ecmd->autoneg;
1585         priv->pause_rx = ecmd->rx_pause;
1586         priv->pause_tx = ecmd->tx_pause;
1587
1588         return 0;
1589 }
1590
1591 static const struct ethtool_ops bcm_enet_ethtool_ops = {
1592         .get_strings            = bcm_enet_get_strings,
1593         .get_sset_count         = bcm_enet_get_sset_count,
1594         .get_ethtool_stats      = bcm_enet_get_ethtool_stats,
1595         .nway_reset             = bcm_enet_nway_reset,
1596         .get_settings           = bcm_enet_get_settings,
1597         .set_settings           = bcm_enet_set_settings,
1598         .get_drvinfo            = bcm_enet_get_drvinfo,
1599         .get_link               = ethtool_op_get_link,
1600         .get_ringparam          = bcm_enet_get_ringparam,
1601         .set_ringparam          = bcm_enet_set_ringparam,
1602         .get_pauseparam         = bcm_enet_get_pauseparam,
1603         .set_pauseparam         = bcm_enet_set_pauseparam,
1604 };
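
/*
 * Illustration only (not driver code): the ethtool_ops above map onto the
 * usual userspace commands, e.g. on a hypothetical interface "eth0":
 *
 *	ethtool -s eth0 autoneg off speed 100 duplex full   (set_settings)
 *	ethtool -A eth0 autoneg off rx on tx on             (set_pauseparam)
 *	ethtool -G eth0 rx 128 tx 64                        (set_ringparam)
 *	ethtool -S eth0                                     (get_ethtool_stats)
 */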
1605
1606 static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1607 {
1608         struct bcm_enet_priv *priv;
1609
1610         priv = netdev_priv(dev);
1611         if (priv->has_phy) {
1612                 if (!priv->phydev)
1613                         return -ENODEV;
1614                 return phy_mii_ioctl(priv->phydev, rq, cmd);
1615         } else {
1616                 struct mii_if_info mii;
1617
1618                 mii.dev = dev;
1619                 mii.mdio_read = bcm_enet_mdio_read_mii;
1620                 mii.mdio_write = bcm_enet_mdio_write_mii;
1621                 mii.phy_id = 0;
1622                 mii.phy_id_mask = 0x3f;
1623                 mii.reg_num_mask = 0x1f;
1624                 return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
1625         }
1626 }
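
/*
 * A hedged userspace sketch (interface name and fd handling illustrative
 * only): generic_mii_ioctl() above services the classic SIOCGMIIPHY /
 * SIOCGMIIREG / SIOCSMIIREG requests, so reading the BMSR of the attached
 * phy could look like:
 *
 *	struct ifreq ifr = { .ifr_name = "eth0" };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	ioctl(sock, SIOCGMIIPHY, &ifr);         // fills mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(sock, SIOCGMIIREG, &ifr);         // link bits in mii->val_out
 */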
1627
1628 /*
1629  * calculate actual hardware mtu
1630  */
1631 static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
1632 {
1633         int actual_mtu;
1634
1635         actual_mtu = mtu;
1636
1637         /* add ethernet header + vlan tag size */
1638         actual_mtu += VLAN_ETH_HLEN;
1639
1640         if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
1641                 return -EINVAL;
1642
1643         /*
1644          * setup maximum size before we get overflow mark in
1645          * descriptor, note that this will not prevent reception of
1646          * big frames, they will be split into multiple buffers
1647          * anyway
1648          */
1649         priv->hw_mtu = actual_mtu;
1650
1651         /*
1652          * align rx buffer size to dma burst len, account FCS since
1653          * it's appended
1654          */
1655         priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
1656                                   priv->dma_maxburst * 4);
1657         return 0;
1658 }
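
/*
 * Worked example: for the standard 1500 byte mtu, actual_mtu becomes
 * 1500 + VLAN_ETH_HLEN (18) = 1518; assuming the default dma burst of
 * 16 32-bit words, the rx buffer is then sized to
 * ALIGN(1518 + ETH_FCS_LEN, 16 * 4) = ALIGN(1522, 64) = 1536 bytes.
 */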
1659
1660 /*
1661  * adjust mtu, can't be called while device is running
1662  */
1663 static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
1664 {
1665         int ret;
1666
1667         if (netif_running(dev))
1668                 return -EBUSY;
1669
1670         ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
1671         if (ret)
1672                 return ret;
1673         dev->mtu = new_mtu;
1674         return 0;
1675 }
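
/*
 * Usage note (interface name illustrative): the mtu can only be changed
 * while the device is down, e.g.:
 *
 *	ip link set dev eth0 down
 *	ip link set dev eth0 mtu 1536
 *	ip link set dev eth0 up
 *
 * assuming 1536 passes the compute_hw_mtu() bounds check.
 */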
1676
1677 /*
1678  * preinit hardware to allow mii operation while device is down
1679  */
1680 static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
1681 {
1682         u32 val;
1683         int limit;
1684
1685         /* make sure mac is disabled */
1686         bcm_enet_disable_mac(priv);
1687
1688         /* soft reset mac */
1689         val = ENET_CTL_SRESET_MASK;
1690         enet_writel(priv, val, ENET_CTL_REG);
1691         wmb();
1692
1693         limit = 1000;
1694         do {
1695                 val = enet_readl(priv, ENET_CTL_REG);
1696                 if (!(val & ENET_CTL_SRESET_MASK))
1697                         break;
1698                 udelay(1);
1699         } while (limit--);
1700
1701         /* select correct mii interface */
1702         val = enet_readl(priv, ENET_CTL_REG);
1703         if (priv->use_external_mii)
1704                 val |= ENET_CTL_EPHYSEL_MASK;
1705         else
1706                 val &= ~ENET_CTL_EPHYSEL_MASK;
1707         enet_writel(priv, val, ENET_CTL_REG);
1708
1709         /* turn on mdc clock */
1710         enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1711                     ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
1712
1713         /* set mib counters to self-clear when read */
1714         val = enet_readl(priv, ENET_MIBCTL_REG);
1715         val |= ENET_MIBCTL_RDCLEAR_MASK;
1716         enet_writel(priv, val, ENET_MIBCTL_REG);
1717 }
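
/*
 * Note: since ENET_MIBCTL_RDCLEAR_MASK is set above, every read of a mib
 * register clears it; this is why the driver accumulates the hardware
 * counters in software under mib_update_lock rather than reading the
 * registers on demand.
 */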
1718
1719 static const struct net_device_ops bcm_enet_ops = {
1720         .ndo_open               = bcm_enet_open,
1721         .ndo_stop               = bcm_enet_stop,
1722         .ndo_start_xmit         = bcm_enet_start_xmit,
1723         .ndo_set_mac_address    = bcm_enet_set_mac_address,
1724         .ndo_set_rx_mode        = bcm_enet_set_multicast_list,
1725         .ndo_do_ioctl           = bcm_enet_ioctl,
1726         .ndo_change_mtu         = bcm_enet_change_mtu,
1727 #ifdef CONFIG_NET_POLL_CONTROLLER
1728         .ndo_poll_controller = bcm_enet_netpoll,
1729 #endif
1730 };
1731
1732 /*
1733  * allocate netdevice, request register memory and register device.
1734  */
1735 static int bcm_enet_probe(struct platform_device *pdev)
1736 {
1737         struct bcm_enet_priv *priv;
1738         struct net_device *dev;
1739         struct bcm63xx_enet_platform_data *pd;
1740         struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
1741         struct mii_bus *bus;
1742         const char *clk_name;
1743         int i, ret;
1744
        /* stop if the shared driver failed; assume driver->probe is
         * called in the same order we registered devices (is that
         * guaranteed?) */
1747         if (!bcm_enet_shared_base[0])
1748                 return -ENODEV;
1749
1750         res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1751         res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1752         res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1753         res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
1754         if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
1755                 return -ENODEV;
1756
1757         ret = 0;
1758         dev = alloc_etherdev(sizeof(*priv));
1759         if (!dev)
1760                 return -ENOMEM;
1761         priv = netdev_priv(dev);
1762
1763         priv->enet_is_sw = false;
1764         priv->dma_maxburst = BCMENET_DMA_MAXBURST;
1765
1766         ret = compute_hw_mtu(priv, dev->mtu);
1767         if (ret)
1768                 goto out;
1769
        priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
        if (IS_ERR(priv->base)) {
                ret = PTR_ERR(priv->base);
                goto out;
        }
1775
1776         dev->irq = priv->irq = res_irq->start;
1777         priv->irq_rx = res_irq_rx->start;
1778         priv->irq_tx = res_irq_tx->start;
1779         priv->mac_id = pdev->id;
1780
1781         /* get rx & tx dma channel id for this mac */
1782         if (priv->mac_id == 0) {
1783                 priv->rx_chan = 0;
1784                 priv->tx_chan = 1;
1785                 clk_name = "enet0";
1786         } else {
1787                 priv->rx_chan = 2;
1788                 priv->tx_chan = 3;
1789                 clk_name = "enet1";
1790         }
1791
1792         priv->mac_clk = clk_get(&pdev->dev, clk_name);
1793         if (IS_ERR(priv->mac_clk)) {
1794                 ret = PTR_ERR(priv->mac_clk);
1795                 goto out;
1796         }
1797         clk_prepare_enable(priv->mac_clk);
1798
1799         /* initialize default and fetch platform data */
1800         priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1801         priv->tx_ring_size = BCMENET_DEF_TX_DESC;
1802
1803         pd = pdev->dev.platform_data;
1804         if (pd) {
1805                 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
1806                 priv->has_phy = pd->has_phy;
1807                 priv->phy_id = pd->phy_id;
1808                 priv->has_phy_interrupt = pd->has_phy_interrupt;
1809                 priv->phy_interrupt = pd->phy_interrupt;
1810                 priv->use_external_mii = !pd->use_internal_phy;
1811                 priv->pause_auto = pd->pause_auto;
1812                 priv->pause_rx = pd->pause_rx;
1813                 priv->pause_tx = pd->pause_tx;
1814                 priv->force_duplex_full = pd->force_duplex_full;
1815                 priv->force_speed_100 = pd->force_speed_100;
1816                 priv->dma_chan_en_mask = pd->dma_chan_en_mask;
1817                 priv->dma_chan_int_mask = pd->dma_chan_int_mask;
1818                 priv->dma_chan_width = pd->dma_chan_width;
1819                 priv->dma_has_sram = pd->dma_has_sram;
1820                 priv->dma_desc_shift = pd->dma_desc_shift;
1821         }
1822
1823         if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
1824                 /* using internal PHY, enable clock */
1825                 priv->phy_clk = clk_get(&pdev->dev, "ephy");
1826                 if (IS_ERR(priv->phy_clk)) {
1827                         ret = PTR_ERR(priv->phy_clk);
1828                         priv->phy_clk = NULL;
1829                         goto out_put_clk_mac;
1830                 }
1831                 clk_prepare_enable(priv->phy_clk);
1832         }
1833
1834         /* do minimal hardware init to be able to probe mii bus */
1835         bcm_enet_hw_preinit(priv);
1836
1837         /* MII bus registration */
1838         if (priv->has_phy) {
1839
1840                 priv->mii_bus = mdiobus_alloc();
1841                 if (!priv->mii_bus) {
1842                         ret = -ENOMEM;
1843                         goto out_uninit_hw;
1844                 }
1845
1846                 bus = priv->mii_bus;
1847                 bus->name = "bcm63xx_enet MII bus";
1848                 bus->parent = &pdev->dev;
1849                 bus->priv = priv;
1850                 bus->read = bcm_enet_mdio_read_phylib;
1851                 bus->write = bcm_enet_mdio_write_phylib;
                snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d",
                         pdev->name, priv->mac_id);
1853
                /* only probe the bus address where we expect the PHY,
                 * because the mdio read operation returns 0 instead of
                 * 0xffff when no slave is present on the hardware */
1857                 bus->phy_mask = ~(1 << priv->phy_id);
1858
1859                 bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
1860                                         GFP_KERNEL);
1861                 if (!bus->irq) {
1862                         ret = -ENOMEM;
1863                         goto out_free_mdio;
1864                 }
1865
1866                 if (priv->has_phy_interrupt)
1867                         bus->irq[priv->phy_id] = priv->phy_interrupt;
1868                 else
1869                         bus->irq[priv->phy_id] = PHY_POLL;
1870
1871                 ret = mdiobus_register(bus);
1872                 if (ret) {
1873                         dev_err(&pdev->dev, "unable to register mdio bus\n");
1874                         goto out_free_mdio;
1875                 }
        } else {

                /* run platform code to initialize PHY device */
                if (pd && pd->mii_config &&
                    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
                                   bcm_enet_mdio_write_mii)) {
                        dev_err(&pdev->dev, "unable to configure mdio bus\n");
                        ret = -ENODEV;
                        goto out_uninit_hw;
                }
        }
1886
1887         spin_lock_init(&priv->rx_lock);
1888
1889         /* init rx timeout (used for oom) */
1890         init_timer(&priv->rx_timeout);
1891         priv->rx_timeout.function = bcm_enet_refill_rx_timer;
1892         priv->rx_timeout.data = (unsigned long)dev;
1893
1894         /* init the mib update lock&work */
1895         mutex_init(&priv->mib_update_lock);
1896         INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
1897
1898         /* zero mib counters */
1899         for (i = 0; i < ENET_MIB_REG_COUNT; i++)
1900                 enet_writel(priv, 0, ENET_MIB_REG(i));
1901
1902         /* register netdevice */
1903         dev->netdev_ops = &bcm_enet_ops;
1904         netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
1905
1906         SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
1907         SET_NETDEV_DEV(dev, &pdev->dev);
1908
1909         ret = register_netdev(dev);
1910         if (ret)
1911                 goto out_unregister_mdio;
1912
1913         netif_carrier_off(dev);
1914         platform_set_drvdata(pdev, dev);
1915         priv->pdev = pdev;
1916         priv->net_dev = dev;
1917
1918         return 0;
1919
1920 out_unregister_mdio:
1921         if (priv->mii_bus)
1922                 mdiobus_unregister(priv->mii_bus);
1923
1924 out_free_mdio:
1925         if (priv->mii_bus)
1926                 mdiobus_free(priv->mii_bus);
1927
1928 out_uninit_hw:
1929         /* turn off mdc clock */
1930         enet_writel(priv, 0, ENET_MIISC_REG);
1931         if (priv->phy_clk) {
1932                 clk_disable_unprepare(priv->phy_clk);
1933                 clk_put(priv->phy_clk);
1934         }
1935
1936 out_put_clk_mac:
1937         clk_disable_unprepare(priv->mac_clk);
1938         clk_put(priv->mac_clk);
1939 out:
1940         free_netdev(dev);
1941         return ret;
1942 }
1943
1944
1945 /*
1946  * exit func, stops hardware and unregisters netdevice
1947  */
1948 static int bcm_enet_remove(struct platform_device *pdev)
1949 {
1950         struct bcm_enet_priv *priv;
1951         struct net_device *dev;
1952
1953         /* stop netdevice */
1954         dev = platform_get_drvdata(pdev);
1955         priv = netdev_priv(dev);
1956         unregister_netdev(dev);
1957
1958         /* turn off mdc clock */
1959         enet_writel(priv, 0, ENET_MIISC_REG);
1960
1961         if (priv->has_phy) {
1962                 mdiobus_unregister(priv->mii_bus);
1963                 mdiobus_free(priv->mii_bus);
1964         } else {
1965                 struct bcm63xx_enet_platform_data *pd;
1966
1967                 pd = pdev->dev.platform_data;
1968                 if (pd && pd->mii_config)
1969                         pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
1970                                        bcm_enet_mdio_write_mii);
1971         }
1972
1973         /* disable hw block clocks */
1974         if (priv->phy_clk) {
1975                 clk_disable_unprepare(priv->phy_clk);
1976                 clk_put(priv->phy_clk);
1977         }
1978         clk_disable_unprepare(priv->mac_clk);
1979         clk_put(priv->mac_clk);
1980
1981         free_netdev(dev);
1982         return 0;
1983 }
1984
1985 struct platform_driver bcm63xx_enet_driver = {
1986         .probe  = bcm_enet_probe,
1987         .remove = bcm_enet_remove,
1988         .driver = {
1989                 .name   = "bcm63xx_enet",
1990                 .owner  = THIS_MODULE,
1991         },
1992 };
1993
1994 /*
1995  * switch mii access callbacks
1996  */
1997 static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
1998                                 int ext, int phy_id, int location)
1999 {
2000         u32 reg;
2001         int ret;
2002
2003         spin_lock_bh(&priv->enetsw_mdio_lock);
2004         enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
2005
2006         reg = ENETSW_MDIOC_RD_MASK |
2007                 (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
2008                 (location << ENETSW_MDIOC_REG_SHIFT);
2009
2010         if (ext)
2011                 reg |= ENETSW_MDIOC_EXT_MASK;
2012
2013         enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
2014         udelay(50);
2015         ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
2016         spin_unlock_bh(&priv->enetsw_mdio_lock);
2017         return ret;
2018 }
2019
2020 static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
2021                                  int ext, int phy_id, int location,
2022                                  uint16_t data)
2023 {
2024         u32 reg;
2025
2026         spin_lock_bh(&priv->enetsw_mdio_lock);
2027         enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
2028
2029         reg = ENETSW_MDIOC_WR_MASK |
2030                 (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
2031                 (location << ENETSW_MDIOC_REG_SHIFT);
2032
2033         if (ext)
2034                 reg |= ENETSW_MDIOC_EXT_MASK;
2035
2036         reg |= data;
2037
2038         enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
2039         udelay(50);
2040         spin_unlock_bh(&priv->enetsw_mdio_lock);
2041 }
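
/*
 * Minimal usage sketch (mirrors what swphy_poll_timer() does below); the
 * "ext" flag is nonzero for phys behind the RGMII ports, see
 * bcm_enet_port_is_rgmii():
 *
 *	int bmsr = bcmenet_sw_mdio_read(priv, ext, phy_id, MII_BMSR);
 *	int link_up = (bmsr & BMSR_LSTATUS) ? 1 : 0;
 */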
2042
2043 static inline int bcm_enet_port_is_rgmii(int portid)
2044 {
2045         return portid >= ENETSW_RGMII_PORT0;
2046 }
2047
2048 /*
2049  * enet sw PHY polling
2050  */
2051 static void swphy_poll_timer(unsigned long data)
2052 {
2053         struct bcm_enet_priv *priv = (struct bcm_enet_priv *)data;
2054         unsigned int i;
2055
2056         for (i = 0; i < priv->num_ports; i++) {
2057                 struct bcm63xx_enetsw_port *port;
2058                 int val, j, up, advertise, lpa, lpa2, speed, duplex, media;
2059                 int external_phy = bcm_enet_port_is_rgmii(i);
2060                 u8 override;
2061
2062                 port = &priv->used_ports[i];
2063                 if (!port->used)
2064                         continue;
2065
2066                 if (port->bypass_link)
2067                         continue;
2068
                /* BMSR latches link-down events; read twice so the
                 * second read reflects the current link state */
2070                 for (j = 0; j < 2; j++)
2071                         val = bcmenet_sw_mdio_read(priv, external_phy,
2072                                                    port->phy_id, MII_BMSR);
2073
2074                 if (val == 0xffff)
2075                         continue;
2076
2077                 up = (val & BMSR_LSTATUS) ? 1 : 0;
2078                 if (!(up ^ priv->sw_port_link[i]))
2079                         continue;
2080
2081                 priv->sw_port_link[i] = up;
2082
2083                 /* link changed */
2084                 if (!up) {
2085                         dev_info(&priv->pdev->dev, "link DOWN on %s\n",
2086                                  port->name);
2087                         enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2088                                       ENETSW_PORTOV_REG(i));
2089                         enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2090                                       ENETSW_PTCTRL_TXDIS_MASK,
2091                                       ENETSW_PTCTRL_REG(i));
2092                         continue;
2093                 }
2094
2095                 advertise = bcmenet_sw_mdio_read(priv, external_phy,
2096                                                  port->phy_id, MII_ADVERTISE);
2097
2098                 lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2099                                            MII_LPA);
2100
2101                 lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2102                                             MII_STAT1000);
2103
2104                 /* figure out media and duplex from advertise and LPA values */
2105                 media = mii_nway_result(lpa & advertise);
2106                 duplex = (media & ADVERTISE_FULL) ? 1 : 0;
2107                 if (lpa2 & LPA_1000FULL)
2108                         duplex = 1;
2109
2110                 if (lpa2 & (LPA_1000FULL | LPA_1000HALF))
2111                         speed = 1000;
2112                 else {
2113                         if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
2114                                 speed = 100;
2115                         else
2116                                 speed = 10;
2117                 }
2118
2119                 dev_info(&priv->pdev->dev,
2120                          "link UP on %s, %dMbps, %s-duplex\n",
2121                          port->name, speed, duplex ? "full" : "half");
2122
2123                 override = ENETSW_PORTOV_ENABLE_MASK |
2124                         ENETSW_PORTOV_LINKUP_MASK;
2125
2126                 if (speed == 1000)
2127                         override |= ENETSW_IMPOV_1000_MASK;
2128                 else if (speed == 100)
2129                         override |= ENETSW_IMPOV_100_MASK;
2130                 if (duplex)
2131                         override |= ENETSW_IMPOV_FDX_MASK;
2132
2133                 enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2134                 enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2135         }
2136
2137         priv->swphy_poll.expires = jiffies + HZ;
2138         add_timer(&priv->swphy_poll);
2139 }
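
/*
 * The poll handler above re-arms itself every second (HZ jiffies); it is
 * first started from bcm_enetsw_open() and torn down with del_timer_sync()
 * in bcm_enetsw_stop().
 */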
2140
2141 /*
2142  * open callback, allocate dma rings & buffers and start rx operation
2143  */
2144 static int bcm_enetsw_open(struct net_device *dev)
2145 {
2146         struct bcm_enet_priv *priv;
2147         struct device *kdev;
2148         int i, ret;
2149         unsigned int size;
2150         void *p;
2151         u32 val;
2152
2153         priv = netdev_priv(dev);
2154         kdev = &priv->pdev->dev;
2155
2156         /* mask all interrupts and request them */
2157         enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2158         enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2159
2160         ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
2161                           IRQF_DISABLED, dev->name, dev);
2162         if (ret)
2163                 goto out_freeirq;
2164
2165         if (priv->irq_tx != -1) {
2166                 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
2167                                   IRQF_DISABLED, dev->name, dev);
2168                 if (ret)
2169                         goto out_freeirq_rx;
2170         }
2171
2172         /* allocate rx dma ring */
2173         size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
2174         p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
2175         if (!p) {
2176                 dev_err(kdev, "cannot allocate rx ring %u\n", size);
2177                 ret = -ENOMEM;
2178                 goto out_freeirq_tx;
2179         }
2180
2181         memset(p, 0, size);
2182         priv->rx_desc_alloc_size = size;
2183         priv->rx_desc_cpu = p;
2184
2185         /* allocate tx dma ring */
2186         size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
2187         p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
2188         if (!p) {
2189                 dev_err(kdev, "cannot allocate tx ring\n");
2190                 ret = -ENOMEM;
2191                 goto out_free_rx_ring;
2192         }
2193
2194         memset(p, 0, size);
2195         priv->tx_desc_alloc_size = size;
2196         priv->tx_desc_cpu = p;
2197
        priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
                               GFP_KERNEL);
        if (!priv->tx_skb) {
                dev_err(kdev, "cannot allocate tx skb queue\n");
                ret = -ENOMEM;
                goto out_free_tx_ring;
        }
2205
2206         priv->tx_desc_count = priv->tx_ring_size;
2207         priv->tx_dirty_desc = 0;
2208         priv->tx_curr_desc = 0;
2209         spin_lock_init(&priv->tx_lock);
2210
2211         /* init & fill rx ring with skbs */
        priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
                               GFP_KERNEL);
2214         if (!priv->rx_skb) {
2215                 dev_err(kdev, "cannot allocate rx skb queue\n");
2216                 ret = -ENOMEM;
2217                 goto out_free_tx_skb;
2218         }
2219
2220         priv->rx_desc_count = 0;
2221         priv->rx_dirty_desc = 0;
2222         priv->rx_curr_desc = 0;
2223
2224         /* disable all ports */
2225         for (i = 0; i < priv->num_ports; i++) {
2226                 enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2227                               ENETSW_PORTOV_REG(i));
2228                 enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2229                               ENETSW_PTCTRL_TXDIS_MASK,
2230                               ENETSW_PTCTRL_REG(i));
2231
2232                 priv->sw_port_link[i] = 0;
2233         }
2234
2235         /* reset mib */
2236         val = enetsw_readb(priv, ENETSW_GMCR_REG);
2237         val |= ENETSW_GMCR_RST_MIB_MASK;
2238         enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2239         mdelay(1);
2240         val &= ~ENETSW_GMCR_RST_MIB_MASK;
2241         enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2242         mdelay(1);
2243
2244         /* force CPU port state */
2245         val = enetsw_readb(priv, ENETSW_IMPOV_REG);
2246         val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
2247         enetsw_writeb(priv, val, ENETSW_IMPOV_REG);
2248
2249         /* enable switch forward engine */
2250         val = enetsw_readb(priv, ENETSW_SWMODE_REG);
2251         val |= ENETSW_SWMODE_FWD_EN_MASK;
2252         enetsw_writeb(priv, val, ENETSW_SWMODE_REG);
2253
2254         /* enable jumbo on all ports */
2255         enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
2256         enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
2257
2258         /* initialize flow control buffer allocation */
2259         enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
2260                         ENETDMA_BUFALLOC_REG(priv->rx_chan));
2261
2262         if (bcm_enet_refill_rx(dev)) {
2263                 dev_err(kdev, "cannot allocate rx skb queue\n");
2264                 ret = -ENOMEM;
2265                 goto out;
2266         }
2267
2268         /* write rx & tx ring addresses */
2269         enet_dmas_writel(priv, priv->rx_desc_dma,
2270                          ENETDMAS_RSTART_REG, priv->rx_chan);
2271         enet_dmas_writel(priv, priv->tx_desc_dma,
2272                          ENETDMAS_RSTART_REG, priv->tx_chan);
2273
2274         /* clear remaining state ram for rx & tx channel */
2275         enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
2276         enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
2277         enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
2278         enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
2279         enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
2280         enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
2281
2282         /* set dma maximum burst len */
2283         enet_dmac_writel(priv, priv->dma_maxburst,
2284                          ENETDMAC_MAXBURST, priv->rx_chan);
2285         enet_dmac_writel(priv, priv->dma_maxburst,
2286                          ENETDMAC_MAXBURST, priv->tx_chan);
2287
2288         /* set flow control low/high threshold to 1/3 / 2/3 */
2289         val = priv->rx_ring_size / 3;
2290         enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
2291         val = (priv->rx_ring_size * 2) / 3;
2292         enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
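
        /* e.g. with a 64-entry rx ring the thresholds above come out at
         * 64 / 3 = 21 (low) and (64 * 2) / 3 = 42 (high) buffers */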
2293
2294         /* all set, enable mac and interrupts, start dma engine and
2295          * kick rx dma channel
2296          */
2297         wmb();
2298         enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
2299         enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
2300                          ENETDMAC_CHANCFG, priv->rx_chan);
2301
2302         /* watch "packet transferred" interrupt in rx and tx */
2303         enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2304                          ENETDMAC_IR, priv->rx_chan);
2305         enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2306                          ENETDMAC_IR, priv->tx_chan);
2307
2308         /* make sure we enable napi before rx interrupt  */
2309         napi_enable(&priv->napi);
2310
2311         enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2312                          ENETDMAC_IRMASK, priv->rx_chan);
2313         enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2314                          ENETDMAC_IRMASK, priv->tx_chan);
2315
2316         netif_carrier_on(dev);
2317         netif_start_queue(dev);
2318
2319         /* apply override config for bypass_link ports here. */
2320         for (i = 0; i < priv->num_ports; i++) {
2321                 struct bcm63xx_enetsw_port *port;
2322                 u8 override;
2323                 port = &priv->used_ports[i];
2324                 if (!port->used)
2325                         continue;
2326
2327                 if (!port->bypass_link)
2328                         continue;
2329
2330                 override = ENETSW_PORTOV_ENABLE_MASK |
2331                         ENETSW_PORTOV_LINKUP_MASK;
2332
2333                 switch (port->force_speed) {
2334                 case 1000:
2335                         override |= ENETSW_IMPOV_1000_MASK;
2336                         break;
2337                 case 100:
2338                         override |= ENETSW_IMPOV_100_MASK;
2339                         break;
2340                 case 10:
2341                         break;
2342                 default:
2343                         pr_warn("invalid forced speed on port %s: assume 10\n",
2344                                port->name);
2345                         break;
2346                 }
2347
2348                 if (port->force_duplex_full)
2349                         override |= ENETSW_IMPOV_FDX_MASK;
2350
2351
2352                 enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2353                 enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2354         }
2355
2356         /* start phy polling timer */
2357         init_timer(&priv->swphy_poll);
2358         priv->swphy_poll.function = swphy_poll_timer;
2359         priv->swphy_poll.data = (unsigned long)priv;
2360         priv->swphy_poll.expires = jiffies;
2361         add_timer(&priv->swphy_poll);
2362         return 0;
2363
2364 out:
2365         for (i = 0; i < priv->rx_ring_size; i++) {
2366                 struct bcm_enet_desc *desc;
2367
2368                 if (!priv->rx_skb[i])
2369                         continue;
2370
2371                 desc = &priv->rx_desc_cpu[i];
2372                 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2373                                  DMA_FROM_DEVICE);
2374                 kfree_skb(priv->rx_skb[i]);
2375         }
2376         kfree(priv->rx_skb);
2377
2378 out_free_tx_skb:
2379         kfree(priv->tx_skb);
2380
2381 out_free_tx_ring:
2382         dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2383                           priv->tx_desc_cpu, priv->tx_desc_dma);
2384
2385 out_free_rx_ring:
2386         dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2387                           priv->rx_desc_cpu, priv->rx_desc_dma);
2388
2389 out_freeirq_tx:
2390         if (priv->irq_tx != -1)
2391                 free_irq(priv->irq_tx, dev);
2392
2393 out_freeirq_rx:
2394         free_irq(priv->irq_rx, dev);
2395
2396 out_freeirq:
2397         return ret;
2398 }
2399
2400 /* stop callback */
2401 static int bcm_enetsw_stop(struct net_device *dev)
2402 {
2403         struct bcm_enet_priv *priv;
2404         struct device *kdev;
2405         int i;
2406
2407         priv = netdev_priv(dev);
2408         kdev = &priv->pdev->dev;
2409
2410         del_timer_sync(&priv->swphy_poll);
2411         netif_stop_queue(dev);
2412         napi_disable(&priv->napi);
2413         del_timer_sync(&priv->rx_timeout);
2414
2415         /* mask all interrupts */
2416         enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2417         enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2418
2419         /* disable dma & mac */
2420         bcm_enet_disable_dma(priv, priv->tx_chan);
2421         bcm_enet_disable_dma(priv, priv->rx_chan);
2422
2423         /* force reclaim of all tx buffers */
2424         bcm_enet_tx_reclaim(dev, 1);
2425
2426         /* free the rx skb ring */
2427         for (i = 0; i < priv->rx_ring_size; i++) {
2428                 struct bcm_enet_desc *desc;
2429
2430                 if (!priv->rx_skb[i])
2431                         continue;
2432
2433                 desc = &priv->rx_desc_cpu[i];
2434                 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2435                                  DMA_FROM_DEVICE);
2436                 kfree_skb(priv->rx_skb[i]);
2437         }
2438
2439         /* free remaining allocated memory */
2440         kfree(priv->rx_skb);
2441         kfree(priv->tx_skb);
2442         dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2443                           priv->rx_desc_cpu, priv->rx_desc_dma);
2444         dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2445                           priv->tx_desc_cpu, priv->tx_desc_dma);
2446         if (priv->irq_tx != -1)
2447                 free_irq(priv->irq_tx, dev);
2448         free_irq(priv->irq_rx, dev);
2449
2450         return 0;
2451 }
2452
/* try to sort out the phy external status by walking the used_ports
 * array in the bcm_enet_priv structure. in case the phy address is not
 * assigned to any physical port on the switch, assume it is external
 * (and yell at the user).
 */
2458 static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
2459 {
2460         int i;
2461
2462         for (i = 0; i < priv->num_ports; ++i) {
2463                 if (!priv->used_ports[i].used)
2464                         continue;
2465                 if (priv->used_ports[i].phy_id == phy_id)
2466                         return bcm_enet_port_is_rgmii(i);
2467         }
2468
2469         printk_once(KERN_WARNING  "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
2470                     phy_id);
2471         return 1;
2472 }
2473
2474 /* can't use bcmenet_sw_mdio_read directly as we need to sort out
2475  * external/internal status of the given phy_id first.
2476  */
2477 static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
2478                                     int location)
2479 {
2480         struct bcm_enet_priv *priv;
2481
2482         priv = netdev_priv(dev);
2483         return bcmenet_sw_mdio_read(priv,
2484                                     bcm_enetsw_phy_is_external(priv, phy_id),
2485                                     phy_id, location);
2486 }
2487
2488 /* can't use bcmenet_sw_mdio_write directly as we need to sort out
2489  * external/internal status of the given phy_id first.
2490  */
2491 static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
2492                                       int location,
2493                                       int val)
2494 {
2495         struct bcm_enet_priv *priv;
2496
2497         priv = netdev_priv(dev);
2498         bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
2499                               phy_id, location, val);
2500 }
2501
2502 static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2503 {
2504         struct mii_if_info mii;
2505
2506         mii.dev = dev;
2507         mii.mdio_read = bcm_enetsw_mii_mdio_read;
2508         mii.mdio_write = bcm_enetsw_mii_mdio_write;
2509         mii.phy_id = 0;
2510         mii.phy_id_mask = 0x3f;
2511         mii.reg_num_mask = 0x1f;
2512         return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
2513
2514 }
2515
2516 static const struct net_device_ops bcm_enetsw_ops = {
2517         .ndo_open               = bcm_enetsw_open,
2518         .ndo_stop               = bcm_enetsw_stop,
2519         .ndo_start_xmit         = bcm_enet_start_xmit,
2520         .ndo_change_mtu         = bcm_enet_change_mtu,
2521         .ndo_do_ioctl           = bcm_enetsw_ioctl,
2522 };
2523
2524
2525 static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
2526         { "rx_packets", DEV_STAT(rx_packets), -1 },
2527         { "tx_packets", DEV_STAT(tx_packets), -1 },
2528         { "rx_bytes", DEV_STAT(rx_bytes), -1 },
2529         { "tx_bytes", DEV_STAT(tx_bytes), -1 },
2530         { "rx_errors", DEV_STAT(rx_errors), -1 },
2531         { "tx_errors", DEV_STAT(tx_errors), -1 },
2532         { "rx_dropped", DEV_STAT(rx_dropped), -1 },
2533         { "tx_dropped", DEV_STAT(tx_dropped), -1 },
2534
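        /* the switch mib appears to count from the switch port's point of
         * view, hence the tx_* strings below reading ETHSW_MIB_RX_*
         * registers and vice versa; the unicast entries reusing the
         * broadcast/multicast registers may well be copy-paste oversights */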
2535         { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
2536         { "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
2537         { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
2538         { "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
2539         { "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
2540         { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
2541         { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
2542         { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
2543         { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
2544         { "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
2545           ETHSW_MIB_RX_1024_1522 },
2546         { "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
2547           ETHSW_MIB_RX_1523_2047 },
2548         { "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
2549           ETHSW_MIB_RX_2048_4095 },
2550         { "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
2551           ETHSW_MIB_RX_4096_8191 },
2552         { "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
2553           ETHSW_MIB_RX_8192_9728 },
2554         { "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
2555         { "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
2556         { "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
2557         { "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
2558         { "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },
2559
2560         { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
2561         { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
2562         { "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
2563         { "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
2564         { "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
2565         { "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
2566
2567 };
2568
#define BCM_ENETSW_STATS_LEN	ARRAY_SIZE(bcm_enetsw_gstrings_stats)
2571
2572 static void bcm_enetsw_get_strings(struct net_device *netdev,
2573                                    u32 stringset, u8 *data)
2574 {
2575         int i;
2576
2577         switch (stringset) {
2578         case ETH_SS_STATS:
2579                 for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2580                         memcpy(data + i * ETH_GSTRING_LEN,
2581                                bcm_enetsw_gstrings_stats[i].stat_string,
2582                                ETH_GSTRING_LEN);
2583                 }
2584                 break;
2585         }
2586 }
2587
2588 static int bcm_enetsw_get_sset_count(struct net_device *netdev,
2589                                      int string_set)
2590 {
2591         switch (string_set) {
2592         case ETH_SS_STATS:
2593                 return BCM_ENETSW_STATS_LEN;
2594         default:
2595                 return -EINVAL;
2596         }
2597 }
2598
2599 static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
2600                                    struct ethtool_drvinfo *drvinfo)
2601 {
        strlcpy(drvinfo->driver, bcm_enet_driver_name,
                sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, bcm_enet_driver_version,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
2606         drvinfo->n_stats = BCM_ENETSW_STATS_LEN;
2607 }
2608
2609 static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
2610                                          struct ethtool_stats *stats,
2611                                          u64 *data)
2612 {
2613         struct bcm_enet_priv *priv;
2614         int i;
2615
2616         priv = netdev_priv(netdev);
2617
2618         for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2619                 const struct bcm_enet_stats *s;
2620                 u32 lo, hi;
2621                 char *p;
2622                 int reg;
2623
2624                 s = &bcm_enetsw_gstrings_stats[i];
2625
2626                 reg = s->mib_reg;
2627                 if (reg == -1)
2628                         continue;
2629
2630                 lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
2631                 p = (char *)priv + s->stat_offset;
2632
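                /* 64-bit mib values span two consecutive 32-bit registers;
                 * the high word lives at reg + 1 */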
2633                 if (s->sizeof_stat == sizeof(u64)) {
2634                         hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
2635                         *(u64 *)p = ((u64)hi << 32 | lo);
2636                 } else {
2637                         *(u32 *)p = lo;
2638                 }
2639         }
2640
2641         for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2642                 const struct bcm_enet_stats *s;
2643                 char *p;
2644
2645                 s = &bcm_enetsw_gstrings_stats[i];
2646
2647                 if (s->mib_reg == -1)
2648                         p = (char *)&netdev->stats + s->stat_offset;
2649                 else
2650                         p = (char *)priv + s->stat_offset;
2651
2652                 data[i] = (s->sizeof_stat == sizeof(u64)) ?
2653                         *(u64 *)p : *(u32 *)p;
2654         }
2655 }
2656
2657 static void bcm_enetsw_get_ringparam(struct net_device *dev,
2658                                      struct ethtool_ringparam *ering)
2659 {
2660         struct bcm_enet_priv *priv;
2661
2662         priv = netdev_priv(dev);
2663
2664         /* rx/tx ring is actually only limited by memory */
2665         ering->rx_max_pending = 8192;
2666         ering->tx_max_pending = 8192;
2667         ering->rx_mini_max_pending = 0;
2668         ering->rx_jumbo_max_pending = 0;
2669         ering->rx_pending = priv->rx_ring_size;
2670         ering->tx_pending = priv->tx_ring_size;
2671 }
2672
2673 static int bcm_enetsw_set_ringparam(struct net_device *dev,
2674                                     struct ethtool_ringparam *ering)
2675 {
2676         struct bcm_enet_priv *priv;
2677         int was_running;
2678
2679         priv = netdev_priv(dev);
2680
2681         was_running = 0;
2682         if (netif_running(dev)) {
2683                 bcm_enetsw_stop(dev);
2684                 was_running = 1;
2685         }
2686
2687         priv->rx_ring_size = ering->rx_pending;
2688         priv->tx_ring_size = ering->tx_pending;
2689
2690         if (was_running) {
2691                 int err;
2692
2693                 err = bcm_enetsw_open(dev);
2694                 if (err)
2695                         dev_close(dev);
2696         }
2697         return 0;
2698 }
2699
static const struct ethtool_ops bcm_enetsw_ethtool_ops = {
2701         .get_strings            = bcm_enetsw_get_strings,
2702         .get_sset_count         = bcm_enetsw_get_sset_count,
2703         .get_ethtool_stats      = bcm_enetsw_get_ethtool_stats,
2704         .get_drvinfo            = bcm_enetsw_get_drvinfo,
2705         .get_ringparam          = bcm_enetsw_get_ringparam,
2706         .set_ringparam          = bcm_enetsw_set_ringparam,
2707 };
2708
2709 /* allocate netdevice, request register memory and register device. */
2710 static int bcm_enetsw_probe(struct platform_device *pdev)
2711 {
2712         struct bcm_enet_priv *priv;
2713         struct net_device *dev;
2714         struct bcm63xx_enetsw_platform_data *pd;
2715         struct resource *res_mem;
2716         int ret, irq_rx, irq_tx;
2717
        /* stop if the shared driver failed; assume driver->probe is
         * called in the same order we registered devices (is that
         * guaranteed?)
         */
2721         if (!bcm_enet_shared_base[0])
2722                 return -ENODEV;
2723
2724         res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2725         irq_rx = platform_get_irq(pdev, 0);
        irq_tx = platform_get_irq(pdev, 1);
        /* the tx irq is optional here; normalize a lookup failure to -1,
         * the "not used" value tested throughout the driver */
        if (irq_tx < 0)
                irq_tx = -1;
        if (!res_mem || irq_rx < 0)
2728                 return -ENODEV;
2729
2730         ret = 0;
2731         dev = alloc_etherdev(sizeof(*priv));
2732         if (!dev)
2733                 return -ENOMEM;
        priv = netdev_priv(dev);
        /* no memset needed: alloc_etherdev() zeroes the private area */
2736
2737         /* initialize default and fetch platform data */
2738         priv->enet_is_sw = true;
2739         priv->irq_rx = irq_rx;
2740         priv->irq_tx = irq_tx;
2741         priv->rx_ring_size = BCMENET_DEF_RX_DESC;
2742         priv->tx_ring_size = BCMENET_DEF_TX_DESC;
2743         priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
2744
2745         pd = pdev->dev.platform_data;
2746         if (pd) {
2747                 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
2748                 memcpy(priv->used_ports, pd->used_ports,
2749                        sizeof(pd->used_ports));
2750                 priv->num_ports = pd->num_ports;
2751                 priv->dma_has_sram = pd->dma_has_sram;
2752                 priv->dma_chan_en_mask = pd->dma_chan_en_mask;
2753                 priv->dma_chan_int_mask = pd->dma_chan_int_mask;
2754                 priv->dma_chan_width = pd->dma_chan_width;
2755         }
2756
2757         ret = compute_hw_mtu(priv, dev->mtu);
2758         if (ret)
2759                 goto out;
2760
2761         if (!request_mem_region(res_mem->start, resource_size(res_mem),
2762                                 "bcm63xx_enetsw")) {
2763                 ret = -EBUSY;
2764                 goto out;
2765         }
2766
2767         priv->base = ioremap(res_mem->start, resource_size(res_mem));
2768         if (priv->base == NULL) {
2769                 ret = -ENOMEM;
2770                 goto out_release_mem;
2771         }
2772
2773         priv->mac_clk = clk_get(&pdev->dev, "enetsw");
2774         if (IS_ERR(priv->mac_clk)) {
2775                 ret = PTR_ERR(priv->mac_clk);
2776                 goto out_unmap;
2777         }
        clk_prepare_enable(priv->mac_clk);
2779
2780         priv->rx_chan = 0;
2781         priv->tx_chan = 1;
2782         spin_lock_init(&priv->rx_lock);
2783
2784         /* init rx timeout (used for oom) */
2785         init_timer(&priv->rx_timeout);
2786         priv->rx_timeout.function = bcm_enet_refill_rx_timer;
2787         priv->rx_timeout.data = (unsigned long)dev;
2788
2789         /* register netdevice */
2790         dev->netdev_ops = &bcm_enetsw_ops;
2791         netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
2792         SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops);
2793         SET_NETDEV_DEV(dev, &pdev->dev);
2794
2795         spin_lock_init(&priv->enetsw_mdio_lock);
2796
2797         ret = register_netdev(dev);
2798         if (ret)
2799                 goto out_put_clk;
2800
2801         netif_carrier_off(dev);
2802         platform_set_drvdata(pdev, dev);
2803         priv->pdev = pdev;
2804         priv->net_dev = dev;
2805
2806         return 0;
2807
out_put_clk:
        clk_disable_unprepare(priv->mac_clk);
        clk_put(priv->mac_clk);
2810
2811 out_unmap:
2812         iounmap(priv->base);
2813
2814 out_release_mem:
2815         release_mem_region(res_mem->start, resource_size(res_mem));
2816 out:
2817         free_netdev(dev);
2818         return ret;
2819 }
2820
2821
2822 /* exit func, stops hardware and unregisters netdevice */
2823 static int bcm_enetsw_remove(struct platform_device *pdev)
2824 {
2825         struct bcm_enet_priv *priv;
2826         struct net_device *dev;
2827         struct resource *res;
2828
2829         /* stop netdevice */
2830         dev = platform_get_drvdata(pdev);
2831         priv = netdev_priv(dev);
2832         unregister_netdev(dev);
2833
2834         /* release device resources */
2835         iounmap(priv->base);
2836         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, resource_size(res));

        /* release the clock taken in probe */
        clk_disable_unprepare(priv->mac_clk);
        clk_put(priv->mac_clk);

        platform_set_drvdata(pdev, NULL);
2840         free_netdev(dev);
2841         return 0;
2842 }
2843
2844 struct platform_driver bcm63xx_enetsw_driver = {
2845         .probe  = bcm_enetsw_probe,
2846         .remove = bcm_enetsw_remove,
2847         .driver = {
2848                 .name   = "bcm63xx_enetsw",
2849                 .owner  = THIS_MODULE,
2850         },
2851 };
2852
2853 /* reserve & remap memory space shared between all macs */
2854 static int bcm_enet_shared_probe(struct platform_device *pdev)
2855 {
2856         struct resource *res;
2857         void __iomem *p[3];
2858         unsigned int i;
2859
2860         memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));
2861
2862         for (i = 0; i < 3; i++) {
2863                 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
2864                 p[i] = devm_ioremap_resource(&pdev->dev, res);
2865                 if (IS_ERR(p[i]))
2866                         return PTR_ERR(p[i]);
2867         }
2868
2869         memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));
2870
2871         return 0;
2872 }
2873
2874 static int bcm_enet_shared_remove(struct platform_device *pdev)
2875 {
2876         return 0;
2877 }
2878
2879 /* this "shared" driver is needed because both macs share a single
2880  * address space
2881  */
2882 struct platform_driver bcm63xx_enet_shared_driver = {
2883         .probe  = bcm_enet_shared_probe,
2884         .remove = bcm_enet_shared_remove,
2885         .driver = {
2886                 .name   = "bcm63xx_enet_shared",
2887                 .owner  = THIS_MODULE,
2888         },
2889 };
2890
2891 /* entry point */
2892 static int __init bcm_enet_init(void)
2893 {
2894         int ret;
2895
2896         ret = platform_driver_register(&bcm63xx_enet_shared_driver);
2897         if (ret)
2898                 return ret;
2899
2900         ret = platform_driver_register(&bcm63xx_enet_driver);
2901         if (ret)
2902                 platform_driver_unregister(&bcm63xx_enet_shared_driver);
2903
2904         ret = platform_driver_register(&bcm63xx_enetsw_driver);
2905         if (ret) {
2906                 platform_driver_unregister(&bcm63xx_enet_driver);
2907                 platform_driver_unregister(&bcm63xx_enet_shared_driver);
2908         }
2909
2910         return ret;
2911 }
2912
2913 static void __exit bcm_enet_exit(void)
2914 {
2915         platform_driver_unregister(&bcm63xx_enet_driver);
2916         platform_driver_unregister(&bcm63xx_enetsw_driver);
2917         platform_driver_unregister(&bcm63xx_enet_shared_driver);
2918 }
2919
2920
2921 module_init(bcm_enet_init);
2922 module_exit(bcm_enet_exit);
2923
2924 MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
2925 MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
2926 MODULE_LICENSE("GPL");