ibm_newemac Use status property for unused/unwired EMACs
[cascardo/linux.git] / drivers / net / ibm_newemac / core.c
1 /*
2  * drivers/net/ibm_newemac/core.c
3  *
4  * Driver for PowerPC 4xx on-chip ethernet controller.
5  *
6  * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7  *                <benh@kernel.crashing.org>
8  *
9  * Based on the arch/ppc version of the driver:
10  *
11  * Copyright (c) 2004, 2005 Zultys Technologies.
12  * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13  *
14  * Based on original work by
15  *      Matt Porter <mporter@kernel.crashing.org>
16  *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17  *      Armin Kuster <akuster@mvista.com>
18  *      Johnnie Peters <jpeters@mvista.com>
19  *
20  * This program is free software; you can redistribute  it and/or modify it
21  * under  the terms of  the GNU General  Public License as published by the
22  * Free Software Foundation;  either version 2 of the  License, or (at your
23  * option) any later version.
24  *
25  */
26
27 #include <linux/sched.h>
28 #include <linux/string.h>
29 #include <linux/errno.h>
30 #include <linux/delay.h>
31 #include <linux/types.h>
32 #include <linux/pci.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/crc32.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/bitops.h>
39 #include <linux/workqueue.h>
40 #include <linux/of.h>
41
42 #include <asm/processor.h>
43 #include <asm/io.h>
44 #include <asm/dma.h>
45 #include <asm/uaccess.h>
46
47 #include "core.h"
48
49 /*
50  * Lack of dma_unmap_???? calls is intentional.
51  *
52  * API-correct usage requires additional support state information to be
53  * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
54  * EMAC design (e.g. TX buffer passed from network stack can be split into
55  * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
56  * maintaining such information will add additional overhead.
57  * Current DMA API implementation for 4xx processors only ensures cache coherency
58  * and dma_unmap_???? routines are empty and are likely to stay this way.
59  * I decided to omit dma_unmap_??? calls because I don't want to add additional
60  * complexity just for the sake of following some abstract API, when it doesn't
61  * add any real benefit to the driver. I understand that this decision maybe
62  * controversial, but I really tried to make code API-correct and efficient
63  * at the same time and didn't come up with code I liked :(.                --ebs
64  */
65
66 #define DRV_NAME        "emac"
67 #define DRV_VERSION     "3.54"
68 #define DRV_DESC        "PPC 4xx OCP EMAC driver"
69
70 MODULE_DESCRIPTION(DRV_DESC);
71 MODULE_AUTHOR
72     ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
73 MODULE_LICENSE("GPL");
74
75 /*
76  * PPC64 doesn't (yet) have a cacheable_memcpy
77  */
78 #ifdef CONFIG_PPC64
79 #define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
80 #endif
81
82 /* minimum number of free TX descriptors required to wake up TX process */
83 #define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)
84
85 /* If packet size is less than this number, we allocate small skb and copy packet
86  * contents into it instead of just sending original big skb up
87  */
88 #define EMAC_RX_COPY_THRESH             CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
89
90 /* Since multiple EMACs share MDIO lines in various ways, we need
91  * to avoid re-using the same PHY ID in cases where the arch didn't
92  * setup precise phy_map entries
93  *
94  * XXX This is something that needs to be reworked as we can have multiple
95  * EMAC "sets" (multiple ASICs containing several EMACs) though we can
96  * probably require in that case to have explicit PHY IDs in the device-tree
97  */
98 static u32 busy_phy_map;
99 static DEFINE_MUTEX(emac_phy_map_lock);
100
101 /* This is the wait queue used to wait on any event related to probe, that
102  * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
103  */
104 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
105
106 /* Having stable interface names is a doomed idea. However, it would be nice
107  * if we didn't have completely random interface names at boot too :-) It's
108  * just a matter of making everybody's life easier. Since we are doing
109  * threaded probing, it's a bit harder though. The base idea here is that
110  * we make up a list of all emacs in the device-tree before we register the
111  * driver. Every emac will then wait for the previous one in the list to
112  * initialize before itself. We should also keep that list ordered by
113  * cell_index.
114  * That list is only 4 entries long, meaning that additional EMACs don't
115  * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
116  */
117
118 #define EMAC_BOOT_LIST_SIZE     4
119 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
120
121 /* How long should I wait for dependent devices ? */
122 #define EMAC_PROBE_DEP_TIMEOUT  (HZ * 5)
123
124 /* I don't want to litter system log with timeout errors
125  * when we have brain-damaged PHY.
126  */
127 static inline void emac_report_timeout_error(struct emac_instance *dev,
128                                              const char *error)
129 {
130         if (net_ratelimit())
131                 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
132 }
133
134 /* PHY polling intervals */
135 #define PHY_POLL_LINK_ON        HZ
136 #define PHY_POLL_LINK_OFF       (HZ / 5)
137
138 /* Graceful stop timeouts in us.
139  * We should allow up to 1 frame time (full-duplex, ignoring collisions)
140  */
141 #define STOP_TIMEOUT_10         1230
142 #define STOP_TIMEOUT_100        124
143 #define STOP_TIMEOUT_1000       13
144 #define STOP_TIMEOUT_1000_JUMBO 73
145
146 static unsigned char default_mcast_addr[] = {
147         0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
148 };
149
150 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
151 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
152         "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
153         "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
154         "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
155         "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
156         "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
157         "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
158         "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
159         "rx_bad_packet", "rx_runt_packet", "rx_short_event",
160         "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
161         "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
162         "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
163         "tx_bd_excessive_collisions", "tx_bd_late_collision",
164         "tx_bd_multple_collisions", "tx_bd_single_collision",
165         "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
166         "tx_errors"
167 };
168
169 static irqreturn_t emac_irq(int irq, void *dev_instance);
170 static void emac_clean_tx_ring(struct emac_instance *dev);
171 static void __emac_set_multicast_list(struct emac_instance *dev);
172
173 static inline int emac_phy_supports_gige(int phy_mode)
174 {
175         return  phy_mode == PHY_MODE_GMII ||
176                 phy_mode == PHY_MODE_RGMII ||
177                 phy_mode == PHY_MODE_TBI ||
178                 phy_mode == PHY_MODE_RTBI;
179 }
180
181 static inline int emac_phy_gpcs(int phy_mode)
182 {
183         return  phy_mode == PHY_MODE_TBI ||
184                 phy_mode == PHY_MODE_RTBI;
185 }
186
187 static inline void emac_tx_enable(struct emac_instance *dev)
188 {
189         struct emac_regs __iomem *p = dev->emacp;
190         u32 r;
191
192         DBG(dev, "tx_enable" NL);
193
194         r = in_be32(&p->mr0);
195         if (!(r & EMAC_MR0_TXE))
196                 out_be32(&p->mr0, r | EMAC_MR0_TXE);
197 }
198
199 static void emac_tx_disable(struct emac_instance *dev)
200 {
201         struct emac_regs __iomem *p = dev->emacp;
202         u32 r;
203
204         DBG(dev, "tx_disable" NL);
205
206         r = in_be32(&p->mr0);
207         if (r & EMAC_MR0_TXE) {
208                 int n = dev->stop_timeout;
209                 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
210                 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
211                         udelay(1);
212                         --n;
213                 }
214                 if (unlikely(!n))
215                         emac_report_timeout_error(dev, "TX disable timeout");
216         }
217 }
218
219 static void emac_rx_enable(struct emac_instance *dev)
220 {
221         struct emac_regs __iomem *p = dev->emacp;
222         u32 r;
223
224         if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
225                 goto out;
226
227         DBG(dev, "rx_enable" NL);
228
229         r = in_be32(&p->mr0);
230         if (!(r & EMAC_MR0_RXE)) {
231                 if (unlikely(!(r & EMAC_MR0_RXI))) {
232                         /* Wait if previous async disable is still in progress */
233                         int n = dev->stop_timeout;
234                         while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
235                                 udelay(1);
236                                 --n;
237                         }
238                         if (unlikely(!n))
239                                 emac_report_timeout_error(dev,
240                                                           "RX disable timeout");
241                 }
242                 out_be32(&p->mr0, r | EMAC_MR0_RXE);
243         }
244  out:
245         ;
246 }
247
248 static void emac_rx_disable(struct emac_instance *dev)
249 {
250         struct emac_regs __iomem *p = dev->emacp;
251         u32 r;
252
253         DBG(dev, "rx_disable" NL);
254
255         r = in_be32(&p->mr0);
256         if (r & EMAC_MR0_RXE) {
257                 int n = dev->stop_timeout;
258                 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
259                 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
260                         udelay(1);
261                         --n;
262                 }
263                 if (unlikely(!n))
264                         emac_report_timeout_error(dev, "RX disable timeout");
265         }
266 }
267
/* Quiesce the netdev side before reconfiguring the hardware: block
 * multicast filter updates (replayed by emac_netif_start()), disable
 * MAL polling and stop the TX queue.
 */
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	/* Taken under the TX lock so in-flight emac_set_multicast_list()
	 * callers observe a consistent value */
	dev->no_mcast = 1;
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
277
/* Undo emac_netif_stop(): re-allow multicast updates (applying any
 * change deferred while stopped), wake the TX queue and re-enable MAL
 * polling.
 */
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 0;
	/* Replay a multicast-list change that arrived while stopped */
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
295
296 static inline void emac_rx_disable_async(struct emac_instance *dev)
297 {
298         struct emac_regs __iomem *p = dev->emacp;
299         u32 r;
300
301         DBG(dev, "rx_disable_async" NL);
302
303         r = in_be32(&p->mr0);
304         if (r & EMAC_MR0_RXE)
305                 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
306 }
307
308 static int emac_reset(struct emac_instance *dev)
309 {
310         struct emac_regs __iomem *p = dev->emacp;
311         int n = 20;
312
313         DBG(dev, "reset" NL);
314
315         if (!dev->reset_failed) {
316                 /* 40x erratum suggests stopping RX channel before reset,
317                  * we stop TX as well
318                  */
319                 emac_rx_disable(dev);
320                 emac_tx_disable(dev);
321         }
322
323         out_be32(&p->mr0, EMAC_MR0_SRST);
324         while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
325                 --n;
326
327         if (n) {
328                 dev->reset_failed = 0;
329                 return 0;
330         } else {
331                 emac_report_timeout_error(dev, "reset timeout");
332                 dev->reset_failed = 1;
333                 return -ETIMEDOUT;
334         }
335 }
336
/* Program the four 16-bit group-address hash-table (GAHT) registers
 * from the interface's multicast list.  Each address selects one of
 * 64 filter bits via the top 6 bits of its bit-reversed CRC32.
 */
static void emac_hash_mc(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		/* 63 - (crc >> 26) maps the 6 CRC bits onto filter bits
		 * 0..63; bits 15..0 of each gaht word are MSB-first */
		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}
359
360 static inline u32 emac_iff2rmr(struct net_device *ndev)
361 {
362         struct emac_instance *dev = netdev_priv(ndev);
363         u32 r;
364
365         r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
366
367         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
368             r |= EMAC4_RMR_BASE;
369         else
370             r |= EMAC_RMR_BASE;
371
372         if (ndev->flags & IFF_PROMISC)
373                 r |= EMAC_RMR_PME;
374         else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
375                 r |= EMAC_RMR_PMME;
376         else if (ndev->mc_count > 0)
377                 r |= EMAC_RMR_MAE;
378
379         return r;
380 }
381
382 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
383 {
384         u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
385
386         DBG2(dev, "__emac_calc_base_mr1" NL);
387
388         switch(tx_size) {
389         case 2048:
390                 ret |= EMAC_MR1_TFS_2K;
391                 break;
392         default:
393                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
394                        dev->ndev->name, tx_size);
395         }
396
397         switch(rx_size) {
398         case 16384:
399                 ret |= EMAC_MR1_RFS_16K;
400                 break;
401         case 4096:
402                 ret |= EMAC_MR1_RFS_4K;
403                 break;
404         default:
405                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
406                        dev->ndev->name, rx_size);
407         }
408
409         return ret;
410 }
411
412 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
413 {
414         u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
415                 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
416
417         DBG2(dev, "__emac4_calc_base_mr1" NL);
418
419         switch(tx_size) {
420         case 4096:
421                 ret |= EMAC4_MR1_TFS_4K;
422                 break;
423         case 2048:
424                 ret |= EMAC4_MR1_TFS_2K;
425                 break;
426         default:
427                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
428                        dev->ndev->name, tx_size);
429         }
430
431         switch(rx_size) {
432         case 16384:
433                 ret |= EMAC4_MR1_RFS_16K;
434                 break;
435         case 4096:
436                 ret |= EMAC4_MR1_RFS_4K;
437                 break;
438         case 2048:
439                 ret |= EMAC4_MR1_RFS_2K;
440                 break;
441         default:
442                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
443                        dev->ndev->name, rx_size);
444         }
445
446         return ret;
447 }
448
449 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
450 {
451         return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
452                 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
453                 __emac_calc_base_mr1(dev, tx_size, rx_size);
454 }
455
456 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
457 {
458         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
459                 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
460         else
461                 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
462 }
463
464 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
465                                  unsigned int low, unsigned int high)
466 {
467         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
468                 return (low << 22) | ( (high & 0x3ff) << 6);
469         else
470                 return (low << 23) | ( (high & 0x1ff) << 7);
471 }
472
/* Program the EMAC to match the current PHY/link state: MR1 (mode,
 * duplex, speed, FIFO sizes), MAC address, VLAN TPID, RX mode, FIFO
 * thresholds, water marks, PAUSE timer and IRQ mask.  With no link
 * the MAC is forced into internal loopback instead of being reset.
 * Returns 0 on success or -ETIMEDOUT if the EMAC reset failed.
 */
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS |
				EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			/* Jumbo frames: enable JPSM and allow a longer
			 * graceful-stop window for the bigger frame time */
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow the our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						  EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		emac_mii_reset_phy(&dev->phy);

	/* Required for Pause packet support in EMAC */
	dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);

	return 0;
}
633
634 static void emac_reinitialize(struct emac_instance *dev)
635 {
636         DBG(dev, "reinitialize" NL);
637
638         emac_netif_stop(dev);
639         if (!emac_configure(dev)) {
640                 emac_tx_enable(dev);
641                 emac_rx_enable(dev);
642         }
643         emac_netif_start(dev);
644 }
645
/* Hard-recover the TX path: stop the MAC transmitter and MAL TX
 * channel, reclaim all queued TX buffers, reprogram the MAC, then
 * restart the channel and both MAC directions.  Called from the
 * TX-timeout reset worker.
 */
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	/* Ring is empty now, rewind all TX ring indices */
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
661
/* Workqueue handler scheduled by emac_tx_timeout(); performs the full
 * TX reset in process context, under link_lock, and only while the
 * device is open.
 */
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}
676
/* netdev watchdog (tx_timeout) hook: runs in atomic context, so defer
 * the actual TX recovery to the reset_work workqueue item.
 */
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
685
686
687 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
688 {
689         int done = !!(stacr & EMAC_STACR_OC);
690
691         if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
692                 done = !done;
693
694         return done;
695 };
696
/* Read PHY register @reg of PHY @id through this EMAC's STA (MDIO)
 * interface.  Serialized by mdio_lock; the shared ZMII/RGMII MDIO
 * port is claimed for the duration of the transaction.  Returns the
 * 16-bit register value on success, -ETIMEDOUT if the interface never
 * went idle/completed, or -EREMOTEIO on a PHY error.
 */
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 10;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	/* On OC-inverted cores the bit must be set to arm the operation */
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 100;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	/* Extract the 16-bit data field from the completed STACR value */
	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	/* Release the shared MDIO port in reverse acquisition order */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
767
/* Write @val to PHY register @reg of PHY @id through this EMAC's STA
 * (MDIO) interface.  Serialized by mdio_lock; the shared ZMII/RGMII
 * MDIO port is claimed for the duration of the transaction.  Timeouts
 * are logged via DBG2 only — the write returns no status to callers.
 */
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 10;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	/* On OC-inverted cores the bit must be set to arm the operation */
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 100;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	/* Release the shared MDIO port in reverse acquisition order */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
828
829 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
830 {
831         struct emac_instance *dev = netdev_priv(ndev);
832         int res;
833
834         res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
835                                (u8) id, (u8) reg);
836         return res;
837 }
838
839 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
840 {
841         struct emac_instance *dev = netdev_priv(ndev);
842
843         __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
844                           (u8) id, (u8) reg, (u16) val);
845 }
846
/* Push the interface's current RX mode (promisc/multi/broadcast flags
 * and multicast hash, derived by emac_iff2rmr()) into the RMR register.
 *
 * Tx lock BH */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.                                                --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	/* Reload the hash filter only when multicast matching is enabled */
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	/* Write the new RX mode, then restart the RX channel */
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
879
880 /* Tx lock BH */
881 static void emac_set_multicast_list(struct net_device *ndev)
882 {
883         struct emac_instance *dev = netdev_priv(ndev);
884
885         DBG(dev, "multicast" NL);
886
887         BUG_ON(!netif_running(dev->ndev));
888
889         if (dev->no_mcast) {
890                 dev->mcast_pending = 1;
891                 return;
892         }
893         __emac_set_multicast_list(dev);
894 }
895
/* Rebuild the RX ring for a new MTU. Any packets still sitting in the
 * ring are dropped; skbs are only reallocated when the new MTU needs
 * bigger buffers. Returns 0 or -ENOMEM; the ring is left in a
 * consistent, restarted state either way.
 */
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	/* Quiesce: stop the netif, the MAC RX side and the MAL channel
	 * before touching descriptors.
	 */
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	/* Throw away any partially-assembled multi-BD frame */
	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		/* Each FIRST bit is one dropped (possibly partial) packet */
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			/* Ring already marked EMPTY above, so we can bail
			 * and restart with the old (smaller) skbs intact.
			 */
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		/* Same headroom/+2 alignment scheme as emac_alloc_rx_skb() */
		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	/* Tell the MAL the new receive channel buffer size */
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
969
970 /* Process ctx, rtnl_lock semaphore */
971 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
972 {
973         struct emac_instance *dev = netdev_priv(ndev);
974         int ret = 0;
975
976         if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
977                 return -EINVAL;
978
979         DBG(dev, "change_mtu(%d)" NL, new_mtu);
980
981         if (netif_running(ndev)) {
982                 /* Check if we really need to reinitalize RX ring */
983                 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
984                         ret = emac_resize_rx_ring(dev, new_mtu);
985         }
986
987         if (!ret) {
988                 ndev->mtu = new_mtu;
989                 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
990                 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
991         }
992
993         return ret;
994 }
995
996 static void emac_clean_tx_ring(struct emac_instance *dev)
997 {
998         int i;
999
1000         for (i = 0; i < NUM_TX_BUFF; ++i) {
1001                 if (dev->tx_skb[i]) {
1002                         dev_kfree_skb(dev->tx_skb[i]);
1003                         dev->tx_skb[i] = NULL;
1004                         if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1005                                 ++dev->estats.tx_dropped;
1006                 }
1007                 dev->tx_desc[i].ctrl = 0;
1008                 dev->tx_desc[i].data_ptr = 0;
1009         }
1010 }
1011
1012 static void emac_clean_rx_ring(struct emac_instance *dev)
1013 {
1014         int i;
1015
1016         for (i = 0; i < NUM_RX_BUFF; ++i)
1017                 if (dev->rx_skb[i]) {
1018                         dev->rx_desc[i].ctrl = 0;
1019                         dev_kfree_skb(dev->rx_skb[i]);
1020                         dev->rx_skb[i] = NULL;
1021                         dev->rx_desc[i].data_ptr = 0;
1022                 }
1023
1024         if (dev->rx_sg_skb) {
1025                 dev_kfree_skb(dev->rx_sg_skb);
1026                 dev->rx_sg_skb = NULL;
1027         }
1028 }
1029
/* Allocate and DMA-map a fresh receive skb for ring slot "slot".
 * Returns 0 on success, -ENOMEM if allocation fails (the slot's old
 * descriptor contents are then left untouched).
 */
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	/* Reserve headroom plus 2 bytes, then map from skb->data - 2 and
	 * store data_ptr + 2: the hardware buffer stays aligned while the
	 * packet data lands 2 bytes in (presumably to 4-byte-align the IP
	 * header, NET_IP_ALIGN-style — confirm against EMAC docs).
	 */
	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	/* data_ptr/data_len must be visible before the BD is handed back
	 * to the MAL by setting EMPTY.
	 */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
1050
1051 static void emac_print_link_status(struct emac_instance *dev)
1052 {
1053         if (netif_carrier_ok(dev->ndev))
1054                 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1055                        dev->ndev->name, dev->phy.speed,
1056                        dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1057                        dev->phy.pause ? ", pause enabled" :
1058                        dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1059         else
1060                 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1061 }
1062
/* ndo open hook: grab the error IRQ, populate the RX ring, start PHY
 * link polling, program the MAC and bring up the MAL channels.
 *
 * Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	/* Reset ring bookkeeping before starting the channels */
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		/* Make link_polling visible before the work can run and
		 * observe it (paired read in emac_force_link_update()).
		 */
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		/* No PHY to poll: report carrier unconditionally */
		netif_carrier_on(dev->ndev);

	/* Program the MAC, then bring up the MAL channels and the netif */
	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	/* Partial RX ring is torn down by emac_clean_rx_ring() */
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
1131
/* BHs disabled */
#if 0
/* Decode the link parameters currently programmed in MR1 and compare
 * them with what the PHY layer reports; non-zero means they differ.
 * Currently unused — kept under #if 0.
 */
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	/* EIFC+APP = symmetric pause, APP alone = asymmetric pause */
	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
1164
/* Periodic PHY link poll, run from the shared workqueue. Re-arms
 * itself with an interval that depends on the current link state.
 */
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of((struct delayed_work *)work,
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	/* Interface was closed meanwhile: bail WITHOUT re-arming */
	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			/* Link came up: apply the new speed/duplex via a
			 * full TX reset while the netif is stopped.
			 */
			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			/* Link went down: stop TX and reinitialize the MAC */
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
1203
/* Force the link poller to re-evaluate the link from scratch: drop
 * carrier, then cancel the pending work and reschedule it at the
 * link-down interval.
 */
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	/* Pairs with the wmb() after link_polling is set in emac_open() */
	smp_rmb();
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		/* Re-check: emac_close() may have cleared link_polling
		 * while we were cancelling, in which case we must not
		 * re-arm the work.
		 */
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
1214
/* ndo stop hook: stop link polling, quiesce MAC and MAL channels,
 * then free the rings and the error IRQ.
 *
 * Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	/* Stop the link poller first so it cannot re-arm itself */
	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_rearming_delayed_work(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	/* opened=0 makes a late-running emac_link_timer() bail out */
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	/* Quiesce hardware before tearing the rings down */
	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	return 0;
}
1244
1245 static inline u16 emac_tx_csum(struct emac_instance *dev,
1246                                struct sk_buff *skb)
1247 {
1248         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1249                 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1250                 ++dev->stats.tx_packets_csum;
1251                 return EMAC_TX_CTRL_TAH_CSUM;
1252         }
1253         return 0;
1254 }
1255
1256 static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1257 {
1258         struct emac_regs __iomem *p = dev->emacp;
1259         struct net_device *ndev = dev->ndev;
1260
1261         /* Send the packet out. If the if makes a significant perf
1262          * difference, then we can store the TMR0 value in "dev"
1263          * instead
1264          */
1265         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1266                 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1267         else
1268                 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1269
1270         if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1271                 netif_stop_queue(ndev);
1272                 DBG2(dev, "stopped TX queue" NL);
1273         }
1274
1275         ndev->trans_start = jiffies;
1276         ++dev->stats.tx_packets;
1277         dev->stats.tx_bytes += len;
1278
1279         return 0;
1280 }
1281
/* Transmit a frame that fits in a single BD (non-SG fast path).
 *
 * Tx lock BH */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	/* Claim the next ring slot; wrap the producer index and flag the
	 * last descriptor in the ring.
	 */
	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	/* data_ptr/data_len must be visible before ctrl sets READY,
	 * otherwise the MAL could fetch a half-written descriptor.
	 */
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
1310
1311 static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1312                                   u32 pd, int len, int last, u16 base_ctrl)
1313 {
1314         while (1) {
1315                 u16 ctrl = base_ctrl;
1316                 int chunk = min(len, MAL_MAX_TX_SIZE);
1317                 len -= chunk;
1318
1319                 slot = (slot + 1) % NUM_TX_BUFF;
1320
1321                 if (last && !len)
1322                         ctrl |= MAL_TX_CTRL_LAST;
1323                 if (slot == NUM_TX_BUFF - 1)
1324                         ctrl |= MAL_TX_CTRL_WRAP;
1325
1326                 dev->tx_skb[slot] = NULL;
1327                 dev->tx_desc[slot].data_ptr = pd;
1328                 dev->tx_desc[slot].data_len = (u16) chunk;
1329                 dev->tx_desc[slot].ctrl = ctrl;
1330                 ++dev->tx_cnt;
1331
1332                 if (!len)
1333                         break;
1334
1335                 pd += chunk;
1336         }
1337         return slot;
1338 }
1339
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	/* From here on, "len" is the linear (head) portion only */
	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	/* No MAL_TX_CTRL_LAST here: emac_xmit_split() sets it on the
	 * frame's final chunk. The first BD's ctrl (with READY) is only
	 * written at the very end, once all other BDs are in place.
	 */
	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		/* Re-check occupancy per fragment; the initial estimate
		 * above may have been too optimistic.
		 */
		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	/* All later BDs must be visible before the first BD is marked
	 * READY, which hands the whole frame to the MAL.
	 */
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return 1;
}
1423
1424 /* Tx lock BHs */
1425 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1426 {
1427         struct emac_error_stats *st = &dev->estats;
1428
1429         DBG(dev, "BD TX error %04x" NL, ctrl);
1430
1431         ++st->tx_bd_errors;
1432         if (ctrl & EMAC_TX_ST_BFCS)
1433                 ++st->tx_bd_bad_fcs;
1434         if (ctrl & EMAC_TX_ST_LCS)
1435                 ++st->tx_bd_carrier_loss;
1436         if (ctrl & EMAC_TX_ST_ED)
1437                 ++st->tx_bd_excessive_deferral;
1438         if (ctrl & EMAC_TX_ST_EC)
1439                 ++st->tx_bd_excessive_collisions;
1440         if (ctrl & EMAC_TX_ST_LC)
1441                 ++st->tx_bd_late_collision;
1442         if (ctrl & EMAC_TX_ST_MC)
1443                 ++st->tx_bd_multple_collisions;
1444         if (ctrl & EMAC_TX_ST_SC)
1445                 ++st->tx_bd_single_collision;
1446         if (ctrl & EMAC_TX_ST_UR)
1447                 ++st->tx_bd_underrun;
1448         if (ctrl & EMAC_TX_ST_SQE)
1449                 ++st->tx_bd_sqe;
1450 }
1451
/* Reap completed TX BDs from the MAL poll path: free skbs, account
 * BD errors and wake the queue once enough slots are released.
 */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	/* TAH-equipped EMACs use a different "bad status" bit set */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		/* Walk from ack_slot until a still-READY BD is hit */
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* Split frames carry the skb only on one BD */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			/* Re-open the queue once occupancy drops below
			 * the wakeup threshold.
			 */
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
1497
/* Hand an RX BD back to the MAL, reusing the skb already attached to
 * the slot. "len" is the number of bytes the hardware wrote into the
 * buffer (0 if untouched) and sizes the re-mapping.
 */
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	/* Re-map the written region back toward the device; this driver
	 * intentionally never unmaps (see comment at the top of the file).
	 */
	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	/* Descriptor updates must be visible before EMPTY re-arms the BD */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1514
1515 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1516 {
1517         struct emac_error_stats *st = &dev->estats;
1518
1519         DBG(dev, "BD RX error %04x" NL, ctrl);
1520
1521         ++st->rx_bd_errors;
1522         if (ctrl & EMAC_RX_ST_OE)
1523                 ++st->rx_bd_overrun;
1524         if (ctrl & EMAC_RX_ST_BP)
1525                 ++st->rx_bd_bad_packet;
1526         if (ctrl & EMAC_RX_ST_RP)
1527                 ++st->rx_bd_runt_packet;
1528         if (ctrl & EMAC_RX_ST_SE)
1529                 ++st->rx_bd_short_event;
1530         if (ctrl & EMAC_RX_ST_AE)
1531                 ++st->rx_bd_alignment_error;
1532         if (ctrl & EMAC_RX_ST_BFCS)
1533                 ++st->rx_bd_bad_fcs;
1534         if (ctrl & EMAC_RX_ST_PTL)
1535                 ++st->rx_bd_packet_too_long;
1536         if (ctrl & EMAC_RX_ST_ORE)
1537                 ++st->rx_bd_out_of_range;
1538         if (ctrl & EMAC_RX_ST_IRE)
1539                 ++st->rx_bd_in_range;
1540 }
1541
1542 static inline void emac_rx_csum(struct emac_instance *dev,
1543                                 struct sk_buff *skb, u16 ctrl)
1544 {
1545 #ifdef CONFIG_IBM_NEW_EMAC_TAH
1546         if (!ctrl && dev->tah_dev) {
1547                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1548                 ++dev->stats.rx_packets_csum;
1549         }
1550 #endif
1551 }
1552
1553 static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1554 {
1555         if (likely(dev->rx_sg_skb != NULL)) {
1556                 int len = dev->rx_desc[slot].data_len;
1557                 int tot_len = dev->rx_sg_skb->len + len;
1558
1559                 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1560                         ++dev->estats.rx_dropped_mtu;
1561                         dev_kfree_skb(dev->rx_sg_skb);
1562                         dev->rx_sg_skb = NULL;
1563                 } else {
1564                         cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
1565                                          dev->rx_skb[slot]->data, len);
1566                         skb_put(dev->rx_sg_skb, len);
1567                         emac_recycle_rx_skb(dev, slot, len);
1568                         return 0;
1569                 }
1570         }
1571         emac_recycle_rx_skb(dev, slot, 0);
1572         return -1;
1573 }
1574
/* Main receive loop: consume up to "budget" BDs from the RX ring and
 * push complete frames into the stack. Handles single-BD frames inline,
 * multi-BD (scatter/gather) frames via rx_sg_skb, and restarts a
 * stopped RX channel once the ring has been drained.
 *
 * NAPI poll context */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		/* Barrier between reading ctrl and data_len so we don't
		 * pick up a stale length for a freshly-filled BD.
		 */
		mb();
		len = dev->rx_desc[slot].data_len;

		/* Multi-BD frames take the scatter/gather slow path */
		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		/* Small frames: copy into a fresh skb and recycle the ring
		 * skb. Bigger frames: hand the ring skb to the stack and
		 * allocate a replacement for the slot.
		 */
		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		/* FIRST BD starts a new sg frame in rx_sg_skb; the later
		 * BDs are appended by emac_rx_sg_append() below.
		 */
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			/* LAST BD: the frame is complete — validate its
			 * status and push it via the common path above.
			 */
			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	/* RX channel was stopped (e.g. by emac_rxde()): restart it once
	 * the ring has been drained, re-scanning if more BDs arrived in
	 * the meantime, and drop any half-assembled sg frame.
	 */
	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
1700
1701 /* NAPI poll context */
1702 static int emac_peek_rx(void *param)
1703 {
1704         struct emac_instance *dev = param;
1705
1706         return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1707 }
1708
1709 /* NAPI poll context */
1710 static int emac_peek_rx_sg(void *param)
1711 {
1712         struct emac_instance *dev = param;
1713
1714         int slot = dev->rx_slot;
1715         while (1) {
1716                 u16 ctrl = dev->rx_desc[slot].ctrl;
1717                 if (ctrl & MAL_RX_CTRL_EMPTY)
1718                         return 0;
1719                 else if (ctrl & MAL_RX_CTRL_LAST)
1720                         return 1;
1721
1722                 slot = (slot + 1) % NUM_RX_BUFF;
1723
1724                 /* I'm just being paranoid here :) */
1725                 if (unlikely(slot == dev->rx_slot))
1726                         return 0;
1727         }
1728 }
1729
1730 /* Hard IRQ */
1731 static void emac_rxde(void *param)
1732 {
1733         struct emac_instance *dev = param;
1734
1735         ++dev->estats.rx_stopped;
1736         emac_rx_disable_async(dev);
1737 }
1738
1739 /* Hard IRQ */
1740 static irqreturn_t emac_irq(int irq, void *dev_instance)
1741 {
1742         struct emac_instance *dev = dev_instance;
1743         struct emac_regs __iomem *p = dev->emacp;
1744         struct emac_error_stats *st = &dev->estats;
1745         u32 isr;
1746
1747         spin_lock(&dev->lock);
1748
1749         isr = in_be32(&p->isr);
1750         out_be32(&p->isr, isr);
1751
1752         DBG(dev, "isr = %08x" NL, isr);
1753
1754         if (isr & EMAC4_ISR_TXPE)
1755                 ++st->tx_parity;
1756         if (isr & EMAC4_ISR_RXPE)
1757                 ++st->rx_parity;
1758         if (isr & EMAC4_ISR_TXUE)
1759                 ++st->tx_underrun;
1760         if (isr & EMAC4_ISR_RXOE)
1761                 ++st->rx_fifo_overrun;
1762         if (isr & EMAC_ISR_OVR)
1763                 ++st->rx_overrun;
1764         if (isr & EMAC_ISR_BP)
1765                 ++st->rx_bad_packet;
1766         if (isr & EMAC_ISR_RP)
1767                 ++st->rx_runt_packet;
1768         if (isr & EMAC_ISR_SE)
1769                 ++st->rx_short_event;
1770         if (isr & EMAC_ISR_ALE)
1771                 ++st->rx_alignment_error;
1772         if (isr & EMAC_ISR_BFCS)
1773                 ++st->rx_bad_fcs;
1774         if (isr & EMAC_ISR_PTLE)
1775                 ++st->rx_packet_too_long;
1776         if (isr & EMAC_ISR_ORE)
1777                 ++st->rx_out_of_range;
1778         if (isr & EMAC_ISR_IRE)
1779                 ++st->rx_in_range;
1780         if (isr & EMAC_ISR_SQE)
1781                 ++st->tx_sqe;
1782         if (isr & EMAC_ISR_TE)
1783                 ++st->tx_errors;
1784
1785         spin_unlock(&dev->lock);
1786
1787         return IRQ_HANDLED;
1788 }
1789
1790 static struct net_device_stats *emac_stats(struct net_device *ndev)
1791 {
1792         struct emac_instance *dev = netdev_priv(ndev);
1793         struct emac_stats *st = &dev->stats;
1794         struct emac_error_stats *est = &dev->estats;
1795         struct net_device_stats *nst = &dev->nstats;
1796         unsigned long flags;
1797
1798         DBG2(dev, "stats" NL);
1799
1800         /* Compute "legacy" statistics */
1801         spin_lock_irqsave(&dev->lock, flags);
1802         nst->rx_packets = (unsigned long)st->rx_packets;
1803         nst->rx_bytes = (unsigned long)st->rx_bytes;
1804         nst->tx_packets = (unsigned long)st->tx_packets;
1805         nst->tx_bytes = (unsigned long)st->tx_bytes;
1806         nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1807                                           est->rx_dropped_error +
1808                                           est->rx_dropped_resize +
1809                                           est->rx_dropped_mtu);
1810         nst->tx_dropped = (unsigned long)est->tx_dropped;
1811
1812         nst->rx_errors = (unsigned long)est->rx_bd_errors;
1813         nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1814                                               est->rx_fifo_overrun +
1815                                               est->rx_overrun);
1816         nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1817                                                est->rx_alignment_error);
1818         nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1819                                              est->rx_bad_fcs);
1820         nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1821                                                 est->rx_bd_short_event +
1822                                                 est->rx_bd_packet_too_long +
1823                                                 est->rx_bd_out_of_range +
1824                                                 est->rx_bd_in_range +
1825                                                 est->rx_runt_packet +
1826                                                 est->rx_short_event +
1827                                                 est->rx_packet_too_long +
1828                                                 est->rx_out_of_range +
1829                                                 est->rx_in_range);
1830
1831         nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1832         nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1833                                               est->tx_underrun);
1834         nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1835         nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1836                                           est->tx_bd_excessive_collisions +
1837                                           est->tx_bd_late_collision +
1838                                           est->tx_bd_multple_collisions);
1839         spin_unlock_irqrestore(&dev->lock, flags);
1840         return nst;
1841 }
1842
/* MAL callbacks used when RX scatter/gather is not active */
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};
1849
/* MAL callbacks for the scatter/gather RX case: only peek_rx differs,
 * since it must look for a complete (LAST-terminated) chain.
 */
static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
1856
1857 /* Ethtool support */
1858 static int emac_ethtool_get_settings(struct net_device *ndev,
1859                                      struct ethtool_cmd *cmd)
1860 {
1861         struct emac_instance *dev = netdev_priv(ndev);
1862
1863         cmd->supported = dev->phy.features;
1864         cmd->port = PORT_MII;
1865         cmd->phy_address = dev->phy.address;
1866         cmd->transceiver =
1867             dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1868
1869         mutex_lock(&dev->link_lock);
1870         cmd->advertising = dev->phy.advertising;
1871         cmd->autoneg = dev->phy.autoneg;
1872         cmd->speed = dev->phy.speed;
1873         cmd->duplex = dev->phy.duplex;
1874         mutex_unlock(&dev->link_lock);
1875
1876         return 0;
1877 }
1878
1879 static int emac_ethtool_set_settings(struct net_device *ndev,
1880                                      struct ethtool_cmd *cmd)
1881 {
1882         struct emac_instance *dev = netdev_priv(ndev);
1883         u32 f = dev->phy.features;
1884
1885         DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1886             cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1887
1888         /* Basic sanity checks */
1889         if (dev->phy.address < 0)
1890                 return -EOPNOTSUPP;
1891         if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1892                 return -EINVAL;
1893         if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1894                 return -EINVAL;
1895         if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1896                 return -EINVAL;
1897
1898         if (cmd->autoneg == AUTONEG_DISABLE) {
1899                 switch (cmd->speed) {
1900                 case SPEED_10:
1901                         if (cmd->duplex == DUPLEX_HALF
1902                             && !(f & SUPPORTED_10baseT_Half))
1903                                 return -EINVAL;
1904                         if (cmd->duplex == DUPLEX_FULL
1905                             && !(f & SUPPORTED_10baseT_Full))
1906                                 return -EINVAL;
1907                         break;
1908                 case SPEED_100:
1909                         if (cmd->duplex == DUPLEX_HALF
1910                             && !(f & SUPPORTED_100baseT_Half))
1911                                 return -EINVAL;
1912                         if (cmd->duplex == DUPLEX_FULL
1913                             && !(f & SUPPORTED_100baseT_Full))
1914                                 return -EINVAL;
1915                         break;
1916                 case SPEED_1000:
1917                         if (cmd->duplex == DUPLEX_HALF
1918                             && !(f & SUPPORTED_1000baseT_Half))
1919                                 return -EINVAL;
1920                         if (cmd->duplex == DUPLEX_FULL
1921                             && !(f & SUPPORTED_1000baseT_Full))
1922                                 return -EINVAL;
1923                         break;
1924                 default:
1925                         return -EINVAL;
1926                 }
1927
1928                 mutex_lock(&dev->link_lock);
1929                 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1930                                                 cmd->duplex);
1931                 mutex_unlock(&dev->link_lock);
1932
1933         } else {
1934                 if (!(f & SUPPORTED_Autoneg))
1935                         return -EINVAL;
1936
1937                 mutex_lock(&dev->link_lock);
1938                 dev->phy.def->ops->setup_aneg(&dev->phy,
1939                                               (cmd->advertising & f) |
1940                                               (dev->phy.advertising &
1941                                                (ADVERTISED_Pause |
1942                                                 ADVERTISED_Asym_Pause)));
1943                 mutex_unlock(&dev->link_lock);
1944         }
1945         emac_force_link_update(dev);
1946
1947         return 0;
1948 }
1949
1950 static void emac_ethtool_get_ringparam(struct net_device *ndev,
1951                                        struct ethtool_ringparam *rp)
1952 {
1953         rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1954         rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1955 }
1956
1957 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1958                                         struct ethtool_pauseparam *pp)
1959 {
1960         struct emac_instance *dev = netdev_priv(ndev);
1961
1962         mutex_lock(&dev->link_lock);
1963         if ((dev->phy.features & SUPPORTED_Autoneg) &&
1964             (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
1965                 pp->autoneg = 1;
1966
1967         if (dev->phy.duplex == DUPLEX_FULL) {
1968                 if (dev->phy.pause)
1969                         pp->rx_pause = pp->tx_pause = 1;
1970                 else if (dev->phy.asym_pause)
1971                         pp->tx_pause = 1;
1972         }
1973         mutex_unlock(&dev->link_lock);
1974 }
1975
1976 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
1977 {
1978         struct emac_instance *dev = netdev_priv(ndev);
1979
1980         return dev->tah_dev != NULL;
1981 }
1982
1983 static int emac_get_regs_len(struct emac_instance *dev)
1984 {
1985         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1986                 return sizeof(struct emac_ethtool_regs_subhdr) +
1987                         EMAC4_ETHTOOL_REGS_SIZE;
1988         else
1989                 return sizeof(struct emac_ethtool_regs_subhdr) +
1990                         EMAC_ETHTOOL_REGS_SIZE;
1991 }
1992
1993 static int emac_ethtool_get_regs_len(struct net_device *ndev)
1994 {
1995         struct emac_instance *dev = netdev_priv(ndev);
1996         int size;
1997
1998         size = sizeof(struct emac_ethtool_regs_hdr) +
1999                 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2000         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2001                 size += zmii_get_regs_len(dev->zmii_dev);
2002         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2003                 size += rgmii_get_regs_len(dev->rgmii_dev);
2004         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2005                 size += tah_get_regs_len(dev->tah_dev);
2006
2007         return size;
2008 }
2009
2010 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2011 {
2012         struct emac_ethtool_regs_subhdr *hdr = buf;
2013
2014         hdr->index = dev->cell_index;
2015         if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2016                 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2017                 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
2018                 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE);
2019         } else {
2020                 hdr->version = EMAC_ETHTOOL_REGS_VER;
2021                 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
2022                 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
2023         }
2024 }
2025
2026 static void emac_ethtool_get_regs(struct net_device *ndev,
2027                                   struct ethtool_regs *regs, void *buf)
2028 {
2029         struct emac_instance *dev = netdev_priv(ndev);
2030         struct emac_ethtool_regs_hdr *hdr = buf;
2031
2032         hdr->components = 0;
2033         buf = hdr + 1;
2034
2035         buf = mal_dump_regs(dev->mal, buf);
2036         buf = emac_dump_regs(dev, buf);
2037         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2038                 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2039                 buf = zmii_dump_regs(dev->zmii_dev, buf);
2040         }
2041         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2042                 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2043                 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2044         }
2045         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2046                 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2047                 buf = tah_dump_regs(dev->tah_dev, buf);
2048         }
2049 }
2050
2051 static int emac_ethtool_nway_reset(struct net_device *ndev)
2052 {
2053         struct emac_instance *dev = netdev_priv(ndev);
2054         int res = 0;
2055
2056         DBG(dev, "nway_reset" NL);
2057
2058         if (dev->phy.address < 0)
2059                 return -EOPNOTSUPP;
2060
2061         mutex_lock(&dev->link_lock);
2062         if (!dev->phy.autoneg) {
2063                 res = -EINVAL;
2064                 goto out;
2065         }
2066
2067         dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2068  out:
2069         mutex_unlock(&dev->link_lock);
2070         emac_force_link_update(dev);
2071         return res;
2072 }
2073
/* Number of u64 counters exported through get_ethtool_stats */
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}
2078
2079 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2080                                      u8 * buf)
2081 {
2082         if (stringset == ETH_SS_STATS)
2083                 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2084 }
2085
2086 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2087                                            struct ethtool_stats *estats,
2088                                            u64 * tmp_stats)
2089 {
2090         struct emac_instance *dev = netdev_priv(ndev);
2091
2092         memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2093         tmp_stats += sizeof(dev->stats) / sizeof(u64);
2094         memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2095 }
2096
2097 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2098                                      struct ethtool_drvinfo *info)
2099 {
2100         struct emac_instance *dev = netdev_priv(ndev);
2101
2102         strcpy(info->driver, "ibm_emac");
2103         strcpy(info->version, DRV_VERSION);
2104         info->fw_version[0] = '\0';
2105         sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2106                 dev->cell_index, dev->ofdev->node->full_name);
2107         info->n_stats = emac_ethtool_get_stats_count(ndev);
2108         info->regdump_len = emac_ethtool_get_regs_len(ndev);
2109 }
2110
/* ethtool entry points: MII link settings, register dumps, fixed
 * ring/pause parameters and the driver-private statistics set.
 */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
2134
2135 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2136 {
2137         struct emac_instance *dev = netdev_priv(ndev);
2138         uint16_t *data = (uint16_t *) & rq->ifr_ifru;
2139
2140         DBG(dev, "ioctl %08x" NL, cmd);
2141
2142         if (dev->phy.address < 0)
2143                 return -EOPNOTSUPP;
2144
2145         switch (cmd) {
2146         case SIOCGMIIPHY:
2147         case SIOCDEVPRIVATE:
2148                 data[0] = dev->phy.address;
2149                 /* Fall through */
2150         case SIOCGMIIREG:
2151         case SIOCDEVPRIVATE + 1:
2152                 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
2153                 return 0;
2154
2155         case SIOCSMIIREG:
2156         case SIOCDEVPRIVATE + 2:
2157                 if (!capable(CAP_NET_ADMIN))
2158                         return -EPERM;
2159                 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
2160                 return 0;
2161         default:
2162                 return -EOPNOTSUPP;
2163         }
2164 }
2165
/* One probe-order dependency of an EMAC instance: the phandle read
 * from the device tree, plus the node / of_device / driver data
 * resolved from it once the corresponding driver has bound.
 */
struct emac_depentry {
	u32                     phandle;
	struct device_node      *node;
	struct of_device        *ofdev;
	void                    *drvdata;
};

/* Indices into the dependency array built by emac_wait_deps() */
#define EMAC_DEP_MAL_IDX        0
#define EMAC_DEP_ZMII_IDX       1
#define EMAC_DEP_RGMII_IDX      2
#define EMAC_DEP_TAH_IDX        3
#define EMAC_DEP_MDIO_IDX       4
/* Previous EMAC in the boot list; used to serialize probing */
#define EMAC_DEP_PREV_IDX       5
#define EMAC_DEP_COUNT          6
2180
/* Try to resolve every entry in @deps: find the device-tree node from
 * the phandle, then the bound of_device, then its driver data.  An
 * entry counts as satisfied when it has no dependency (phandle == 0)
 * or its drvdata is set.  Returns non-zero once ALL entries are
 * satisfied; called repeatedly as the wait condition in
 * emac_wait_deps().
 */
static int __devinit emac_check_deps(struct emac_instance *dev,
				     struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, allright */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	return (there == EMAC_DEP_COUNT);
}
2219
/* Release the of_device references stored by emac_wait_deps().
 * NOTE(review): the NULL guards are kept as-is — of_dev_put()'s
 * NULL-safety is not visible from here.
 */
static void emac_put_deps(struct emac_instance *dev)
{
	if (dev->mal_dev)
		of_dev_put(dev->mal_dev);
	if (dev->zmii_dev)
		of_dev_put(dev->zmii_dev);
	if (dev->rgmii_dev)
		of_dev_put(dev->rgmii_dev);
	if (dev->mdio_dev)
		of_dev_put(dev->mdio_dev);
	if (dev->tah_dev)
		of_dev_put(dev->tah_dev);
}
2233
2234 static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2235                                         unsigned long action, void *data)
2236 {
2237         /* We are only intereted in device addition */
2238         if (action == BUS_NOTIFY_BOUND_DRIVER)
2239                 wake_up_all(&emac_probe_wait);
2240         return 0;
2241 }
2242
/* Bus notifier registered around the dependency wait in
 * emac_wait_deps(); wakes the waiters on every driver bind.
 */
static struct notifier_block emac_of_bus_notifier __devinitdata = {
	.notifier_call = emac_of_bus_notify
};
2246
/* Wait (with timeout) until everything this EMAC depends on — MAL,
 * ZMII/RGMII bridges, TAH, MDIO provider and the previous EMAC in the
 * boot list — has been probed.  A bus notifier wakes us on every
 * driver bind so emac_check_deps() can be re-evaluated.  On success
 * the resolved of_devices are stored in @dev (ownership transferred);
 * on failure every reference is dropped and -ENODEV is returned.
 */
static int __devinit emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	/* Not a real phandle: any non-zero value arms the "previous
	 * EMAC" dependency, which emac_check_deps() special-cases.
	 */
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	/* Final re-check decides success after the (possibly timed-out)
	 * wait above.
	 */
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		if (deps[i].node)
			of_node_put(deps[i].node);
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	/* The "previous EMAC" reference is only used for ordering and
	 * is always dropped here.
	 */
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
2286
2287 static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2288                                          u32 *val, int fatal)
2289 {
2290         int len;
2291         const u32 *prop = of_get_property(np, name, &len);
2292         if (prop == NULL || len < sizeof(u32)) {
2293                 if (fatal)
2294                         printk(KERN_ERR "%s: missing %s property\n",
2295                                np->full_name, name);
2296                 return -ENODEV;
2297         }
2298         *val = *prop;
2299         return 0;
2300 }
2301
/* Locate and initialize the PHY for this EMAC.  Handles three cases:
 * a fully PHY-less setup (neither phy-address nor phy-map given), a
 * GPCS internal PHY, and a scan of the MDIO bus constrained by
 * phy-map/phy-address.  Returns 0 on success, -ENXIO when no PHY is
 * found.
 */
static int __devinit emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration.
	 * XXX I probably should move these settings to the dev tree
	 */
	if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
		emac_reset(dev);

		/* PHY-less configuration.
		 * XXX I probably should move these settings to the dev tree
		 */
		dev->phy.address = -1;
		/* Fixed link parameters used when no PHY is attached */
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;

		return 0;
	}

	/* busy_phy_map is shared between instances; serialize the scan */
	mutex_lock(&emac_phy_map_lock);
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	/* An explicit phy-address overrides the map: mark every other
	 * address as unavailable so only that one gets probed.
	 */
	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	/* Scan all 32 MDIO addresses that are not masked out */
	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}
	mutex_unlock(&emac_phy_map_lock);
	if (i == 0x20) {
		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
		return -ENXIO;
	}

	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}
2411
2412 static int __devinit emac_init_config(struct emac_instance *dev)
2413 {
2414         struct device_node *np = dev->ofdev->node;
2415         const void *p;
2416         unsigned int plen;
2417         const char *pm, *phy_modes[] = {
2418                 [PHY_MODE_NA] = "",
2419                 [PHY_MODE_MII] = "mii",
2420                 [PHY_MODE_RMII] = "rmii",
2421                 [PHY_MODE_SMII] = "smii",
2422                 [PHY_MODE_RGMII] = "rgmii",
2423                 [PHY_MODE_TBI] = "tbi",
2424                 [PHY_MODE_GMII] = "gmii",
2425                 [PHY_MODE_RTBI] = "rtbi",
2426                 [PHY_MODE_SGMII] = "sgmii",
2427         };
2428
2429         /* Read config from device-tree */
2430         if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2431                 return -ENXIO;
2432         if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2433                 return -ENXIO;
2434         if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2435                 return -ENXIO;
2436         if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2437                 return -ENXIO;
2438         if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2439                 dev->max_mtu = 1500;
2440         if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2441                 dev->rx_fifo_size = 2048;
2442         if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2443                 dev->tx_fifo_size = 2048;
2444         if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2445                 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2446         if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2447                 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2448         if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2449                 dev->phy_address = 0xffffffff;
2450         if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2451                 dev->phy_map = 0xffffffff;
2452         if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2453                 return -ENXIO;
2454         if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2455                 dev->tah_ph = 0;
2456         if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2457                 dev->tah_port = 0;
2458         if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2459                 dev->mdio_ph = 0;
2460         if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2461                 dev->zmii_ph = 0;;
2462         if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2463                 dev->zmii_port = 0xffffffff;;
2464         if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2465                 dev->rgmii_ph = 0;;
2466         if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2467                 dev->rgmii_port = 0xffffffff;;
2468         if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2469                 dev->fifo_entry_size = 16;
2470         if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2471                 dev->mal_burst_size = 256;
2472
2473         /* PHY mode needs some decoding */
2474         dev->phy_mode = PHY_MODE_NA;
2475         pm = of_get_property(np, "phy-mode", &plen);
2476         if (pm != NULL) {
2477                 int i;
2478                 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2479                         if (!strcasecmp(pm, phy_modes[i])) {
2480                                 dev->phy_mode = i;
2481                                 break;
2482                         }
2483         }
2484
2485         /* Backward compat with non-final DT */
2486         if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2487                 u32 nmode = *(const u32 *)pm;
2488                 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2489                         dev->phy_mode = nmode;
2490         }
2491
2492         /* Check EMAC version */
2493         if (of_device_is_compatible(np, "ibm,emac4"))
2494                 dev->features |= EMAC_FTR_EMAC4;
2495
2496         /* Fixup some feature bits based on the device tree */
2497         if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2498                 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2499         if (of_get_property(np, "has-new-stacr-staopc", NULL))
2500                 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2501
2502         /* CAB lacks the appropriate properties */
2503         if (of_device_is_compatible(np, "ibm,emac-axon"))
2504                 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2505                         EMAC_FTR_STACR_OC_INVERT;
2506
2507         /* Enable TAH/ZMII/RGMII features as found */
2508         if (dev->tah_ph != 0) {
2509 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2510                 dev->features |= EMAC_FTR_HAS_TAH;
2511 #else
2512                 printk(KERN_ERR "%s: TAH support not enabled !\n",
2513                        np->full_name);
2514                 return -ENXIO;
2515 #endif
2516         }
2517
2518         if (dev->zmii_ph != 0) {
2519 #ifdef CONFIG_IBM_NEW_EMAC_ZMII
2520                 dev->features |= EMAC_FTR_HAS_ZMII;
2521 #else
2522                 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2523                        np->full_name);
2524                 return -ENXIO;
2525 #endif
2526         }
2527
2528         if (dev->rgmii_ph != 0) {
2529 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
2530                 dev->features |= EMAC_FTR_HAS_RGMII;
2531 #else
2532                 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2533                        np->full_name);
2534                 return -ENXIO;
2535 #endif
2536         }
2537
2538         /* Read MAC-address */
2539         p = of_get_property(np, "local-mac-address", NULL);
2540         if (p == NULL) {
2541                 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2542                        np->full_name);
2543                 return -ENXIO;
2544         }
2545         memcpy(dev->ndev->dev_addr, p, 6);
2546
2547         DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2548         DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2549         DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2550         DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
2551         DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);
2552
2553         return 0;
2554 }
2555
2556 static int __devinit emac_probe(struct of_device *ofdev,
2557                                 const struct of_device_id *match)
2558 {
2559         struct net_device *ndev;
2560         struct emac_instance *dev;
2561         struct device_node *np = ofdev->node;
2562         struct device_node **blist = NULL;
2563         int err, i;
2564
2565         /* Skip unused/unwired EMACS.  We leave the check for an unused
2566          * property here for now, but new flat device trees should set a
2567          * status property to "disabled" instead.
2568          */
2569         if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2570                 return -ENODEV;
2571
2572         /* Find ourselves in the bootlist if we are there */
2573         for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2574                 if (emac_boot_list[i] == np)
2575                         blist = &emac_boot_list[i];
2576
2577         /* Allocate our net_device structure */
2578         err = -ENOMEM;
2579         ndev = alloc_etherdev(sizeof(struct emac_instance));
2580         if (!ndev) {
2581                 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2582                        np->full_name);
2583                 goto err_gone;
2584         }
2585         dev = netdev_priv(ndev);
2586         dev->ndev = ndev;
2587         dev->ofdev = ofdev;
2588         dev->blist = blist;
2589         SET_NETDEV_DEV(ndev, &ofdev->dev);
2590
2591         /* Initialize some embedded data structures */
2592         mutex_init(&dev->mdio_lock);
2593         mutex_init(&dev->link_lock);
2594         spin_lock_init(&dev->lock);
2595         INIT_WORK(&dev->reset_work, emac_reset_work);
2596
2597         /* Init various config data based on device-tree */
2598         err = emac_init_config(dev);
2599         if (err != 0)
2600                 goto err_free;
2601
2602         /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2603         dev->emac_irq = irq_of_parse_and_map(np, 0);
2604         dev->wol_irq = irq_of_parse_and_map(np, 1);
2605         if (dev->emac_irq == NO_IRQ) {
2606                 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2607                 goto err_free;
2608         }
2609         ndev->irq = dev->emac_irq;
2610
2611         /* Map EMAC regs */
2612         if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2613                 printk(KERN_ERR "%s: Can't get registers address\n",
2614                        np->full_name);
2615                 goto err_irq_unmap;
2616         }
2617         // TODO : request_mem_region
2618         dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
2619         if (dev->emacp == NULL) {
2620                 printk(KERN_ERR "%s: Can't map device registers!\n",
2621                        np->full_name);
2622                 err = -ENOMEM;
2623                 goto err_irq_unmap;
2624         }
2625
2626         /* Wait for dependent devices */
2627         err = emac_wait_deps(dev);
2628         if (err) {
2629                 printk(KERN_ERR
2630                        "%s: Timeout waiting for dependent devices\n",
2631                        np->full_name);
2632                 /*  display more info about what's missing ? */
2633                 goto err_reg_unmap;
2634         }
2635         dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2636         if (dev->mdio_dev != NULL)
2637                 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2638
2639         /* Register with MAL */
2640         dev->commac.ops = &emac_commac_ops;
2641         dev->commac.dev = dev;
2642         dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2643         dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2644         err = mal_register_commac(dev->mal, &dev->commac);
2645         if (err) {
2646                 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2647                        np->full_name, dev->mal_dev->node->full_name);
2648                 goto err_rel_deps;
2649         }
2650         dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2651         dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2652
2653         /* Get pointers to BD rings */
2654         dev->tx_desc =
2655             dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2656         dev->rx_desc =
2657             dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2658
2659         DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2660         DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2661
2662         /* Clean rings */
2663         memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2664         memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2665
2666         /* Attach to ZMII, if needed */
2667         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2668             (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2669                 goto err_unreg_commac;
2670
2671         /* Attach to RGMII, if needed */
2672         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2673             (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2674                 goto err_detach_zmii;
2675
2676         /* Attach to TAH, if needed */
2677         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2678             (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2679                 goto err_detach_rgmii;
2680
2681         /* Set some link defaults before we can find out real parameters */
2682         dev->phy.speed = SPEED_100;
2683         dev->phy.duplex = DUPLEX_FULL;
2684         dev->phy.autoneg = AUTONEG_DISABLE;
2685         dev->phy.pause = dev->phy.asym_pause = 0;
2686         dev->stop_timeout = STOP_TIMEOUT_100;
2687         INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2688
2689         /* Find PHY if any */
2690         err = emac_init_phy(dev);
2691         if (err != 0)
2692                 goto err_detach_tah;
2693
2694         /* Fill in the driver function table */
2695         ndev->open = &emac_open;
2696         if (dev->tah_dev)
2697                 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2698         ndev->tx_timeout = &emac_tx_timeout;
2699         ndev->watchdog_timeo = 5 * HZ;
2700         ndev->stop = &emac_close;
2701         ndev->get_stats = &emac_stats;
2702         ndev->set_multicast_list = &emac_set_multicast_list;
2703         ndev->do_ioctl = &emac_ioctl;
2704         if (emac_phy_supports_gige(dev->phy_mode)) {
2705                 ndev->hard_start_xmit = &emac_start_xmit_sg;
2706                 ndev->change_mtu = &emac_change_mtu;
2707                 dev->commac.ops = &emac_commac_sg_ops;
2708         } else {
2709                 ndev->hard_start_xmit = &emac_start_xmit;
2710         }
2711         SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2712
2713         netif_carrier_off(ndev);
2714         netif_stop_queue(ndev);
2715
2716         err = register_netdev(ndev);
2717         if (err) {
2718                 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2719                        np->full_name, err);
2720                 goto err_detach_tah;
2721         }
2722
2723         /* Set our drvdata last as we don't want them visible until we are
2724          * fully initialized
2725          */
2726         wmb();
2727         dev_set_drvdata(&ofdev->dev, dev);
2728
2729         /* There's a new kid in town ! Let's tell everybody */
2730         wake_up_all(&emac_probe_wait);
2731
2732
2733         printk(KERN_INFO
2734                "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2735                ndev->name, dev->cell_index, np->full_name,
2736                ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2737                ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2738
2739         if (dev->phy.address >= 0)
2740                 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2741                        dev->phy.def->name, dev->phy.address);
2742
2743         emac_dbg_register(dev);
2744
2745         /* Life is good */
2746         return 0;
2747
2748         /* I have a bad feeling about this ... */
2749
2750  err_detach_tah:
2751         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2752                 tah_detach(dev->tah_dev, dev->tah_port);
2753  err_detach_rgmii:
2754         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2755                 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2756  err_detach_zmii:
2757         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2758                 zmii_detach(dev->zmii_dev, dev->zmii_port);
2759  err_unreg_commac:
2760         mal_unregister_commac(dev->mal, &dev->commac);
2761  err_rel_deps:
2762         emac_put_deps(dev);
2763  err_reg_unmap:
2764         iounmap(dev->emacp);
2765  err_irq_unmap:
2766         if (dev->wol_irq != NO_IRQ)
2767                 irq_dispose_mapping(dev->wol_irq);
2768         if (dev->emac_irq != NO_IRQ)
2769                 irq_dispose_mapping(dev->emac_irq);
2770  err_free:
2771         kfree(ndev);
2772  err_gone:
2773         /* if we were on the bootlist, remove us as we won't show up and
2774          * wake up all waiters to notify them in case they were waiting
2775          * on us
2776          */
2777         if (blist) {
2778                 *blist = NULL;
2779                 wake_up_all(&emac_probe_wait);
2780         }
2781         return err;
2782 }
2783
2784 static int __devexit emac_remove(struct of_device *ofdev)
2785 {
2786         struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2787
2788         DBG(dev, "remove" NL);
2789
2790         dev_set_drvdata(&ofdev->dev, NULL);
2791
2792         unregister_netdev(dev->ndev);
2793
2794         flush_scheduled_work();
2795
2796         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2797                 tah_detach(dev->tah_dev, dev->tah_port);
2798         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2799                 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2800         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2801                 zmii_detach(dev->zmii_dev, dev->zmii_port);
2802
2803         mal_unregister_commac(dev->mal, &dev->commac);
2804         emac_put_deps(dev);
2805
2806         emac_dbg_unregister(dev);
2807         iounmap(dev->emacp);
2808
2809         if (dev->wol_irq != NO_IRQ)
2810                 irq_dispose_mapping(dev->wol_irq);
2811         if (dev->emac_irq != NO_IRQ)
2812                 irq_dispose_mapping(dev->emac_irq);
2813
2814         kfree(dev->ndev);
2815
2816         return 0;
2817 }
2818
/* XXX Features in here should be replaced by properties... */
/* Device-tree match table: we bind to "network"-type nodes compatible
 * with either the classic EMAC or the EMAC4 core; the exact variant is
 * re-checked in emac_init_config() via of_device_is_compatible().
 */
static struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{},
};
2832
/* OF platform driver glue binding emac_probe()/emac_remove() to the
 * nodes listed in emac_match.
 */
static struct of_platform_driver emac_driver = {
	.name = "emac",
	.match_table = emac_match,

	.probe = emac_probe,
	.remove = emac_remove,
};
2840
2841 static void __init emac_make_bootlist(void)
2842 {
2843         struct device_node *np = NULL;
2844         int j, max, i = 0, k;
2845         int cell_indices[EMAC_BOOT_LIST_SIZE];
2846
2847         /* Collect EMACs */
2848         while((np = of_find_all_nodes(np)) != NULL) {
2849                 const u32 *idx;
2850
2851                 if (of_match_node(emac_match, np) == NULL)
2852                         continue;
2853                 if (of_get_property(np, "unused", NULL))
2854                         continue;
2855                 idx = of_get_property(np, "cell-index", NULL);
2856                 if (idx == NULL)
2857                         continue;
2858                 cell_indices[i] = *idx;
2859                 emac_boot_list[i++] = of_node_get(np);
2860                 if (i >= EMAC_BOOT_LIST_SIZE) {
2861                         of_node_put(np);
2862                         break;
2863                 }
2864         }
2865         max = i;
2866
2867         /* Bubble sort them (doh, what a creative algorithm :-) */
2868         for (i = 0; max > 1 && (i < (max - 1)); i++)
2869                 for (j = i; j < max; j++) {
2870                         if (cell_indices[i] > cell_indices[j]) {
2871                                 np = emac_boot_list[i];
2872                                 emac_boot_list[i] = emac_boot_list[j];
2873                                 emac_boot_list[j] = np;
2874                                 k = cell_indices[i];
2875                                 cell_indices[i] = cell_indices[j];
2876                                 cell_indices[j] = k;
2877                         }
2878                 }
2879 }
2880
2881 static int __init emac_init(void)
2882 {
2883         int rc;
2884
2885         printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2886
2887         /* Init debug stuff */
2888         emac_init_debug();
2889
2890         /* Build EMAC boot list */
2891         emac_make_bootlist();
2892
2893         /* Init submodules */
2894         rc = mal_init();
2895         if (rc)
2896                 goto err;
2897         rc = zmii_init();
2898         if (rc)
2899                 goto err_mal;
2900         rc = rgmii_init();
2901         if (rc)
2902                 goto err_zmii;
2903         rc = tah_init();
2904         if (rc)
2905                 goto err_rgmii;
2906         rc = of_register_platform_driver(&emac_driver);
2907         if (rc)
2908                 goto err_tah;
2909
2910         return 0;
2911
2912  err_tah:
2913         tah_exit();
2914  err_rgmii:
2915         rgmii_exit();
2916  err_zmii:
2917         zmii_exit();
2918  err_mal:
2919         mal_exit();
2920  err:
2921         return rc;
2922 }
2923
2924 static void __exit emac_exit(void)
2925 {
2926         int i;
2927
2928         of_unregister_platform_driver(&emac_driver);
2929
2930         tah_exit();
2931         rgmii_exit();
2932         zmii_exit();
2933         mal_exit();
2934         emac_fini_debug();
2935
2936         /* Destroy EMAC boot list */
2937         for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2938                 if (emac_boot_list[i])
2939                         of_node_put(emac_boot_list[i]);
2940 }
2941
/* Module load/unload hooks */
module_init(emac_init);
module_exit(emac_exit);