Input: HIDDEV - make HIDIOCSREPORT wait IO completion
[cascardo/linux.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39
40 #include <net/checksum.h>
41
42 #include <asm/system.h>
43 #include <asm/io.h>
44 #include <asm/byteorder.h>
45 #include <asm/uaccess.h>
46
47 #ifdef CONFIG_SPARC64
48 #include <asm/idprom.h>
49 #include <asm/oplib.h>
50 #include <asm/pbm.h>
51 #endif
52
53 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
54 #define TG3_VLAN_TAG_USED 1
55 #else
56 #define TG3_VLAN_TAG_USED 0
57 #endif
58
59 #ifdef NETIF_F_TSO
60 #define TG3_TSO_SUPPORT 1
61 #else
62 #define TG3_TSO_SUPPORT 0
63 #endif
64
65 #include "tg3.h"
66
67 #define DRV_MODULE_NAME         "tg3"
68 #define PFX DRV_MODULE_NAME     ": "
69 #define DRV_MODULE_VERSION      "3.37"
70 #define DRV_MODULE_RELDATE      "August 25, 2005"
71
72 #define TG3_DEF_MAC_MODE        0
73 #define TG3_DEF_RX_MODE         0
74 #define TG3_DEF_TX_MODE         0
75 #define TG3_DEF_MSG_ENABLE        \
76         (NETIF_MSG_DRV          | \
77          NETIF_MSG_PROBE        | \
78          NETIF_MSG_LINK         | \
79          NETIF_MSG_TIMER        | \
80          NETIF_MSG_IFDOWN       | \
81          NETIF_MSG_IFUP         | \
82          NETIF_MSG_RX_ERR       | \
83          NETIF_MSG_TX_ERR)
84
85 /* length of time before we decide the hardware is borked,
86  * and dev->tx_timeout() should be called to fix the problem
87  */
88 #define TG3_TX_TIMEOUT                  (5 * HZ)
89
90 /* hardware minimum and maximum for a single frame's data payload */
91 #define TG3_MIN_MTU                     60
92 #define TG3_MAX_MTU(tp) \
93         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
94
95 /* These numbers seem to be hard coded in the NIC firmware somehow.
96  * You can't change the ring sizes, but you can change where you place
97  * them in the NIC onboard memory.
98  */
99 #define TG3_RX_RING_SIZE                512
100 #define TG3_DEF_RX_RING_PENDING         200
101 #define TG3_RX_JUMBO_RING_SIZE          256
102 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
103
104 /* Do not place this n-ring entries value into the tp struct itself,
105  * we really want to expose these constants to GCC so that modulo et
106  * al.  operations are done with shifts and masks instead of with
107  * hw multiply/modulo instructions.  Another solution would be to
108  * replace things like '% foo' with '& (foo - 1)'.
109  */
110 #define TG3_RX_RCB_RING_SIZE(tp)        \
111         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
112
113 #define TG3_TX_RING_SIZE                512
114 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
115
116 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_RING_SIZE)
118 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_JUMBO_RING_SIZE)
120 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121                                    TG3_RX_RCB_RING_SIZE(tp))
122 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
123                                  TG3_TX_RING_SIZE)
124 #define TX_RING_GAP(TP) \
125         (TG3_TX_RING_SIZE - (TP)->tx_pending)
126 #define TX_BUFFS_AVAIL(TP)                                              \
127         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
128           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
129           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { 0, }
245 };
246
247 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
248
249 static struct {
250         const char string[ETH_GSTRING_LEN];
251 } ethtool_stats_keys[TG3_NUM_STATS] = {
252         { "rx_octets" },
253         { "rx_fragments" },
254         { "rx_ucast_packets" },
255         { "rx_mcast_packets" },
256         { "rx_bcast_packets" },
257         { "rx_fcs_errors" },
258         { "rx_align_errors" },
259         { "rx_xon_pause_rcvd" },
260         { "rx_xoff_pause_rcvd" },
261         { "rx_mac_ctrl_rcvd" },
262         { "rx_xoff_entered" },
263         { "rx_frame_too_long_errors" },
264         { "rx_jabbers" },
265         { "rx_undersize_packets" },
266         { "rx_in_length_errors" },
267         { "rx_out_length_errors" },
268         { "rx_64_or_less_octet_packets" },
269         { "rx_65_to_127_octet_packets" },
270         { "rx_128_to_255_octet_packets" },
271         { "rx_256_to_511_octet_packets" },
272         { "rx_512_to_1023_octet_packets" },
273         { "rx_1024_to_1522_octet_packets" },
274         { "rx_1523_to_2047_octet_packets" },
275         { "rx_2048_to_4095_octet_packets" },
276         { "rx_4096_to_8191_octet_packets" },
277         { "rx_8192_to_9022_octet_packets" },
278
279         { "tx_octets" },
280         { "tx_collisions" },
281
282         { "tx_xon_sent" },
283         { "tx_xoff_sent" },
284         { "tx_flow_control" },
285         { "tx_mac_errors" },
286         { "tx_single_collisions" },
287         { "tx_mult_collisions" },
288         { "tx_deferred" },
289         { "tx_excessive_collisions" },
290         { "tx_late_collisions" },
291         { "tx_collide_2times" },
292         { "tx_collide_3times" },
293         { "tx_collide_4times" },
294         { "tx_collide_5times" },
295         { "tx_collide_6times" },
296         { "tx_collide_7times" },
297         { "tx_collide_8times" },
298         { "tx_collide_9times" },
299         { "tx_collide_10times" },
300         { "tx_collide_11times" },
301         { "tx_collide_12times" },
302         { "tx_collide_13times" },
303         { "tx_collide_14times" },
304         { "tx_collide_15times" },
305         { "tx_ucast_packets" },
306         { "tx_mcast_packets" },
307         { "tx_bcast_packets" },
308         { "tx_carrier_sense_errors" },
309         { "tx_discards" },
310         { "tx_errors" },
311
312         { "dma_writeq_full" },
313         { "dma_write_prioq_full" },
314         { "rxbds_empty" },
315         { "rx_discards" },
316         { "rx_errors" },
317         { "rx_threshold_hit" },
318
319         { "dma_readq_full" },
320         { "dma_read_prioq_full" },
321         { "tx_comp_queue_full" },
322
323         { "ring_set_send_prod_index" },
324         { "ring_status_update" },
325         { "nic_irqs" },
326         { "nic_avoided_irqs" },
327         { "nic_tx_threshold_hit" }
328 };
329
330 static struct {
331         const char string[ETH_GSTRING_LEN];
332 } ethtool_test_keys[TG3_NUM_TEST] = {
333         { "nvram test     (online) " },
334         { "link test      (online) " },
335         { "register test  (offline)" },
336         { "memory test    (offline)" },
337         { "loopback test  (offline)" },
338         { "interrupt test (offline)" },
339 };
340
341 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
342 {
343         unsigned long flags;
344
345         spin_lock_irqsave(&tp->indirect_lock, flags);
346         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
347         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
348         spin_unlock_irqrestore(&tp->indirect_lock, flags);
349 }
350
351 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
352 {
353         writel(val, tp->regs + off);
354         readl(tp->regs + off);
355 }
356
357 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
358 {
359         unsigned long flags;
360         u32 val;
361
362         spin_lock_irqsave(&tp->indirect_lock, flags);
363         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
364         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
365         spin_unlock_irqrestore(&tp->indirect_lock, flags);
366         return val;
367 }
368
369 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
370 {
371         unsigned long flags;
372
373         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
374                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
375                                        TG3_64BIT_REG_LOW, val);
376                 return;
377         }
378         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
379                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
380                                        TG3_64BIT_REG_LOW, val);
381                 return;
382         }
383
384         spin_lock_irqsave(&tp->indirect_lock, flags);
385         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
386         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
387         spin_unlock_irqrestore(&tp->indirect_lock, flags);
388
389         /* In indirect mode when disabling interrupts, we also need
390          * to clear the interrupt bit in the GRC local ctrl register.
391          */
392         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
393             (val == 0x1)) {
394                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
395                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
396         }
397 }
398
399 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
400 {
401         unsigned long flags;
402         u32 val;
403
404         spin_lock_irqsave(&tp->indirect_lock, flags);
405         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
406         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
407         spin_unlock_irqrestore(&tp->indirect_lock, flags);
408         return val;
409 }
410
411 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
412 {
413         tp->write32(tp, off, val);
414         if (!(tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) &&
415             !(tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) &&
416             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
417                 tp->read32(tp, off);    /* flush */
418 }
419
420 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
421 {
422         tp->write32_mbox(tp, off, val);
423         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
424             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
425                 tp->read32_mbox(tp, off);
426 }
427
428 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
429 {
430         void __iomem *mbox = tp->regs + off;
431         writel(val, mbox);
432         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
433                 writel(val, mbox);
434         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
435                 readl(mbox);
436 }
437
438 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
439 {
440         writel(val, tp->regs + off);
441 }
442
443 static u32 tg3_read32(struct tg3 *tp, u32 off)
444 {
445         return (readl(tp->regs + off)); 
446 }
447
448 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
449 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
450 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
451 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
452 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
453
454 #define tw32(reg,val)           tp->write32(tp, reg, val)
455 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
456 #define tr32(reg)               tp->read32(tp, reg)
457
458 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
459 {
460         unsigned long flags;
461
462         spin_lock_irqsave(&tp->indirect_lock, flags);
463         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
464         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
465
466         /* Always leave this as zero. */
467         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
468         spin_unlock_irqrestore(&tp->indirect_lock, flags);
469 }
470
471 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
472 {
473         unsigned long flags;
474
475         spin_lock_irqsave(&tp->indirect_lock, flags);
476         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
477         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
478
479         /* Always leave this as zero. */
480         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
481         spin_unlock_irqrestore(&tp->indirect_lock, flags);
482 }
483
484 static void tg3_disable_ints(struct tg3 *tp)
485 {
486         tw32(TG3PCI_MISC_HOST_CTRL,
487              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
488         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
489 }
490
491 static inline void tg3_cond_int(struct tg3 *tp)
492 {
493         if (tp->hw_status->status & SD_STATUS_UPDATED)
494                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
495 }
496
497 static void tg3_enable_ints(struct tg3 *tp)
498 {
499         tp->irq_sync = 0;
500         wmb();
501
502         tw32(TG3PCI_MISC_HOST_CTRL,
503              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
504         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
505                        (tp->last_tag << 24));
506         tg3_cond_int(tp);
507 }
508
509 static inline unsigned int tg3_has_work(struct tg3 *tp)
510 {
511         struct tg3_hw_status *sblk = tp->hw_status;
512         unsigned int work_exists = 0;
513
514         /* check for phy events */
515         if (!(tp->tg3_flags &
516               (TG3_FLAG_USE_LINKCHG_REG |
517                TG3_FLAG_POLL_SERDES))) {
518                 if (sblk->status & SD_STATUS_LINK_CHG)
519                         work_exists = 1;
520         }
521         /* check for RX/TX work to do */
522         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
523             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
524                 work_exists = 1;
525
526         return work_exists;
527 }
528
529 /* tg3_restart_ints
530  *  similar to tg3_enable_ints, but it accurately determines whether there
531  *  is new work pending and can return without flushing the PIO write
532  *  which reenables interrupts 
533  */
534 static void tg3_restart_ints(struct tg3 *tp)
535 {
536         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
537                      tp->last_tag << 24);
538         mmiowb();
539
540         /* When doing tagged status, this work check is unnecessary.
541          * The last_tag we write above tells the chip which piece of
542          * work we've completed.
543          */
544         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
545             tg3_has_work(tp))
546                 tw32(HOSTCC_MODE, tp->coalesce_mode |
547                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
548 }
549
550 static inline void tg3_netif_stop(struct tg3 *tp)
551 {
552         tp->dev->trans_start = jiffies; /* prevent tx timeout */
553         netif_poll_disable(tp->dev);
554         netif_tx_disable(tp->dev);
555 }
556
557 static inline void tg3_netif_start(struct tg3 *tp)
558 {
559         netif_wake_queue(tp->dev);
560         /* NOTE: unconditional netif_wake_queue is only appropriate
561          * so long as all callers are assured to have free tx slots
562          * (such as after tg3_init_hw)
563          */
564         netif_poll_enable(tp->dev);
565         tp->hw_status->status |= SD_STATUS_UPDATED;
566         tg3_enable_ints(tp);
567 }
568
569 static void tg3_switch_clocks(struct tg3 *tp)
570 {
571         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
572         u32 orig_clock_ctrl;
573
574         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
575                 return;
576
577         orig_clock_ctrl = clock_ctrl;
578         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
579                        CLOCK_CTRL_CLKRUN_OENABLE |
580                        0x1f);
581         tp->pci_clock_ctrl = clock_ctrl;
582
583         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
584                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
585                         tw32_f(TG3PCI_CLOCK_CTRL,
586                                clock_ctrl | CLOCK_CTRL_625_CORE);
587                         udelay(40);
588                 }
589         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
590                 tw32_f(TG3PCI_CLOCK_CTRL,
591                      clock_ctrl |
592                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
593                 udelay(40);
594                 tw32_f(TG3PCI_CLOCK_CTRL,
595                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
596                 udelay(40);
597         }
598         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
599         udelay(40);
600 }
601
602 #define PHY_BUSY_LOOPS  5000
603
604 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
605 {
606         u32 frame_val;
607         unsigned int loops;
608         int ret;
609
610         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
611                 tw32_f(MAC_MI_MODE,
612                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
613                 udelay(80);
614         }
615
616         *val = 0x0;
617
618         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
619                       MI_COM_PHY_ADDR_MASK);
620         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
621                       MI_COM_REG_ADDR_MASK);
622         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
623         
624         tw32_f(MAC_MI_COM, frame_val);
625
626         loops = PHY_BUSY_LOOPS;
627         while (loops != 0) {
628                 udelay(10);
629                 frame_val = tr32(MAC_MI_COM);
630
631                 if ((frame_val & MI_COM_BUSY) == 0) {
632                         udelay(5);
633                         frame_val = tr32(MAC_MI_COM);
634                         break;
635                 }
636                 loops -= 1;
637         }
638
639         ret = -EBUSY;
640         if (loops != 0) {
641                 *val = frame_val & MI_COM_DATA_MASK;
642                 ret = 0;
643         }
644
645         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
646                 tw32_f(MAC_MI_MODE, tp->mi_mode);
647                 udelay(80);
648         }
649
650         return ret;
651 }
652
653 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
654 {
655         u32 frame_val;
656         unsigned int loops;
657         int ret;
658
659         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
660                 tw32_f(MAC_MI_MODE,
661                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
662                 udelay(80);
663         }
664
665         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
666                       MI_COM_PHY_ADDR_MASK);
667         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
668                       MI_COM_REG_ADDR_MASK);
669         frame_val |= (val & MI_COM_DATA_MASK);
670         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
671         
672         tw32_f(MAC_MI_COM, frame_val);
673
674         loops = PHY_BUSY_LOOPS;
675         while (loops != 0) {
676                 udelay(10);
677                 frame_val = tr32(MAC_MI_COM);
678                 if ((frame_val & MI_COM_BUSY) == 0) {
679                         udelay(5);
680                         frame_val = tr32(MAC_MI_COM);
681                         break;
682                 }
683                 loops -= 1;
684         }
685
686         ret = -EBUSY;
687         if (loops != 0)
688                 ret = 0;
689
690         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
691                 tw32_f(MAC_MI_MODE, tp->mi_mode);
692                 udelay(80);
693         }
694
695         return ret;
696 }
697
698 static void tg3_phy_set_wirespeed(struct tg3 *tp)
699 {
700         u32 val;
701
702         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
703                 return;
704
705         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
706             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
707                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
708                              (val | (1 << 15) | (1 << 4)));
709 }
710
711 static int tg3_bmcr_reset(struct tg3 *tp)
712 {
713         u32 phy_control;
714         int limit, err;
715
716         /* OK, reset it, and poll the BMCR_RESET bit until it
717          * clears or we time out.
718          */
719         phy_control = BMCR_RESET;
720         err = tg3_writephy(tp, MII_BMCR, phy_control);
721         if (err != 0)
722                 return -EBUSY;
723
724         limit = 5000;
725         while (limit--) {
726                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
727                 if (err != 0)
728                         return -EBUSY;
729
730                 if ((phy_control & BMCR_RESET) == 0) {
731                         udelay(40);
732                         break;
733                 }
734                 udelay(10);
735         }
736         if (limit <= 0)
737                 return -EBUSY;
738
739         return 0;
740 }
741
742 static int tg3_wait_macro_done(struct tg3 *tp)
743 {
744         int limit = 100;
745
746         while (limit--) {
747                 u32 tmp32;
748
749                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
750                         if ((tmp32 & 0x1000) == 0)
751                                 break;
752                 }
753         }
754         if (limit <= 0)
755                 return -EBUSY;
756
757         return 0;
758 }
759
760 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
761 {
762         static const u32 test_pat[4][6] = {
763         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
764         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
765         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
766         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
767         };
768         int chan;
769
770         for (chan = 0; chan < 4; chan++) {
771                 int i;
772
773                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
774                              (chan * 0x2000) | 0x0200);
775                 tg3_writephy(tp, 0x16, 0x0002);
776
777                 for (i = 0; i < 6; i++)
778                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
779                                      test_pat[chan][i]);
780
781                 tg3_writephy(tp, 0x16, 0x0202);
782                 if (tg3_wait_macro_done(tp)) {
783                         *resetp = 1;
784                         return -EBUSY;
785                 }
786
787                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
788                              (chan * 0x2000) | 0x0200);
789                 tg3_writephy(tp, 0x16, 0x0082);
790                 if (tg3_wait_macro_done(tp)) {
791                         *resetp = 1;
792                         return -EBUSY;
793                 }
794
795                 tg3_writephy(tp, 0x16, 0x0802);
796                 if (tg3_wait_macro_done(tp)) {
797                         *resetp = 1;
798                         return -EBUSY;
799                 }
800
801                 for (i = 0; i < 6; i += 2) {
802                         u32 low, high;
803
804                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
805                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
806                             tg3_wait_macro_done(tp)) {
807                                 *resetp = 1;
808                                 return -EBUSY;
809                         }
810                         low &= 0x7fff;
811                         high &= 0x000f;
812                         if (low != test_pat[chan][i] ||
813                             high != test_pat[chan][i+1]) {
814                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
815                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
816                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
817
818                                 return -EBUSY;
819                         }
820                 }
821         }
822
823         return 0;
824 }
825
826 static int tg3_phy_reset_chanpat(struct tg3 *tp)
827 {
828         int chan;
829
830         for (chan = 0; chan < 4; chan++) {
831                 int i;
832
833                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
834                              (chan * 0x2000) | 0x0200);
835                 tg3_writephy(tp, 0x16, 0x0002);
836                 for (i = 0; i < 6; i++)
837                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
838                 tg3_writephy(tp, 0x16, 0x0202);
839                 if (tg3_wait_macro_done(tp))
840                         return -EBUSY;
841         }
842
843         return 0;
844 }
845
846 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
847 {
848         u32 reg32, phy9_orig;
849         int retries, do_phy_reset, err;
850
851         retries = 10;
852         do_phy_reset = 1;
853         do {
854                 if (do_phy_reset) {
855                         err = tg3_bmcr_reset(tp);
856                         if (err)
857                                 return err;
858                         do_phy_reset = 0;
859                 }
860
861                 /* Disable transmitter and interrupt.  */
862                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
863                         continue;
864
865                 reg32 |= 0x3000;
866                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
867
868                 /* Set full-duplex, 1000 mbps.  */
869                 tg3_writephy(tp, MII_BMCR,
870                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
871
872                 /* Set to master mode.  */
873                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
874                         continue;
875
876                 tg3_writephy(tp, MII_TG3_CTRL,
877                              (MII_TG3_CTRL_AS_MASTER |
878                               MII_TG3_CTRL_ENABLE_AS_MASTER));
879
880                 /* Enable SM_DSP_CLOCK and 6dB.  */
881                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
882
883                 /* Block the PHY control access.  */
884                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
885                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
886
887                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
888                 if (!err)
889                         break;
890         } while (--retries);
891
892         err = tg3_phy_reset_chanpat(tp);
893         if (err)
894                 return err;
895
896         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
897         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
898
899         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
900         tg3_writephy(tp, 0x16, 0x0000);
901
902         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
903             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
904                 /* Set Extended packet length bit for jumbo frames */
905                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
906         }
907         else {
908                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
909         }
910
911         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
912
913         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
914                 reg32 &= ~0x3000;
915                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
916         } else if (!err)
917                 err = -EBUSY;
918
919         return err;
920 }
921
922 /* This will reset the tigon3 PHY if there is no valid
923  * link unless the FORCE argument is non-zero.
924  */
925 static int tg3_phy_reset(struct tg3 *tp)
926 {
927         u32 phy_status;
928         int err;
929
930         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
931         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
932         if (err != 0)
933                 return -EBUSY;
934
935         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
936             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
937             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
938                 err = tg3_phy_reset_5703_4_5(tp);
939                 if (err)
940                         return err;
941                 goto out;
942         }
943
944         err = tg3_bmcr_reset(tp);
945         if (err)
946                 return err;
947
948 out:
949         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
950                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
951                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
952                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
953                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
954                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
955                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
956         }
957         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
958                 tg3_writephy(tp, 0x1c, 0x8d68);
959                 tg3_writephy(tp, 0x1c, 0x8d68);
960         }
961         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
962                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
963                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
964                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
965                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
966                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
967                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
968                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
969                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
970         }
971         /* Set Extended packet length bit (bit 14) on all chips that */
972         /* support jumbo frames */
973         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
974                 /* Cannot do read-modify-write on 5401 */
975                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
976         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
977                 u32 phy_reg;
978
979                 /* Set bit 14 with read-modify-write to preserve other bits */
980                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
981                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
982                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
983         }
984
985         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
986          * jumbo frames transmission.
987          */
988         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
989                 u32 phy_reg;
990
991                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
992                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
993                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
994         }
995
996         tg3_phy_set_wirespeed(tp);
997         return 0;
998 }
999
1000 static void tg3_frob_aux_power(struct tg3 *tp)
1001 {
1002         struct tg3 *tp_peer = tp;
1003
1004         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1005                 return;
1006
1007         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1008                 tp_peer = pci_get_drvdata(tp->pdev_peer);
1009                 if (!tp_peer)
1010                         BUG();
1011         }
1012
1013
1014         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1015             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
1016                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1017                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1018                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1019                              (GRC_LCLCTRL_GPIO_OE0 |
1020                               GRC_LCLCTRL_GPIO_OE1 |
1021                               GRC_LCLCTRL_GPIO_OE2 |
1022                               GRC_LCLCTRL_GPIO_OUTPUT0 |
1023                               GRC_LCLCTRL_GPIO_OUTPUT1));
1024                         udelay(100);
1025                 } else {
1026                         u32 no_gpio2;
1027                         u32 grc_local_ctrl;
1028
1029                         if (tp_peer != tp &&
1030                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1031                                 return;
1032
1033                         /* On 5753 and variants, GPIO2 cannot be used. */
1034                         no_gpio2 = tp->nic_sram_data_cfg &
1035                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1036
1037                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1038                                          GRC_LCLCTRL_GPIO_OE1 |
1039                                          GRC_LCLCTRL_GPIO_OE2 |
1040                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1041                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1042                         if (no_gpio2) {
1043                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1044                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1045                         }
1046                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1047                                                 grc_local_ctrl);
1048                         udelay(100);
1049
1050                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1051
1052                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1053                                                 grc_local_ctrl);
1054                         udelay(100);
1055
1056                         if (!no_gpio2) {
1057                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1058                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1059                                        grc_local_ctrl);
1060                                 udelay(100);
1061                         }
1062                 }
1063         } else {
1064                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1065                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1066                         if (tp_peer != tp &&
1067                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1068                                 return;
1069
1070                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1071                              (GRC_LCLCTRL_GPIO_OE1 |
1072                               GRC_LCLCTRL_GPIO_OUTPUT1));
1073                         udelay(100);
1074
1075                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1076                              (GRC_LCLCTRL_GPIO_OE1));
1077                         udelay(100);
1078
1079                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1080                              (GRC_LCLCTRL_GPIO_OE1 |
1081                               GRC_LCLCTRL_GPIO_OUTPUT1));
1082                         udelay(100);
1083                 }
1084         }
1085 }
1086
1087 static int tg3_setup_phy(struct tg3 *, int);
1088
1089 #define RESET_KIND_SHUTDOWN     0
1090 #define RESET_KIND_INIT         1
1091 #define RESET_KIND_SUSPEND      2
1092
1093 static void tg3_write_sig_post_reset(struct tg3 *, int);
1094 static int tg3_halt_cpu(struct tg3 *, u32);
1095
1096 static int tg3_set_power_state(struct tg3 *tp, int state)
1097 {
1098         u32 misc_host_ctrl;
1099         u16 power_control, power_caps;
1100         int pm = tp->pm_cap;
1101
1102         /* Make sure register accesses (indirect or otherwise)
1103          * will function correctly.
1104          */
1105         pci_write_config_dword(tp->pdev,
1106                                TG3PCI_MISC_HOST_CTRL,
1107                                tp->misc_host_ctrl);
1108
1109         pci_read_config_word(tp->pdev,
1110                              pm + PCI_PM_CTRL,
1111                              &power_control);
1112         power_control |= PCI_PM_CTRL_PME_STATUS;
1113         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1114         switch (state) {
1115         case 0:
1116                 power_control |= 0;
1117                 pci_write_config_word(tp->pdev,
1118                                       pm + PCI_PM_CTRL,
1119                                       power_control);
1120                 udelay(100);    /* Delay after power state change */
1121
1122                 /* Switch out of Vaux if it is not a LOM */
1123                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1124                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1125                         udelay(100);
1126                 }
1127
1128                 return 0;
1129
1130         case 1:
1131                 power_control |= 1;
1132                 break;
1133
1134         case 2:
1135                 power_control |= 2;
1136                 break;
1137
1138         case 3:
1139                 power_control |= 3;
1140                 break;
1141
1142         default:
1143                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1144                        "requested.\n",
1145                        tp->dev->name, state);
1146                 return -EINVAL;
1147         };
1148
1149         power_control |= PCI_PM_CTRL_PME_ENABLE;
1150
1151         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1152         tw32(TG3PCI_MISC_HOST_CTRL,
1153              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1154
1155         if (tp->link_config.phy_is_low_power == 0) {
1156                 tp->link_config.phy_is_low_power = 1;
1157                 tp->link_config.orig_speed = tp->link_config.speed;
1158                 tp->link_config.orig_duplex = tp->link_config.duplex;
1159                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1160         }
1161
1162         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1163                 tp->link_config.speed = SPEED_10;
1164                 tp->link_config.duplex = DUPLEX_HALF;
1165                 tp->link_config.autoneg = AUTONEG_ENABLE;
1166                 tg3_setup_phy(tp, 0);
1167         }
1168
1169         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1170
1171         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1172                 u32 mac_mode;
1173
1174                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1175                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1176                         udelay(40);
1177
1178                         mac_mode = MAC_MODE_PORT_MODE_MII;
1179
1180                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1181                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1182                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1183                 } else {
1184                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1185                 }
1186
1187                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1188                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1189
1190                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1191                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1192                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1193
1194                 tw32_f(MAC_MODE, mac_mode);
1195                 udelay(100);
1196
1197                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1198                 udelay(10);
1199         }
1200
1201         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1202             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1203              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1204                 u32 base_val;
1205
1206                 base_val = tp->pci_clock_ctrl;
1207                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1208                              CLOCK_CTRL_TXCLK_DISABLE);
1209
1210                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1211                      CLOCK_CTRL_ALTCLK |
1212                      CLOCK_CTRL_PWRDOWN_PLL133);
1213                 udelay(40);
1214         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
1215                 /* do nothing */
1216         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1217                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1218                 u32 newbits1, newbits2;
1219
1220                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1221                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1222                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1223                                     CLOCK_CTRL_TXCLK_DISABLE |
1224                                     CLOCK_CTRL_ALTCLK);
1225                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1226                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1227                         newbits1 = CLOCK_CTRL_625_CORE;
1228                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1229                 } else {
1230                         newbits1 = CLOCK_CTRL_ALTCLK;
1231                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1232                 }
1233
1234                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1235                 udelay(40);
1236
1237                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1238                 udelay(40);
1239
1240                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1241                         u32 newbits3;
1242
1243                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1244                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1245                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1246                                             CLOCK_CTRL_TXCLK_DISABLE |
1247                                             CLOCK_CTRL_44MHZ_CORE);
1248                         } else {
1249                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1250                         }
1251
1252                         tw32_f(TG3PCI_CLOCK_CTRL,
1253                                          tp->pci_clock_ctrl | newbits3);
1254                         udelay(40);
1255                 }
1256         }
1257
1258         tg3_frob_aux_power(tp);
1259
1260         /* Workaround for unstable PLL clock */
1261         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1262             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1263                 u32 val = tr32(0x7d00);
1264
1265                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1266                 tw32(0x7d00, val);
1267                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1268                         tg3_halt_cpu(tp, RX_CPU_BASE);
1269         }
1270
1271         /* Finally, set the new power state. */
1272         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1273         udelay(100);    /* Delay after power state change */
1274
1275         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1276
1277         return 0;
1278 }
1279
1280 static void tg3_link_report(struct tg3 *tp)
1281 {
1282         if (!netif_carrier_ok(tp->dev)) {
1283                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1284         } else {
1285                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1286                        tp->dev->name,
1287                        (tp->link_config.active_speed == SPEED_1000 ?
1288                         1000 :
1289                         (tp->link_config.active_speed == SPEED_100 ?
1290                          100 : 10)),
1291                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1292                         "full" : "half"));
1293
1294                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1295                        "%s for RX.\n",
1296                        tp->dev->name,
1297                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1298                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1299         }
1300 }
1301
1302 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1303 {
1304         u32 new_tg3_flags = 0;
1305         u32 old_rx_mode = tp->rx_mode;
1306         u32 old_tx_mode = tp->tx_mode;
1307
1308         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1309
1310                 /* Convert 1000BaseX flow control bits to 1000BaseT
1311                  * bits before resolving flow control.
1312                  */
1313                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1314                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1315                                        ADVERTISE_PAUSE_ASYM);
1316                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1317
1318                         if (local_adv & ADVERTISE_1000XPAUSE)
1319                                 local_adv |= ADVERTISE_PAUSE_CAP;
1320                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1321                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1322                         if (remote_adv & LPA_1000XPAUSE)
1323                                 remote_adv |= LPA_PAUSE_CAP;
1324                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1325                                 remote_adv |= LPA_PAUSE_ASYM;
1326                 }
1327
1328                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1329                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1330                                 if (remote_adv & LPA_PAUSE_CAP)
1331                                         new_tg3_flags |=
1332                                                 (TG3_FLAG_RX_PAUSE |
1333                                                 TG3_FLAG_TX_PAUSE);
1334                                 else if (remote_adv & LPA_PAUSE_ASYM)
1335                                         new_tg3_flags |=
1336                                                 (TG3_FLAG_RX_PAUSE);
1337                         } else {
1338                                 if (remote_adv & LPA_PAUSE_CAP)
1339                                         new_tg3_flags |=
1340                                                 (TG3_FLAG_RX_PAUSE |
1341                                                 TG3_FLAG_TX_PAUSE);
1342                         }
1343                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1344                         if ((remote_adv & LPA_PAUSE_CAP) &&
1345                         (remote_adv & LPA_PAUSE_ASYM))
1346                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1347                 }
1348
1349                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1350                 tp->tg3_flags |= new_tg3_flags;
1351         } else {
1352                 new_tg3_flags = tp->tg3_flags;
1353         }
1354
1355         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1356                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1357         else
1358                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1359
1360         if (old_rx_mode != tp->rx_mode) {
1361                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1362         }
1363         
1364         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1365                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1366         else
1367                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1368
1369         if (old_tx_mode != tp->tx_mode) {
1370                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1371         }
1372 }
1373
1374 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1375 {
1376         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1377         case MII_TG3_AUX_STAT_10HALF:
1378                 *speed = SPEED_10;
1379                 *duplex = DUPLEX_HALF;
1380                 break;
1381
1382         case MII_TG3_AUX_STAT_10FULL:
1383                 *speed = SPEED_10;
1384                 *duplex = DUPLEX_FULL;
1385                 break;
1386
1387         case MII_TG3_AUX_STAT_100HALF:
1388                 *speed = SPEED_100;
1389                 *duplex = DUPLEX_HALF;
1390                 break;
1391
1392         case MII_TG3_AUX_STAT_100FULL:
1393                 *speed = SPEED_100;
1394                 *duplex = DUPLEX_FULL;
1395                 break;
1396
1397         case MII_TG3_AUX_STAT_1000HALF:
1398                 *speed = SPEED_1000;
1399                 *duplex = DUPLEX_HALF;
1400                 break;
1401
1402         case MII_TG3_AUX_STAT_1000FULL:
1403                 *speed = SPEED_1000;
1404                 *duplex = DUPLEX_FULL;
1405                 break;
1406
1407         default:
1408                 *speed = SPEED_INVALID;
1409                 *duplex = DUPLEX_INVALID;
1410                 break;
1411         };
1412 }
1413
1414 static void tg3_phy_copper_begin(struct tg3 *tp)
1415 {
1416         u32 new_adv;
1417         int i;
1418
1419         if (tp->link_config.phy_is_low_power) {
1420                 /* Entering low power mode.  Disable gigabit and
1421                  * 100baseT advertisements.
1422                  */
1423                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1424
1425                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1426                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1427                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1428                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1429
1430                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1431         } else if (tp->link_config.speed == SPEED_INVALID) {
1432                 tp->link_config.advertising =
1433                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1434                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1435                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1436                          ADVERTISED_Autoneg | ADVERTISED_MII);
1437
1438                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1439                         tp->link_config.advertising &=
1440                                 ~(ADVERTISED_1000baseT_Half |
1441                                   ADVERTISED_1000baseT_Full);
1442
1443                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1444                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1445                         new_adv |= ADVERTISE_10HALF;
1446                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1447                         new_adv |= ADVERTISE_10FULL;
1448                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1449                         new_adv |= ADVERTISE_100HALF;
1450                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1451                         new_adv |= ADVERTISE_100FULL;
1452                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1453
1454                 if (tp->link_config.advertising &
1455                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1456                         new_adv = 0;
1457                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1458                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1459                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1460                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1461                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1462                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1463                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1464                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1465                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1466                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1467                 } else {
1468                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1469                 }
1470         } else {
1471                 /* Asking for a specific link mode. */
1472                 if (tp->link_config.speed == SPEED_1000) {
1473                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1474                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1475
1476                         if (tp->link_config.duplex == DUPLEX_FULL)
1477                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1478                         else
1479                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1480                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1481                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1482                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1483                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1484                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1485                 } else {
1486                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1487
1488                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1489                         if (tp->link_config.speed == SPEED_100) {
1490                                 if (tp->link_config.duplex == DUPLEX_FULL)
1491                                         new_adv |= ADVERTISE_100FULL;
1492                                 else
1493                                         new_adv |= ADVERTISE_100HALF;
1494                         } else {
1495                                 if (tp->link_config.duplex == DUPLEX_FULL)
1496                                         new_adv |= ADVERTISE_10FULL;
1497                                 else
1498                                         new_adv |= ADVERTISE_10HALF;
1499                         }
1500                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1501                 }
1502         }
1503
1504         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1505             tp->link_config.speed != SPEED_INVALID) {
1506                 u32 bmcr, orig_bmcr;
1507
1508                 tp->link_config.active_speed = tp->link_config.speed;
1509                 tp->link_config.active_duplex = tp->link_config.duplex;
1510
1511                 bmcr = 0;
1512                 switch (tp->link_config.speed) {
1513                 default:
1514                 case SPEED_10:
1515                         break;
1516
1517                 case SPEED_100:
1518                         bmcr |= BMCR_SPEED100;
1519                         break;
1520
1521                 case SPEED_1000:
1522                         bmcr |= TG3_BMCR_SPEED1000;
1523                         break;
1524                 };
1525
1526                 if (tp->link_config.duplex == DUPLEX_FULL)
1527                         bmcr |= BMCR_FULLDPLX;
1528
1529                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1530                     (bmcr != orig_bmcr)) {
1531                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1532                         for (i = 0; i < 1500; i++) {
1533                                 u32 tmp;
1534
1535                                 udelay(10);
1536                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1537                                     tg3_readphy(tp, MII_BMSR, &tmp))
1538                                         continue;
1539                                 if (!(tmp & BMSR_LSTATUS)) {
1540                                         udelay(40);
1541                                         break;
1542                                 }
1543                         }
1544                         tg3_writephy(tp, MII_BMCR, bmcr);
1545                         udelay(40);
1546                 }
1547         } else {
1548                 tg3_writephy(tp, MII_BMCR,
1549                              BMCR_ANENABLE | BMCR_ANRESTART);
1550         }
1551 }
1552
1553 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1554 {
1555         int err;
1556
1557         /* Turn off tap power management. */
1558         /* Set Extended packet length bit */
1559         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1560
1561         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1562         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1563
1564         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1565         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1566
1567         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1568         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1569
1570         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1571         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1572
1573         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1574         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1575
1576         udelay(40);
1577
1578         return err;
1579 }
1580
1581 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1582 {
1583         u32 adv_reg, all_mask;
1584
1585         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1586                 return 0;
1587
1588         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1589                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1590         if ((adv_reg & all_mask) != all_mask)
1591                 return 0;
1592         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1593                 u32 tg3_ctrl;
1594
1595                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1596                         return 0;
1597
1598                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1599                             MII_TG3_CTRL_ADV_1000_FULL);
1600                 if ((tg3_ctrl & all_mask) != all_mask)
1601                         return 0;
1602         }
1603         return 1;
1604 }
1605
1606 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1607 {
1608         int current_link_up;
1609         u32 bmsr, dummy;
1610         u16 current_speed;
1611         u8 current_duplex;
1612         int i, err;
1613
1614         tw32(MAC_EVENT, 0);
1615
1616         tw32_f(MAC_STATUS,
1617              (MAC_STATUS_SYNC_CHANGED |
1618               MAC_STATUS_CFG_CHANGED |
1619               MAC_STATUS_MI_COMPLETION |
1620               MAC_STATUS_LNKSTATE_CHANGED));
1621         udelay(40);
1622
1623         tp->mi_mode = MAC_MI_MODE_BASE;
1624         tw32_f(MAC_MI_MODE, tp->mi_mode);
1625         udelay(80);
1626
1627         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1628
1629         /* Some third-party PHYs need to be reset on link going
1630          * down.
1631          */
1632         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1633              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1634              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1635             netif_carrier_ok(tp->dev)) {
1636                 tg3_readphy(tp, MII_BMSR, &bmsr);
1637                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1638                     !(bmsr & BMSR_LSTATUS))
1639                         force_reset = 1;
1640         }
1641         if (force_reset)
1642                 tg3_phy_reset(tp);
1643
1644         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1645                 tg3_readphy(tp, MII_BMSR, &bmsr);
1646                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1647                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1648                         bmsr = 0;
1649
1650                 if (!(bmsr & BMSR_LSTATUS)) {
1651                         err = tg3_init_5401phy_dsp(tp);
1652                         if (err)
1653                                 return err;
1654
1655                         tg3_readphy(tp, MII_BMSR, &bmsr);
1656                         for (i = 0; i < 1000; i++) {
1657                                 udelay(10);
1658                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1659                                     (bmsr & BMSR_LSTATUS)) {
1660                                         udelay(40);
1661                                         break;
1662                                 }
1663                         }
1664
1665                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1666                             !(bmsr & BMSR_LSTATUS) &&
1667                             tp->link_config.active_speed == SPEED_1000) {
1668                                 err = tg3_phy_reset(tp);
1669                                 if (!err)
1670                                         err = tg3_init_5401phy_dsp(tp);
1671                                 if (err)
1672                                         return err;
1673                         }
1674                 }
1675         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1676                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1677                 /* 5701 {A0,B0} CRC bug workaround */
1678                 tg3_writephy(tp, 0x15, 0x0a75);
1679                 tg3_writephy(tp, 0x1c, 0x8c68);
1680                 tg3_writephy(tp, 0x1c, 0x8d68);
1681                 tg3_writephy(tp, 0x1c, 0x8c68);
1682         }
1683
1684         /* Clear pending interrupts... */
1685         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1686         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1687
1688         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1689                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1690         else
1691                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1692
1693         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1694             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1695                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1696                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1697                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1698                 else
1699                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1700         }
1701
1702         current_link_up = 0;
1703         current_speed = SPEED_INVALID;
1704         current_duplex = DUPLEX_INVALID;
1705
1706         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1707                 u32 val;
1708
1709                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1710                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1711                 if (!(val & (1 << 10))) {
1712                         val |= (1 << 10);
1713                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1714                         goto relink;
1715                 }
1716         }
1717
1718         bmsr = 0;
1719         for (i = 0; i < 100; i++) {
1720                 tg3_readphy(tp, MII_BMSR, &bmsr);
1721                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1722                     (bmsr & BMSR_LSTATUS))
1723                         break;
1724                 udelay(40);
1725         }
1726
1727         if (bmsr & BMSR_LSTATUS) {
1728                 u32 aux_stat, bmcr;
1729
1730                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1731                 for (i = 0; i < 2000; i++) {
1732                         udelay(10);
1733                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1734                             aux_stat)
1735                                 break;
1736                 }
1737
1738                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1739                                              &current_speed,
1740                                              &current_duplex);
1741
1742                 bmcr = 0;
1743                 for (i = 0; i < 200; i++) {
1744                         tg3_readphy(tp, MII_BMCR, &bmcr);
1745                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1746                                 continue;
1747                         if (bmcr && bmcr != 0x7fff)
1748                                 break;
1749                         udelay(10);
1750                 }
1751
1752                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1753                         if (bmcr & BMCR_ANENABLE) {
1754                                 current_link_up = 1;
1755
1756                                 /* Force autoneg restart if we are exiting
1757                                  * low power mode.
1758                                  */
1759                                 if (!tg3_copper_is_advertising_all(tp))
1760                                         current_link_up = 0;
1761                         } else {
1762                                 current_link_up = 0;
1763                         }
1764                 } else {
1765                         if (!(bmcr & BMCR_ANENABLE) &&
1766                             tp->link_config.speed == current_speed &&
1767                             tp->link_config.duplex == current_duplex) {
1768                                 current_link_up = 1;
1769                         } else {
1770                                 current_link_up = 0;
1771                         }
1772                 }
1773
1774                 tp->link_config.active_speed = current_speed;
1775                 tp->link_config.active_duplex = current_duplex;
1776         }
1777
1778         if (current_link_up == 1 &&
1779             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1780             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1781                 u32 local_adv, remote_adv;
1782
1783                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1784                         local_adv = 0;
1785                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1786
1787                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1788                         remote_adv = 0;
1789
1790                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1791
1792                 /* If we are not advertising full pause capability,
1793                  * something is wrong.  Bring the link down and reconfigure.
1794                  */
1795                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1796                         current_link_up = 0;
1797                 } else {
1798                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1799                 }
1800         }
1801 relink:
1802         if (current_link_up == 0) {
1803                 u32 tmp;
1804
1805                 tg3_phy_copper_begin(tp);
1806
1807                 tg3_readphy(tp, MII_BMSR, &tmp);
1808                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1809                     (tmp & BMSR_LSTATUS))
1810                         current_link_up = 1;
1811         }
1812
1813         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1814         if (current_link_up == 1) {
1815                 if (tp->link_config.active_speed == SPEED_100 ||
1816                     tp->link_config.active_speed == SPEED_10)
1817                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1818                 else
1819                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1820         } else
1821                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1822
1823         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1824         if (tp->link_config.active_duplex == DUPLEX_HALF)
1825                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1826
1827         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1828         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1829                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1830                     (current_link_up == 1 &&
1831                      tp->link_config.active_speed == SPEED_10))
1832                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1833         } else {
1834                 if (current_link_up == 1)
1835                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1836         }
1837
1838         /* ??? Without this setting Netgear GA302T PHY does not
1839          * ??? send/receive packets...
1840          */
1841         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1842             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1843                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1844                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1845                 udelay(80);
1846         }
1847
1848         tw32_f(MAC_MODE, tp->mac_mode);
1849         udelay(40);
1850
1851         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1852                 /* Polled via timer. */
1853                 tw32_f(MAC_EVENT, 0);
1854         } else {
1855                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1856         }
1857         udelay(40);
1858
1859         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1860             current_link_up == 1 &&
1861             tp->link_config.active_speed == SPEED_1000 &&
1862             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1863              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1864                 udelay(120);
1865                 tw32_f(MAC_STATUS,
1866                      (MAC_STATUS_SYNC_CHANGED |
1867                       MAC_STATUS_CFG_CHANGED));
1868                 udelay(40);
1869                 tg3_write_mem(tp,
1870                               NIC_SRAM_FIRMWARE_MBOX,
1871                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1872         }
1873
1874         if (current_link_up != netif_carrier_ok(tp->dev)) {
1875                 if (current_link_up)
1876                         netif_carrier_on(tp->dev);
1877                 else
1878                         netif_carrier_off(tp->dev);
1879                 tg3_link_report(tp);
1880         }
1881
1882         return 0;
1883 }
1884
1885 struct tg3_fiber_aneginfo {
1886         int state;
1887 #define ANEG_STATE_UNKNOWN              0
1888 #define ANEG_STATE_AN_ENABLE            1
1889 #define ANEG_STATE_RESTART_INIT         2
1890 #define ANEG_STATE_RESTART              3
1891 #define ANEG_STATE_DISABLE_LINK_OK      4
1892 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1893 #define ANEG_STATE_ABILITY_DETECT       6
1894 #define ANEG_STATE_ACK_DETECT_INIT      7
1895 #define ANEG_STATE_ACK_DETECT           8
1896 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1897 #define ANEG_STATE_COMPLETE_ACK         10
1898 #define ANEG_STATE_IDLE_DETECT_INIT     11
1899 #define ANEG_STATE_IDLE_DETECT          12
1900 #define ANEG_STATE_LINK_OK              13
1901 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1902 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1903
1904         u32 flags;
1905 #define MR_AN_ENABLE            0x00000001
1906 #define MR_RESTART_AN           0x00000002
1907 #define MR_AN_COMPLETE          0x00000004
1908 #define MR_PAGE_RX              0x00000008
1909 #define MR_NP_LOADED            0x00000010
1910 #define MR_TOGGLE_TX            0x00000020
1911 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1912 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1913 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1914 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1915 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1916 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1917 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1918 #define MR_TOGGLE_RX            0x00002000
1919 #define MR_NP_RX                0x00004000
1920
1921 #define MR_LINK_OK              0x80000000
1922
1923         unsigned long link_time, cur_time;
1924
1925         u32 ability_match_cfg;
1926         int ability_match_count;
1927
1928         char ability_match, idle_match, ack_match;
1929
1930         u32 txconfig, rxconfig;
1931 #define ANEG_CFG_NP             0x00000080
1932 #define ANEG_CFG_ACK            0x00000040
1933 #define ANEG_CFG_RF2            0x00000020
1934 #define ANEG_CFG_RF1            0x00000010
1935 #define ANEG_CFG_PS2            0x00000001
1936 #define ANEG_CFG_PS1            0x00008000
1937 #define ANEG_CFG_HD             0x00004000
1938 #define ANEG_CFG_FD             0x00002000
1939 #define ANEG_CFG_INVAL          0x00001f06
1940
1941 };
1942 #define ANEG_OK         0
1943 #define ANEG_DONE       1
1944 #define ANEG_TIMER_ENAB 2
1945 #define ANEG_FAILED     -1
1946
1947 #define ANEG_STATE_SETTLE_TIME  10000
1948
1949 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1950                                    struct tg3_fiber_aneginfo *ap)
1951 {
1952         unsigned long delta;
1953         u32 rx_cfg_reg;
1954         int ret;
1955
1956         if (ap->state == ANEG_STATE_UNKNOWN) {
1957                 ap->rxconfig = 0;
1958                 ap->link_time = 0;
1959                 ap->cur_time = 0;
1960                 ap->ability_match_cfg = 0;
1961                 ap->ability_match_count = 0;
1962                 ap->ability_match = 0;
1963                 ap->idle_match = 0;
1964                 ap->ack_match = 0;
1965         }
1966         ap->cur_time++;
1967
1968         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1969                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1970
1971                 if (rx_cfg_reg != ap->ability_match_cfg) {
1972                         ap->ability_match_cfg = rx_cfg_reg;
1973                         ap->ability_match = 0;
1974                         ap->ability_match_count = 0;
1975                 } else {
1976                         if (++ap->ability_match_count > 1) {
1977                                 ap->ability_match = 1;
1978                                 ap->ability_match_cfg = rx_cfg_reg;
1979                         }
1980                 }
1981                 if (rx_cfg_reg & ANEG_CFG_ACK)
1982                         ap->ack_match = 1;
1983                 else
1984                         ap->ack_match = 0;
1985
1986                 ap->idle_match = 0;
1987         } else {
1988                 ap->idle_match = 1;
1989                 ap->ability_match_cfg = 0;
1990                 ap->ability_match_count = 0;
1991                 ap->ability_match = 0;
1992                 ap->ack_match = 0;
1993
1994                 rx_cfg_reg = 0;
1995         }
1996
1997         ap->rxconfig = rx_cfg_reg;
1998         ret = ANEG_OK;
1999
2000         switch(ap->state) {
2001         case ANEG_STATE_UNKNOWN:
2002                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2003                         ap->state = ANEG_STATE_AN_ENABLE;
2004
2005                 /* fallthru */
2006         case ANEG_STATE_AN_ENABLE:
2007                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2008                 if (ap->flags & MR_AN_ENABLE) {
2009                         ap->link_time = 0;
2010                         ap->cur_time = 0;
2011                         ap->ability_match_cfg = 0;
2012                         ap->ability_match_count = 0;
2013                         ap->ability_match = 0;
2014                         ap->idle_match = 0;
2015                         ap->ack_match = 0;
2016
2017                         ap->state = ANEG_STATE_RESTART_INIT;
2018                 } else {
2019                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2020                 }
2021                 break;
2022
2023         case ANEG_STATE_RESTART_INIT:
2024                 ap->link_time = ap->cur_time;
2025                 ap->flags &= ~(MR_NP_LOADED);
2026                 ap->txconfig = 0;
2027                 tw32(MAC_TX_AUTO_NEG, 0);
2028                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2029                 tw32_f(MAC_MODE, tp->mac_mode);
2030                 udelay(40);
2031
2032                 ret = ANEG_TIMER_ENAB;
2033                 ap->state = ANEG_STATE_RESTART;
2034
2035                 /* fallthru */
2036         case ANEG_STATE_RESTART:
2037                 delta = ap->cur_time - ap->link_time;
2038                 if (delta > ANEG_STATE_SETTLE_TIME) {
2039                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2040                 } else {
2041                         ret = ANEG_TIMER_ENAB;
2042                 }
2043                 break;
2044
2045         case ANEG_STATE_DISABLE_LINK_OK:
2046                 ret = ANEG_DONE;
2047                 break;
2048
2049         case ANEG_STATE_ABILITY_DETECT_INIT:
2050                 ap->flags &= ~(MR_TOGGLE_TX);
2051                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2052                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2053                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2054                 tw32_f(MAC_MODE, tp->mac_mode);
2055                 udelay(40);
2056
2057                 ap->state = ANEG_STATE_ABILITY_DETECT;
2058                 break;
2059
2060         case ANEG_STATE_ABILITY_DETECT:
2061                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2062                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2063                 }
2064                 break;
2065
2066         case ANEG_STATE_ACK_DETECT_INIT:
2067                 ap->txconfig |= ANEG_CFG_ACK;
2068                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2069                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2070                 tw32_f(MAC_MODE, tp->mac_mode);
2071                 udelay(40);
2072
2073                 ap->state = ANEG_STATE_ACK_DETECT;
2074
2075                 /* fallthru */
2076         case ANEG_STATE_ACK_DETECT:
2077                 if (ap->ack_match != 0) {
2078                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2079                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2080                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2081                         } else {
2082                                 ap->state = ANEG_STATE_AN_ENABLE;
2083                         }
2084                 } else if (ap->ability_match != 0 &&
2085                            ap->rxconfig == 0) {
2086                         ap->state = ANEG_STATE_AN_ENABLE;
2087                 }
2088                 break;
2089
2090         case ANEG_STATE_COMPLETE_ACK_INIT:
2091                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2092                         ret = ANEG_FAILED;
2093                         break;
2094                 }
2095                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2096                                MR_LP_ADV_HALF_DUPLEX |
2097                                MR_LP_ADV_SYM_PAUSE |
2098                                MR_LP_ADV_ASYM_PAUSE |
2099                                MR_LP_ADV_REMOTE_FAULT1 |
2100                                MR_LP_ADV_REMOTE_FAULT2 |
2101                                MR_LP_ADV_NEXT_PAGE |
2102                                MR_TOGGLE_RX |
2103                                MR_NP_RX);
2104                 if (ap->rxconfig & ANEG_CFG_FD)
2105                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2106                 if (ap->rxconfig & ANEG_CFG_HD)
2107                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2108                 if (ap->rxconfig & ANEG_CFG_PS1)
2109                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2110                 if (ap->rxconfig & ANEG_CFG_PS2)
2111                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2112                 if (ap->rxconfig & ANEG_CFG_RF1)
2113                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2114                 if (ap->rxconfig & ANEG_CFG_RF2)
2115                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2116                 if (ap->rxconfig & ANEG_CFG_NP)
2117                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2118
2119                 ap->link_time = ap->cur_time;
2120
2121                 ap->flags ^= (MR_TOGGLE_TX);
2122                 if (ap->rxconfig & 0x0008)
2123                         ap->flags |= MR_TOGGLE_RX;
2124                 if (ap->rxconfig & ANEG_CFG_NP)
2125                         ap->flags |= MR_NP_RX;
2126                 ap->flags |= MR_PAGE_RX;
2127
2128                 ap->state = ANEG_STATE_COMPLETE_ACK;
2129                 ret = ANEG_TIMER_ENAB;
2130                 break;
2131
2132         case ANEG_STATE_COMPLETE_ACK:
2133                 if (ap->ability_match != 0 &&
2134                     ap->rxconfig == 0) {
2135                         ap->state = ANEG_STATE_AN_ENABLE;
2136                         break;
2137                 }
2138                 delta = ap->cur_time - ap->link_time;
2139                 if (delta > ANEG_STATE_SETTLE_TIME) {
2140                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2141                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2142                         } else {
2143                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2144                                     !(ap->flags & MR_NP_RX)) {
2145                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2146                                 } else {
2147                                         ret = ANEG_FAILED;
2148                                 }
2149                         }
2150                 }
2151                 break;
2152
2153         case ANEG_STATE_IDLE_DETECT_INIT:
2154                 ap->link_time = ap->cur_time;
2155                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2156                 tw32_f(MAC_MODE, tp->mac_mode);
2157                 udelay(40);
2158
2159                 ap->state = ANEG_STATE_IDLE_DETECT;
2160                 ret = ANEG_TIMER_ENAB;
2161                 break;
2162
2163         case ANEG_STATE_IDLE_DETECT:
2164                 if (ap->ability_match != 0 &&
2165                     ap->rxconfig == 0) {
2166                         ap->state = ANEG_STATE_AN_ENABLE;
2167                         break;
2168                 }
2169                 delta = ap->cur_time - ap->link_time;
2170                 if (delta > ANEG_STATE_SETTLE_TIME) {
2171                         /* XXX another gem from the Broadcom driver :( */
2172                         ap->state = ANEG_STATE_LINK_OK;
2173                 }
2174                 break;
2175
2176         case ANEG_STATE_LINK_OK:
2177                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2178                 ret = ANEG_DONE;
2179                 break;
2180
2181         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2182                 /* ??? unimplemented */
2183                 break;
2184
2185         case ANEG_STATE_NEXT_PAGE_WAIT:
2186                 /* ??? unimplemented */
2187                 break;
2188
2189         default:
2190                 ret = ANEG_FAILED;
2191                 break;
2192         };
2193
2194         return ret;
2195 }
2196
2197 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2198 {
2199         int res = 0;
2200         struct tg3_fiber_aneginfo aninfo;
2201         int status = ANEG_FAILED;
2202         unsigned int tick;
2203         u32 tmp;
2204
2205         tw32_f(MAC_TX_AUTO_NEG, 0);
2206
2207         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2208         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2209         udelay(40);
2210
2211         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2212         udelay(40);
2213
2214         memset(&aninfo, 0, sizeof(aninfo));
2215         aninfo.flags |= MR_AN_ENABLE;
2216         aninfo.state = ANEG_STATE_UNKNOWN;
2217         aninfo.cur_time = 0;
2218         tick = 0;
2219         while (++tick < 195000) {
2220                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2221                 if (status == ANEG_DONE || status == ANEG_FAILED)
2222                         break;
2223
2224                 udelay(1);
2225         }
2226
2227         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2228         tw32_f(MAC_MODE, tp->mac_mode);
2229         udelay(40);
2230
2231         *flags = aninfo.flags;
2232
2233         if (status == ANEG_DONE &&
2234             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2235                              MR_LP_ADV_FULL_DUPLEX)))
2236                 res = 1;
2237
2238         return res;
2239 }
2240
2241 static void tg3_init_bcm8002(struct tg3 *tp)
2242 {
2243         u32 mac_status = tr32(MAC_STATUS);
2244         int i;
2245
2246         /* Reset when initting first time or we have a link. */
2247         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2248             !(mac_status & MAC_STATUS_PCS_SYNCED))
2249                 return;
2250
2251         /* Set PLL lock range. */
2252         tg3_writephy(tp, 0x16, 0x8007);
2253
2254         /* SW reset */
2255         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2256
2257         /* Wait for reset to complete. */
2258         /* XXX schedule_timeout() ... */
2259         for (i = 0; i < 500; i++)
2260                 udelay(10);
2261
2262         /* Config mode; select PMA/Ch 1 regs. */
2263         tg3_writephy(tp, 0x10, 0x8411);
2264
2265         /* Enable auto-lock and comdet, select txclk for tx. */
2266         tg3_writephy(tp, 0x11, 0x0a10);
2267
2268         tg3_writephy(tp, 0x18, 0x00a0);
2269         tg3_writephy(tp, 0x16, 0x41ff);
2270
2271         /* Assert and deassert POR. */
2272         tg3_writephy(tp, 0x13, 0x0400);
2273         udelay(40);
2274         tg3_writephy(tp, 0x13, 0x0000);
2275
2276         tg3_writephy(tp, 0x11, 0x0a50);
2277         udelay(40);
2278         tg3_writephy(tp, 0x11, 0x0a10);
2279
2280         /* Wait for signal to stabilize */
2281         /* XXX schedule_timeout() ... */
2282         for (i = 0; i < 15000; i++)
2283                 udelay(10);
2284
2285         /* Deselect the channel register so we can read the PHYID
2286          * later.
2287          */
2288         tg3_writephy(tp, 0x10, 0x8011);
2289 }
2290
2291 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2292 {
2293         u32 sg_dig_ctrl, sg_dig_status;
2294         u32 serdes_cfg, expected_sg_dig_ctrl;
2295         int workaround, port_a;
2296         int current_link_up;
2297
2298         serdes_cfg = 0;
2299         expected_sg_dig_ctrl = 0;
2300         workaround = 0;
2301         port_a = 1;
2302         current_link_up = 0;
2303
2304         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2305             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2306                 workaround = 1;
2307                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2308                         port_a = 0;
2309
2310                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2311                 /* preserve bits 20-23 for voltage regulator */
2312                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2313         }
2314
2315         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2316
2317         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2318                 if (sg_dig_ctrl & (1 << 31)) {
2319                         if (workaround) {
2320                                 u32 val = serdes_cfg;
2321
2322                                 if (port_a)
2323                                         val |= 0xc010000;
2324                                 else
2325                                         val |= 0x4010000;
2326                                 tw32_f(MAC_SERDES_CFG, val);
2327                         }
2328                         tw32_f(SG_DIG_CTRL, 0x01388400);
2329                 }
2330                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2331                         tg3_setup_flow_control(tp, 0, 0);
2332                         current_link_up = 1;
2333                 }
2334                 goto out;
2335         }
2336
2337         /* Want auto-negotiation.  */
2338         expected_sg_dig_ctrl = 0x81388400;
2339
2340         /* Pause capability */
2341         expected_sg_dig_ctrl |= (1 << 11);
2342
2343         /* Asymettric pause */
2344         expected_sg_dig_ctrl |= (1 << 12);
2345
2346         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2347                 if (workaround)
2348                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2349                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2350                 udelay(5);
2351                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2352
2353                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2354         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2355                                  MAC_STATUS_SIGNAL_DET)) {
2356                 int i;
2357
2358                 /* Giver time to negotiate (~200ms) */
2359                 for (i = 0; i < 40000; i++) {
2360                         sg_dig_status = tr32(SG_DIG_STATUS);
2361                         if (sg_dig_status & (0x3))
2362                                 break;
2363                         udelay(5);
2364                 }
2365                 mac_status = tr32(MAC_STATUS);
2366
2367                 if ((sg_dig_status & (1 << 1)) &&
2368                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2369                         u32 local_adv, remote_adv;
2370
2371                         local_adv = ADVERTISE_PAUSE_CAP;
2372                         remote_adv = 0;
2373                         if (sg_dig_status & (1 << 19))
2374                                 remote_adv |= LPA_PAUSE_CAP;
2375                         if (sg_dig_status & (1 << 20))
2376                                 remote_adv |= LPA_PAUSE_ASYM;
2377
2378                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2379                         current_link_up = 1;
2380                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2381                 } else if (!(sg_dig_status & (1 << 1))) {
2382                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2383                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2384                         else {
2385                                 if (workaround) {
2386                                         u32 val = serdes_cfg;
2387
2388                                         if (port_a)
2389                                                 val |= 0xc010000;
2390                                         else
2391                                                 val |= 0x4010000;
2392
2393                                         tw32_f(MAC_SERDES_CFG, val);
2394                                 }
2395
2396                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2397                                 udelay(40);
2398
2399                                 /* Link parallel detection - link is up */
2400                                 /* only if we have PCS_SYNC and not */
2401                                 /* receiving config code words */
2402                                 mac_status = tr32(MAC_STATUS);
2403                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2404                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2405                                         tg3_setup_flow_control(tp, 0, 0);
2406                                         current_link_up = 1;
2407                                 }
2408                         }
2409                 }
2410         }
2411
2412 out:
2413         return current_link_up;
2414 }
2415
2416 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2417 {
2418         int current_link_up = 0;
2419
2420         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2421                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2422                 goto out;
2423         }
2424
2425         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2426                 u32 flags;
2427                 int i;
2428   
2429                 if (fiber_autoneg(tp, &flags)) {
2430                         u32 local_adv, remote_adv;
2431
2432                         local_adv = ADVERTISE_PAUSE_CAP;
2433                         remote_adv = 0;
2434                         if (flags & MR_LP_ADV_SYM_PAUSE)
2435                                 remote_adv |= LPA_PAUSE_CAP;
2436                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2437                                 remote_adv |= LPA_PAUSE_ASYM;
2438
2439                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2440
2441                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2442                         current_link_up = 1;
2443                 }
2444                 for (i = 0; i < 30; i++) {
2445                         udelay(20);
2446                         tw32_f(MAC_STATUS,
2447                                (MAC_STATUS_SYNC_CHANGED |
2448                                 MAC_STATUS_CFG_CHANGED));
2449                         udelay(40);
2450                         if ((tr32(MAC_STATUS) &
2451                              (MAC_STATUS_SYNC_CHANGED |
2452                               MAC_STATUS_CFG_CHANGED)) == 0)
2453                                 break;
2454                 }
2455
2456                 mac_status = tr32(MAC_STATUS);
2457                 if (current_link_up == 0 &&
2458                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2459                     !(mac_status & MAC_STATUS_RCVD_CFG))
2460                         current_link_up = 1;
2461         } else {
2462                 /* Forcing 1000FD link up. */
2463                 current_link_up = 1;
2464                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2465
2466                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2467                 udelay(40);
2468         }
2469
2470 out:
2471         return current_link_up;
2472 }
2473
2474 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2475 {
2476         u32 orig_pause_cfg;
2477         u16 orig_active_speed;
2478         u8 orig_active_duplex;
2479         u32 mac_status;
2480         int current_link_up;
2481         int i;
2482
2483         orig_pause_cfg =
2484                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2485                                   TG3_FLAG_TX_PAUSE));
2486         orig_active_speed = tp->link_config.active_speed;
2487         orig_active_duplex = tp->link_config.active_duplex;
2488
2489         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2490             netif_carrier_ok(tp->dev) &&
2491             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2492                 mac_status = tr32(MAC_STATUS);
2493                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2494                                MAC_STATUS_SIGNAL_DET |
2495                                MAC_STATUS_CFG_CHANGED |
2496                                MAC_STATUS_RCVD_CFG);
2497                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2498                                    MAC_STATUS_SIGNAL_DET)) {
2499                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2500                                             MAC_STATUS_CFG_CHANGED));
2501                         return 0;
2502                 }
2503         }
2504
2505         tw32_f(MAC_TX_AUTO_NEG, 0);
2506
2507         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2508         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2509         tw32_f(MAC_MODE, tp->mac_mode);
2510         udelay(40);
2511
2512         if (tp->phy_id == PHY_ID_BCM8002)
2513                 tg3_init_bcm8002(tp);
2514
2515         /* Enable link change event even when serdes polling.  */
2516         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2517         udelay(40);
2518
2519         current_link_up = 0;
2520         mac_status = tr32(MAC_STATUS);
2521
2522         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2523                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2524         else
2525                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2526
2527         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2528         tw32_f(MAC_MODE, tp->mac_mode);
2529         udelay(40);
2530
2531         tp->hw_status->status =
2532                 (SD_STATUS_UPDATED |
2533                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2534
2535         for (i = 0; i < 100; i++) {
2536                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2537                                     MAC_STATUS_CFG_CHANGED));
2538                 udelay(5);
2539                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2540                                          MAC_STATUS_CFG_CHANGED)) == 0)
2541                         break;
2542         }
2543
2544         mac_status = tr32(MAC_STATUS);
2545         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2546                 current_link_up = 0;
2547                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2548                         tw32_f(MAC_MODE, (tp->mac_mode |
2549                                           MAC_MODE_SEND_CONFIGS));
2550                         udelay(1);
2551                         tw32_f(MAC_MODE, tp->mac_mode);
2552                 }
2553         }
2554
2555         if (current_link_up == 1) {
2556                 tp->link_config.active_speed = SPEED_1000;
2557                 tp->link_config.active_duplex = DUPLEX_FULL;
2558                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2559                                     LED_CTRL_LNKLED_OVERRIDE |
2560                                     LED_CTRL_1000MBPS_ON));
2561         } else {
2562                 tp->link_config.active_speed = SPEED_INVALID;
2563                 tp->link_config.active_duplex = DUPLEX_INVALID;
2564                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2565                                     LED_CTRL_LNKLED_OVERRIDE |
2566                                     LED_CTRL_TRAFFIC_OVERRIDE));
2567         }
2568
2569         if (current_link_up != netif_carrier_ok(tp->dev)) {
2570                 if (current_link_up)
2571                         netif_carrier_on(tp->dev);
2572                 else
2573                         netif_carrier_off(tp->dev);
2574                 tg3_link_report(tp);
2575         } else {
2576                 u32 now_pause_cfg =
2577                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2578                                          TG3_FLAG_TX_PAUSE);
2579                 if (orig_pause_cfg != now_pause_cfg ||
2580                     orig_active_speed != tp->link_config.active_speed ||
2581                     orig_active_duplex != tp->link_config.active_duplex)
2582                         tg3_link_report(tp);
2583         }
2584
2585         return 0;
2586 }
2587
2588 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2589 {
2590         int current_link_up, err = 0;
2591         u32 bmsr, bmcr;
2592         u16 current_speed;
2593         u8 current_duplex;
2594
2595         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2596         tw32_f(MAC_MODE, tp->mac_mode);
2597         udelay(40);
2598
2599         tw32(MAC_EVENT, 0);
2600
2601         tw32_f(MAC_STATUS,
2602              (MAC_STATUS_SYNC_CHANGED |
2603               MAC_STATUS_CFG_CHANGED |
2604               MAC_STATUS_MI_COMPLETION |
2605               MAC_STATUS_LNKSTATE_CHANGED));
2606         udelay(40);
2607
2608         if (force_reset)
2609                 tg3_phy_reset(tp);
2610
2611         current_link_up = 0;
2612         current_speed = SPEED_INVALID;
2613         current_duplex = DUPLEX_INVALID;
2614
2615         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2616         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2617
2618         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2619
2620         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2621             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2622                 /* do nothing, just check for link up at the end */
2623         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2624                 u32 adv, new_adv;
2625
2626                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2627                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2628                                   ADVERTISE_1000XPAUSE |
2629                                   ADVERTISE_1000XPSE_ASYM |
2630                                   ADVERTISE_SLCT);
2631
2632                 /* Always advertise symmetric PAUSE just like copper */
2633                 new_adv |= ADVERTISE_1000XPAUSE;
2634
2635                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2636                         new_adv |= ADVERTISE_1000XHALF;
2637                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2638                         new_adv |= ADVERTISE_1000XFULL;
2639
2640                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2641                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2642                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2643                         tg3_writephy(tp, MII_BMCR, bmcr);
2644
2645                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2646                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2647                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2648
2649                         return err;
2650                 }
2651         } else {
2652                 u32 new_bmcr;
2653
2654                 bmcr &= ~BMCR_SPEED1000;
2655                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2656
2657                 if (tp->link_config.duplex == DUPLEX_FULL)
2658                         new_bmcr |= BMCR_FULLDPLX;
2659
2660                 if (new_bmcr != bmcr) {
2661                         /* BMCR_SPEED1000 is a reserved bit that needs
2662                          * to be set on write.
2663                          */
2664                         new_bmcr |= BMCR_SPEED1000;
2665
2666                         /* Force a linkdown */
2667                         if (netif_carrier_ok(tp->dev)) {
2668                                 u32 adv;
2669
2670                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2671                                 adv &= ~(ADVERTISE_1000XFULL |
2672                                          ADVERTISE_1000XHALF |
2673                                          ADVERTISE_SLCT);
2674                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2675                                 tg3_writephy(tp, MII_BMCR, bmcr |
2676                                                            BMCR_ANRESTART |
2677                                                            BMCR_ANENABLE);
2678                                 udelay(10);
2679                                 netif_carrier_off(tp->dev);
2680                         }
2681                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2682                         bmcr = new_bmcr;
2683                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2684                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2685                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2686                 }
2687         }
2688
2689         if (bmsr & BMSR_LSTATUS) {
2690                 current_speed = SPEED_1000;
2691                 current_link_up = 1;
2692                 if (bmcr & BMCR_FULLDPLX)
2693                         current_duplex = DUPLEX_FULL;
2694                 else
2695                         current_duplex = DUPLEX_HALF;
2696
2697                 if (bmcr & BMCR_ANENABLE) {
2698                         u32 local_adv, remote_adv, common;
2699
2700                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2701                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2702                         common = local_adv & remote_adv;
2703                         if (common & (ADVERTISE_1000XHALF |
2704                                       ADVERTISE_1000XFULL)) {
2705                                 if (common & ADVERTISE_1000XFULL)
2706                                         current_duplex = DUPLEX_FULL;
2707                                 else
2708                                         current_duplex = DUPLEX_HALF;
2709
2710                                 tg3_setup_flow_control(tp, local_adv,
2711                                                        remote_adv);
2712                         }
2713                         else
2714                                 current_link_up = 0;
2715                 }
2716         }
2717
2718         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2719         if (tp->link_config.active_duplex == DUPLEX_HALF)
2720                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2721
2722         tw32_f(MAC_MODE, tp->mac_mode);
2723         udelay(40);
2724
2725         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2726
2727         tp->link_config.active_speed = current_speed;
2728         tp->link_config.active_duplex = current_duplex;
2729
2730         if (current_link_up != netif_carrier_ok(tp->dev)) {
2731                 if (current_link_up)
2732                         netif_carrier_on(tp->dev);
2733                 else {
2734                         netif_carrier_off(tp->dev);
2735                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2736                 }
2737                 tg3_link_report(tp);
2738         }
2739         return err;
2740 }
2741
2742 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2743 {
2744         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2745                 /* Give autoneg time to complete. */
2746                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2747                 return;
2748         }
2749         if (!netif_carrier_ok(tp->dev) &&
2750             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2751                 u32 bmcr;
2752
2753                 tg3_readphy(tp, MII_BMCR, &bmcr);
2754                 if (bmcr & BMCR_ANENABLE) {
2755                         u32 phy1, phy2;
2756
2757                         /* Select shadow register 0x1f */
2758                         tg3_writephy(tp, 0x1c, 0x7c00);
2759                         tg3_readphy(tp, 0x1c, &phy1);
2760
2761                         /* Select expansion interrupt status register */
2762                         tg3_writephy(tp, 0x17, 0x0f01);
2763                         tg3_readphy(tp, 0x15, &phy2);
2764                         tg3_readphy(tp, 0x15, &phy2);
2765
2766                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2767                                 /* We have signal detect and not receiving
2768                                  * config code words, link is up by parallel
2769                                  * detection.
2770                                  */
2771
2772                                 bmcr &= ~BMCR_ANENABLE;
2773                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2774                                 tg3_writephy(tp, MII_BMCR, bmcr);
2775                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2776                         }
2777                 }
2778         }
2779         else if (netif_carrier_ok(tp->dev) &&
2780                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2781                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2782                 u32 phy2;
2783
2784                 /* Select expansion interrupt status register */
2785                 tg3_writephy(tp, 0x17, 0x0f01);
2786                 tg3_readphy(tp, 0x15, &phy2);
2787                 if (phy2 & 0x20) {
2788                         u32 bmcr;
2789
2790                         /* Config code words received, turn on autoneg. */
2791                         tg3_readphy(tp, MII_BMCR, &bmcr);
2792                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2793
2794                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2795
2796                 }
2797         }
2798 }
2799
2800 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2801 {
2802         int err;
2803
2804         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2805                 err = tg3_setup_fiber_phy(tp, force_reset);
2806         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2807                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2808         } else {
2809                 err = tg3_setup_copper_phy(tp, force_reset);
2810         }
2811
2812         if (tp->link_config.active_speed == SPEED_1000 &&
2813             tp->link_config.active_duplex == DUPLEX_HALF)
2814                 tw32(MAC_TX_LENGTHS,
2815                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2816                       (6 << TX_LENGTHS_IPG_SHIFT) |
2817                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2818         else
2819                 tw32(MAC_TX_LENGTHS,
2820                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2821                       (6 << TX_LENGTHS_IPG_SHIFT) |
2822                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2823
2824         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2825                 if (netif_carrier_ok(tp->dev)) {
2826                         tw32(HOSTCC_STAT_COAL_TICKS,
2827                              tp->coal.stats_block_coalesce_usecs);
2828                 } else {
2829                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2830                 }
2831         }
2832
2833         return err;
2834 }
2835
2836 /* Tigon3 never reports partial packet sends.  So we do not
2837  * need special logic to handle SKBs that have not had all
2838  * of their frags sent yet, like SunGEM does.
2839  */
2840 static void tg3_tx(struct tg3 *tp)
2841 {
2842         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2843         u32 sw_idx = tp->tx_cons;
2844
2845         while (sw_idx != hw_idx) {
2846                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2847                 struct sk_buff *skb = ri->skb;
2848                 int i;
2849
2850                 if (unlikely(skb == NULL))
2851                         BUG();
2852
2853                 pci_unmap_single(tp->pdev,
2854                                  pci_unmap_addr(ri, mapping),
2855                                  skb_headlen(skb),
2856                                  PCI_DMA_TODEVICE);
2857
2858                 ri->skb = NULL;
2859
2860                 sw_idx = NEXT_TX(sw_idx);
2861
2862                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2863                         if (unlikely(sw_idx == hw_idx))
2864                                 BUG();
2865
2866                         ri = &tp->tx_buffers[sw_idx];
2867                         if (unlikely(ri->skb != NULL))
2868                                 BUG();
2869
2870                         pci_unmap_page(tp->pdev,
2871                                        pci_unmap_addr(ri, mapping),
2872                                        skb_shinfo(skb)->frags[i].size,
2873                                        PCI_DMA_TODEVICE);
2874
2875                         sw_idx = NEXT_TX(sw_idx);
2876                 }
2877
2878                 dev_kfree_skb(skb);
2879         }
2880
2881         tp->tx_cons = sw_idx;
2882
2883         if (netif_queue_stopped(tp->dev) &&
2884             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2885                 netif_wake_queue(tp->dev);
2886 }
2887
2888 /* Returns size of skb allocated or < 0 on error.
2889  *
2890  * We only need to fill in the address because the other members
2891  * of the RX descriptor are invariant, see tg3_init_rings.
2892  *
2893  * Note the purposeful assymetry of cpu vs. chip accesses.  For
2894  * posting buffers we only dirty the first cache line of the RX
2895  * descriptor (containing the address).  Whereas for the RX status
2896  * buffers the cpu only reads the last cacheline of the RX descriptor
2897  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2898  */
2899 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2900                             int src_idx, u32 dest_idx_unmasked)
2901 {
2902         struct tg3_rx_buffer_desc *desc;
2903         struct ring_info *map, *src_map;
2904         struct sk_buff *skb;
2905         dma_addr_t mapping;
2906         int skb_size, dest_idx;
2907
2908         src_map = NULL;
2909         switch (opaque_key) {
2910         case RXD_OPAQUE_RING_STD:
2911                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2912                 desc = &tp->rx_std[dest_idx];
2913                 map = &tp->rx_std_buffers[dest_idx];
2914                 if (src_idx >= 0)
2915                         src_map = &tp->rx_std_buffers[src_idx];
2916                 skb_size = tp->rx_pkt_buf_sz;
2917                 break;
2918
2919         case RXD_OPAQUE_RING_JUMBO:
2920                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2921                 desc = &tp->rx_jumbo[dest_idx];
2922                 map = &tp->rx_jumbo_buffers[dest_idx];
2923                 if (src_idx >= 0)
2924                         src_map = &tp->rx_jumbo_buffers[src_idx];
2925                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2926                 break;
2927
2928         default:
2929                 return -EINVAL;
2930         };
2931
2932         /* Do not overwrite any of the map or rp information
2933          * until we are sure we can commit to a new buffer.
2934          *
2935          * Callers depend upon this behavior and assume that
2936          * we leave everything unchanged if we fail.
2937          */
2938         skb = dev_alloc_skb(skb_size);
2939         if (skb == NULL)
2940                 return -ENOMEM;
2941
2942         skb->dev = tp->dev;
2943         skb_reserve(skb, tp->rx_offset);
2944
2945         mapping = pci_map_single(tp->pdev, skb->data,
2946                                  skb_size - tp->rx_offset,
2947                                  PCI_DMA_FROMDEVICE);
2948
2949         map->skb = skb;
2950         pci_unmap_addr_set(map, mapping, mapping);
2951
2952         if (src_map != NULL)
2953                 src_map->skb = NULL;
2954
2955         desc->addr_hi = ((u64)mapping >> 32);
2956         desc->addr_lo = ((u64)mapping & 0xffffffff);
2957
2958         return skb_size;
2959 }
2960
2961 /* We only need to move over in the address because the other
2962  * members of the RX descriptor are invariant.  See notes above
2963  * tg3_alloc_rx_skb for full details.
2964  */
2965 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2966                            int src_idx, u32 dest_idx_unmasked)
2967 {
2968         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2969         struct ring_info *src_map, *dest_map;
2970         int dest_idx;
2971
2972         switch (opaque_key) {
2973         case RXD_OPAQUE_RING_STD:
2974                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2975                 dest_desc = &tp->rx_std[dest_idx];
2976                 dest_map = &tp->rx_std_buffers[dest_idx];
2977                 src_desc = &tp->rx_std[src_idx];
2978                 src_map = &tp->rx_std_buffers[src_idx];
2979                 break;
2980
2981         case RXD_OPAQUE_RING_JUMBO:
2982                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2983                 dest_desc = &tp->rx_jumbo[dest_idx];
2984                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2985                 src_desc = &tp->rx_jumbo[src_idx];
2986                 src_map = &tp->rx_jumbo_buffers[src_idx];
2987                 break;
2988
2989         default:
2990                 return;
2991         };
2992
2993         dest_map->skb = src_map->skb;
2994         pci_unmap_addr_set(dest_map, mapping,
2995                            pci_unmap_addr(src_map, mapping));
2996         dest_desc->addr_hi = src_desc->addr_hi;
2997         dest_desc->addr_lo = src_desc->addr_lo;
2998
2999         src_map->skb = NULL;
3000 }
3001
3002 #if TG3_VLAN_TAG_USED
3003 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3004 {
3005         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3006 }
3007 #endif
3008
3009 /* The RX ring scheme is composed of multiple rings which post fresh
3010  * buffers to the chip, and one special ring the chip uses to report
3011  * status back to the host.
3012  *
3013  * The special ring reports the status of received packets to the
3014  * host.  The chip does not write into the original descriptor the
3015  * RX buffer was obtained from.  The chip simply takes the original
3016  * descriptor as provided by the host, updates the status and length
3017  * field, then writes this into the next status ring entry.
3018  *
3019  * Each ring the host uses to post buffers to the chip is described
3020  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
3021  * it is first placed into the on-chip ram.  When the packet's length
3022  * is known, it walks down the TG3_BDINFO entries to select the ring.
3023  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3024  * which is within the range of the new packet's length is chosen.
3025  *
3026  * The "separate ring for rx status" scheme may sound queer, but it makes
3027  * sense from a cache coherency perspective.  If only the host writes
3028  * to the buffer post rings, and only the chip writes to the rx status
3029  * rings, then cache lines never move beyond shared-modified state.
3030  * If both the host and chip were to write into the same ring, cache line
3031  * eviction could occur since both entities want it in an exclusive state.
3032  */
3033 static int tg3_rx(struct tg3 *tp, int budget)
3034 {
3035         u32 work_mask;
3036         u32 sw_idx = tp->rx_rcb_ptr;
3037         u16 hw_idx;
3038         int received;
3039
3040         hw_idx = tp->hw_status->idx[0].rx_producer;
3041         /*
3042          * We need to order the read of hw_idx and the read of
3043          * the opaque cookie.
3044          */
3045         rmb();
3046         work_mask = 0;
3047         received = 0;
3048         while (sw_idx != hw_idx && budget > 0) {
3049                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3050                 unsigned int len;
3051                 struct sk_buff *skb;
3052                 dma_addr_t dma_addr;
3053                 u32 opaque_key, desc_idx, *post_ptr;
3054
3055                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3056                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3057                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3058                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3059                                                   mapping);
3060                         skb = tp->rx_std_buffers[desc_idx].skb;
3061                         post_ptr = &tp->rx_std_ptr;
3062                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3063                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3064                                                   mapping);
3065                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3066                         post_ptr = &tp->rx_jumbo_ptr;
3067                 }
3068                 else {
3069                         goto next_pkt_nopost;
3070                 }
3071
3072                 work_mask |= opaque_key;
3073
3074                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3075                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3076                 drop_it:
3077                         tg3_recycle_rx(tp, opaque_key,
3078                                        desc_idx, *post_ptr);
3079                 drop_it_no_recycle:
3080                         /* Other statistics kept track of by card. */
3081                         tp->net_stats.rx_dropped++;
3082                         goto next_pkt;
3083                 }
3084
3085                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3086
3087                 if (len > RX_COPY_THRESHOLD 
3088                         && tp->rx_offset == 2
3089                         /* rx_offset != 2 iff this is a 5701 card running
3090                          * in PCI-X mode [see tg3_get_invariants()] */
3091                 ) {
3092                         int skb_size;
3093
3094                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3095                                                     desc_idx, *post_ptr);
3096                         if (skb_size < 0)
3097                                 goto drop_it;
3098
3099                         pci_unmap_single(tp->pdev, dma_addr,
3100                                          skb_size - tp->rx_offset,
3101                                          PCI_DMA_FROMDEVICE);
3102
3103                         skb_put(skb, len);
3104                 } else {
3105                         struct sk_buff *copy_skb;
3106
3107                         tg3_recycle_rx(tp, opaque_key,
3108                                        desc_idx, *post_ptr);
3109
3110                         copy_skb = dev_alloc_skb(len + 2);
3111                         if (copy_skb == NULL)
3112                                 goto drop_it_no_recycle;
3113
3114                         copy_skb->dev = tp->dev;
3115                         skb_reserve(copy_skb, 2);
3116                         skb_put(copy_skb, len);
3117                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3118                         memcpy(copy_skb->data, skb->data, len);
3119                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3120
3121                         /* We'll reuse the original ring buffer. */
3122                         skb = copy_skb;
3123                 }
3124
3125                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3126                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3127                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3128                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3129                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3130                 else
3131                         skb->ip_summed = CHECKSUM_NONE;
3132
3133                 skb->protocol = eth_type_trans(skb, tp->dev);
3134 #if TG3_VLAN_TAG_USED
3135                 if (tp->vlgrp != NULL &&
3136                     desc->type_flags & RXD_FLAG_VLAN) {
3137                         tg3_vlan_rx(tp, skb,
3138                                     desc->err_vlan & RXD_VLAN_MASK);
3139                 } else
3140 #endif
3141                         netif_receive_skb(skb);
3142
3143                 tp->dev->last_rx = jiffies;
3144                 received++;
3145                 budget--;
3146
3147 next_pkt:
3148                 (*post_ptr)++;
3149 next_pkt_nopost:
3150                 sw_idx++;
3151                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3152
3153                 /* Refresh hw_idx to see if there is new work */
3154                 if (sw_idx == hw_idx) {
3155                         hw_idx = tp->hw_status->idx[0].rx_producer;
3156                         rmb();
3157                 }
3158         }
3159
3160         /* ACK the status ring. */
3161         tp->rx_rcb_ptr = sw_idx;
3162         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3163
3164         /* Refill RX ring(s). */
3165         if (work_mask & RXD_OPAQUE_RING_STD) {
3166                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3167                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3168                              sw_idx);
3169         }
3170         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3171                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3172                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3173                              sw_idx);
3174         }
3175         mmiowb();
3176
3177         return received;
3178 }
3179
3180 static int tg3_poll(struct net_device *netdev, int *budget)
3181 {
3182         struct tg3 *tp = netdev_priv(netdev);
3183         struct tg3_hw_status *sblk = tp->hw_status;
3184         int done;
3185
3186         /* handle link change and other phy events */
3187         if (!(tp->tg3_flags &
3188               (TG3_FLAG_USE_LINKCHG_REG |
3189                TG3_FLAG_POLL_SERDES))) {
3190                 if (sblk->status & SD_STATUS_LINK_CHG) {
3191                         sblk->status = SD_STATUS_UPDATED |
3192                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3193                         spin_lock(&tp->lock);
3194                         tg3_setup_phy(tp, 0);
3195                         spin_unlock(&tp->lock);
3196                 }
3197         }
3198
3199         /* run TX completion thread */
3200         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3201                 spin_lock(&tp->tx_lock);
3202                 tg3_tx(tp);
3203                 spin_unlock(&tp->tx_lock);
3204         }
3205
3206         /* run RX thread, within the bounds set by NAPI.
3207          * All RX "locking" is done by ensuring outside
3208          * code synchronizes with dev->poll()
3209          */
3210         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3211                 int orig_budget = *budget;
3212                 int work_done;
3213
3214                 if (orig_budget > netdev->quota)
3215                         orig_budget = netdev->quota;
3216
3217                 work_done = tg3_rx(tp, orig_budget);
3218
3219                 *budget -= work_done;
3220                 netdev->quota -= work_done;
3221         }
3222
3223         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
3224                 tp->last_tag = sblk->status_tag;
3225         rmb();
3226         sblk->status &= ~SD_STATUS_UPDATED;
3227
3228         /* if no more work, tell net stack and NIC we're done */
3229         done = !tg3_has_work(tp);
3230         if (done) {
3231                 spin_lock(&tp->lock);
3232                 netif_rx_complete(netdev);
3233                 tg3_restart_ints(tp);
3234                 spin_unlock(&tp->lock);
3235         }
3236
3237         return (done ? 0 : 1);
3238 }
3239
3240 static void tg3_irq_quiesce(struct tg3 *tp)
3241 {
3242         BUG_ON(tp->irq_sync);
3243
3244         tp->irq_sync = 1;
3245         smp_mb();
3246
3247         synchronize_irq(tp->pdev->irq);
3248 }
3249
3250 static inline int tg3_irq_sync(struct tg3 *tp)
3251 {
3252         return tp->irq_sync;
3253 }
3254
3255 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3256  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3257  * with as well.  Most of the time, this is not necessary except when
3258  * shutting down the device.
3259  */
3260 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3261 {
3262         if (irq_sync)
3263                 tg3_irq_quiesce(tp);
3264         spin_lock_bh(&tp->lock);
3265         spin_lock(&tp->tx_lock);
3266 }
3267
3268 static inline void tg3_full_unlock(struct tg3 *tp)
3269 {
3270         spin_unlock(&tp->tx_lock);
3271         spin_unlock_bh(&tp->lock);
3272 }
3273
3274 /* MSI ISR - No need to check for interrupt sharing and no need to
3275  * flush status block and interrupt mailbox. PCI ordering rules
3276  * guarantee that MSI will arrive after the status block.
3277  */
3278 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3279 {
3280         struct net_device *dev = dev_id;
3281         struct tg3 *tp = netdev_priv(dev);
3282         struct tg3_hw_status *sblk = tp->hw_status;
3283
3284         /*
3285          * Writing any value to intr-mbox-0 clears PCI INTA# and
3286          * chip-internal interrupt pending events.
3287          * Writing non-zero to intr-mbox-0 additional tells the
3288          * NIC to stop sending us irqs, engaging "in-intr-handler"
3289          * event coalescing.
3290          */
3291         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3292         tp->last_tag = sblk->status_tag;
3293         rmb();
3294         if (tg3_irq_sync(tp))
3295                 goto out;
3296         sblk->status &= ~SD_STATUS_UPDATED;
3297         if (likely(tg3_has_work(tp)))
3298                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3299         else {
3300                 /* No work, re-enable interrupts.  */
3301                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3302                              tp->last_tag << 24);
3303         }
3304 out:
3305         return IRQ_RETVAL(1);
3306 }
3307
3308 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3309 {
3310         struct net_device *dev = dev_id;
3311         struct tg3 *tp = netdev_priv(dev);
3312         struct tg3_hw_status *sblk = tp->hw_status;
3313         unsigned int handled = 1;
3314
3315         /* In INTx mode, it is possible for the interrupt to arrive at
3316          * the CPU before the status block posted prior to the interrupt.
3317          * Reading the PCI State register will confirm whether the
3318          * interrupt is ours and will flush the status block.
3319          */
3320         if ((sblk->status & SD_STATUS_UPDATED) ||
3321             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3322                 /*
3323                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3324                  * chip-internal interrupt pending events.
3325                  * Writing non-zero to intr-mbox-0 additional tells the
3326                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3327                  * event coalescing.
3328                  */
3329                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3330                              0x00000001);
3331                 if (tg3_irq_sync(tp))
3332                         goto out;
3333                 sblk->status &= ~SD_STATUS_UPDATED;
3334                 if (likely(tg3_has_work(tp)))
3335                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3336                 else {
3337                         /* No work, shared interrupt perhaps?  re-enable
3338                          * interrupts, and flush that PCI write
3339                          */
3340                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3341                                 0x00000000);
3342                 }
3343         } else {        /* shared interrupt */
3344                 handled = 0;
3345         }
3346 out:
3347         return IRQ_RETVAL(handled);
3348 }
3349
3350 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3351 {
3352         struct net_device *dev = dev_id;
3353         struct tg3 *tp = netdev_priv(dev);
3354         struct tg3_hw_status *sblk = tp->hw_status;
3355         unsigned int handled = 1;
3356
3357         /* In INTx mode, it is possible for the interrupt to arrive at
3358          * the CPU before the status block posted prior to the interrupt.
3359          * Reading the PCI State register will confirm whether the
3360          * interrupt is ours and will flush the status block.
3361          */
3362         if ((sblk->status & SD_STATUS_UPDATED) ||
3363             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3364                 /*
3365                  * writing any value to intr-mbox-0 clears PCI INTA# and
3366                  * chip-internal interrupt pending events.
3367                  * writing non-zero to intr-mbox-0 additional tells the
3368                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3369                  * event coalescing.
3370                  */
3371                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3372                              0x00000001);
3373                 tp->last_tag = sblk->status_tag;
3374                 rmb();
3375                 if (tg3_irq_sync(tp))
3376                         goto out;
3377                 sblk->status &= ~SD_STATUS_UPDATED;
3378                 if (likely(tg3_has_work(tp)))
3379                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3380                 else {
3381                         /* no work, shared interrupt perhaps?  re-enable
3382                          * interrupts, and flush that PCI write
3383                          */
3384                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3385                                        tp->last_tag << 24);
3386                 }
3387         } else {        /* shared interrupt */
3388                 handled = 0;
3389         }
3390 out:
3391         return IRQ_RETVAL(handled);
3392 }
3393
3394 /* ISR for interrupt test */
3395 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3396                 struct pt_regs *regs)
3397 {
3398         struct net_device *dev = dev_id;
3399         struct tg3 *tp = netdev_priv(dev);
3400         struct tg3_hw_status *sblk = tp->hw_status;
3401
3402         if (sblk->status & SD_STATUS_UPDATED) {
3403                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3404                              0x00000001);
3405                 return IRQ_RETVAL(1);
3406         }
3407         return IRQ_RETVAL(0);
3408 }
3409
3410 static int tg3_init_hw(struct tg3 *);
3411 static int tg3_halt(struct tg3 *, int, int);
3412
3413 #ifdef CONFIG_NET_POLL_CONTROLLER
3414 static void tg3_poll_controller(struct net_device *dev)
3415 {
3416         struct tg3 *tp = netdev_priv(dev);
3417
3418         tg3_interrupt(tp->pdev->irq, dev, NULL);
3419 }
3420 #endif
3421
3422 static void tg3_reset_task(void *_data)
3423 {
3424         struct tg3 *tp = _data;
3425         unsigned int restart_timer;
3426
3427         tg3_netif_stop(tp);
3428
3429         tg3_full_lock(tp, 1);
3430
3431         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3432         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3433
3434         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3435         tg3_init_hw(tp);
3436
3437         tg3_netif_start(tp);
3438
3439         tg3_full_unlock(tp);
3440
3441         if (restart_timer)
3442                 mod_timer(&tp->timer, jiffies + 1);
3443 }
3444
3445 static void tg3_tx_timeout(struct net_device *dev)
3446 {
3447         struct tg3 *tp = netdev_priv(dev);
3448
3449         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3450                dev->name);
3451
3452         schedule_work(&tp->reset_task);
3453 }
3454
3455 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3456
3457 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3458                                        u32 guilty_entry, int guilty_len,
3459                                        u32 last_plus_one, u32 *start, u32 mss)
3460 {
3461         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3462         dma_addr_t new_addr;
3463         u32 entry = *start;
3464         int i;
3465
3466         if (!new_skb) {
3467                 dev_kfree_skb(skb);
3468                 return -1;
3469         }
3470
3471         /* New SKB is guaranteed to be linear. */
3472         entry = *start;
3473         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3474                                   PCI_DMA_TODEVICE);
3475         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3476                     (skb->ip_summed == CHECKSUM_HW) ?
3477                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3478         *start = NEXT_TX(entry);
3479
3480         /* Now clean up the sw ring entries. */
3481         i = 0;
3482         while (entry != last_plus_one) {
3483                 int len;
3484
3485                 if (i == 0)
3486                         len = skb_headlen(skb);
3487                 else
3488                         len = skb_shinfo(skb)->frags[i-1].size;
3489                 pci_unmap_single(tp->pdev,
3490                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3491                                  len, PCI_DMA_TODEVICE);
3492                 if (i == 0) {
3493                         tp->tx_buffers[entry].skb = new_skb;
3494                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3495                 } else {
3496                         tp->tx_buffers[entry].skb = NULL;
3497                 }
3498                 entry = NEXT_TX(entry);
3499                 i++;
3500         }
3501
3502         dev_kfree_skb(skb);
3503
3504         return 0;
3505 }
3506
3507 static void tg3_set_txd(struct tg3 *tp, int entry,
3508                         dma_addr_t mapping, int len, u32 flags,
3509                         u32 mss_and_is_end)
3510 {
3511         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3512         int is_end = (mss_and_is_end & 0x1);
3513         u32 mss = (mss_and_is_end >> 1);
3514         u32 vlan_tag = 0;
3515
3516         if (is_end)
3517                 flags |= TXD_FLAG_END;
3518         if (flags & TXD_FLAG_VLAN) {
3519                 vlan_tag = flags >> 16;
3520                 flags &= 0xffff;
3521         }
3522         vlan_tag |= (mss << TXD_MSS_SHIFT);
3523
3524         txd->addr_hi = ((u64) mapping >> 32);
3525         txd->addr_lo = ((u64) mapping & 0xffffffff);
3526         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3527         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3528 }
3529
3530 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3531 {
3532         u32 base = (u32) mapping & 0xffffffff;
3533
3534         return ((base > 0xffffdcc0) &&
3535                 (base + len + 8 < base));
3536 }
3537
3538 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3539 {
3540         struct tg3 *tp = netdev_priv(dev);
3541         dma_addr_t mapping;
3542         unsigned int i;
3543         u32 len, entry, base_flags, mss;
3544         int would_hit_hwbug;
3545
3546         len = skb_headlen(skb);
3547
3548         /* No BH disabling for tx_lock here.  We are running in BH disabled
3549          * context and TX reclaim runs via tp->poll inside of a software
3550          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3551          * no IRQ context deadlocks to worry about either.  Rejoice!
3552          */
3553         if (!spin_trylock(&tp->tx_lock))
3554                 return NETDEV_TX_LOCKED; 
3555
3556         /* This is a hard error, log it. */
3557         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3558                 netif_stop_queue(dev);
3559                 spin_unlock(&tp->tx_lock);
3560                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3561                        dev->name);
3562                 return NETDEV_TX_BUSY;
3563         }
3564
3565         entry = tp->tx_prod;
3566         base_flags = 0;
3567         if (skb->ip_summed == CHECKSUM_HW)
3568                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3569 #if TG3_TSO_SUPPORT != 0
3570         mss = 0;
3571         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3572             (mss = skb_shinfo(skb)->tso_size) != 0) {
3573                 int tcp_opt_len, ip_tcp_len;
3574
3575                 if (skb_header_cloned(skb) &&
3576                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3577                         dev_kfree_skb(skb);
3578                         goto out_unlock;
3579                 }
3580
3581                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3582                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3583
3584                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3585                                TXD_FLAG_CPU_POST_DMA);
3586
3587                 skb->nh.iph->check = 0;
3588                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3589                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3590                         skb->h.th->check = 0;
3591                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3592                 }
3593                 else {
3594                         skb->h.th->check =
3595                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3596                                                    skb->nh.iph->daddr,
3597                                                    0, IPPROTO_TCP, 0);
3598                 }
3599
3600                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3601                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3602                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3603                                 int tsflags;
3604
3605                                 tsflags = ((skb->nh.iph->ihl - 5) +
3606                                            (tcp_opt_len >> 2));
3607                                 mss |= (tsflags << 11);
3608                         }
3609                 } else {
3610                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3611                                 int tsflags;
3612
3613                                 tsflags = ((skb->nh.iph->ihl - 5) +
3614                                            (tcp_opt_len >> 2));
3615                                 base_flags |= tsflags << 12;
3616                         }
3617                 }
3618         }
3619 #else
3620         mss = 0;
3621 #endif
3622 #if TG3_VLAN_TAG_USED
3623         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3624                 base_flags |= (TXD_FLAG_VLAN |
3625                                (vlan_tx_tag_get(skb) << 16));
3626 #endif
3627
3628         /* Queue skb data, a.k.a. the main skb fragment. */
3629         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3630
3631         tp->tx_buffers[entry].skb = skb;
3632         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3633
3634         would_hit_hwbug = 0;
3635
3636         if (tg3_4g_overflow_test(mapping, len))
3637                 would_hit_hwbug = entry + 1;
3638
3639         tg3_set_txd(tp, entry, mapping, len, base_flags,
3640                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3641
3642         entry = NEXT_TX(entry);
3643
3644         /* Now loop through additional data fragments, and queue them. */
3645         if (skb_shinfo(skb)->nr_frags > 0) {
3646                 unsigned int i, last;
3647
3648                 last = skb_shinfo(skb)->nr_frags - 1;
3649                 for (i = 0; i <= last; i++) {
3650                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3651
3652                         len = frag->size;
3653                         mapping = pci_map_page(tp->pdev,
3654                                                frag->page,
3655                                                frag->page_offset,
3656                                                len, PCI_DMA_TODEVICE);
3657
3658                         tp->tx_buffers[entry].skb = NULL;
3659                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3660
3661                         if (tg3_4g_overflow_test(mapping, len)) {
3662                                 /* Only one should match. */
3663                                 if (would_hit_hwbug)
3664                                         BUG();
3665                                 would_hit_hwbug = entry + 1;
3666                         }
3667
3668                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3669                                 tg3_set_txd(tp, entry, mapping, len,
3670                                             base_flags, (i == last)|(mss << 1));
3671                         else
3672                                 tg3_set_txd(tp, entry, mapping, len,
3673                                             base_flags, (i == last));
3674
3675                         entry = NEXT_TX(entry);
3676                 }
3677         }
3678
3679         if (would_hit_hwbug) {
3680                 u32 last_plus_one = entry;
3681                 u32 start;
3682                 unsigned int len = 0;
3683
3684                 would_hit_hwbug -= 1;
3685                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3686                 entry &= (TG3_TX_RING_SIZE - 1);
3687                 start = entry;
3688                 i = 0;
3689                 while (entry != last_plus_one) {
3690                         if (i == 0)
3691                                 len = skb_headlen(skb);
3692                         else
3693                                 len = skb_shinfo(skb)->frags[i-1].size;
3694
3695                         if (entry == would_hit_hwbug)
3696                                 break;
3697
3698                         i++;
3699                         entry = NEXT_TX(entry);
3700
3701                 }
3702
3703                 /* If the workaround fails due to memory/mapping
3704                  * failure, silently drop this packet.
3705                  */
3706                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3707                                                 entry, len,
3708                                                 last_plus_one,
3709                                                 &start, mss))
3710                         goto out_unlock;
3711
3712                 entry = start;
3713         }
3714
3715         /* Packets are ready, update Tx producer idx local and on card. */
3716         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3717
3718         tp->tx_prod = entry;
3719         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3720                 netif_stop_queue(dev);
3721
3722 out_unlock:
3723         mmiowb();
3724         spin_unlock(&tp->tx_lock);
3725
3726         dev->trans_start = jiffies;
3727
3728         return NETDEV_TX_OK;
3729 }
3730
3731 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3732                                int new_mtu)
3733 {
3734         dev->mtu = new_mtu;
3735
3736         if (new_mtu > ETH_DATA_LEN) {
3737                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
3738                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3739                         ethtool_op_set_tso(dev, 0);
3740                 }
3741                 else
3742                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3743         } else {
3744                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
3745                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3746                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3747         }
3748 }
3749
3750 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3751 {
3752         struct tg3 *tp = netdev_priv(dev);
3753
3754         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3755                 return -EINVAL;
3756
3757         if (!netif_running(dev)) {
3758                 /* We'll just catch it later when the
3759                  * device is up'd.
3760                  */
3761                 tg3_set_mtu(dev, tp, new_mtu);
3762                 return 0;
3763         }
3764
3765         tg3_netif_stop(tp);
3766
3767         tg3_full_lock(tp, 1);
3768
3769         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3770
3771         tg3_set_mtu(dev, tp, new_mtu);
3772
3773         tg3_init_hw(tp);
3774
3775         tg3_netif_start(tp);
3776
3777         tg3_full_unlock(tp);
3778
3779         return 0;
3780 }
3781
3782 /* Free up pending packets in all rx/tx rings.
3783  *
3784  * The chip has been shut down and the driver detached from
3785  * the networking, so no interrupts or new tx packets will
3786  * end up in the driver.  tp->{tx,}lock is not held and we are not
3787  * in an interrupt context and thus may sleep.
3788  */
3789 static void tg3_free_rings(struct tg3 *tp)
3790 {
3791         struct ring_info *rxp;
3792         int i;
3793
3794         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3795                 rxp = &tp->rx_std_buffers[i];
3796
3797                 if (rxp->skb == NULL)
3798                         continue;
3799                 pci_unmap_single(tp->pdev,
3800                                  pci_unmap_addr(rxp, mapping),
3801                                  tp->rx_pkt_buf_sz - tp->rx_offset,
3802                                  PCI_DMA_FROMDEVICE);
3803                 dev_kfree_skb_any(rxp->skb);
3804                 rxp->skb = NULL;
3805         }
3806
3807         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3808                 rxp = &tp->rx_jumbo_buffers[i];
3809
3810                 if (rxp->skb == NULL)
3811                         continue;
3812                 pci_unmap_single(tp->pdev,
3813                                  pci_unmap_addr(rxp, mapping),
3814                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3815                                  PCI_DMA_FROMDEVICE);
3816                 dev_kfree_skb_any(rxp->skb);
3817                 rxp->skb = NULL;
3818         }
3819
3820         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3821                 struct tx_ring_info *txp;
3822                 struct sk_buff *skb;
3823                 int j;
3824
3825                 txp = &tp->tx_buffers[i];
3826                 skb = txp->skb;
3827
3828                 if (skb == NULL) {
3829                         i++;
3830                         continue;
3831                 }
3832
3833                 pci_unmap_single(tp->pdev,
3834                                  pci_unmap_addr(txp, mapping),
3835                                  skb_headlen(skb),
3836                                  PCI_DMA_TODEVICE);
3837                 txp->skb = NULL;
3838
3839                 i++;
3840
3841                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3842                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3843                         pci_unmap_page(tp->pdev,
3844                                        pci_unmap_addr(txp, mapping),
3845                                        skb_shinfo(skb)->frags[j].size,
3846                                        PCI_DMA_TODEVICE);
3847                         i++;
3848                 }
3849
3850                 dev_kfree_skb_any(skb);
3851         }
3852 }
3853
3854 /* Initialize tx/rx rings for packet processing.
3855  *
3856  * The chip has been shut down and the driver detached from
3857  * the networking, so no interrupts or new tx packets will
3858  * end up in the driver.  tp->{tx,}lock are held and thus
3859  * we may not sleep.
3860  */
3861 static void tg3_init_rings(struct tg3 *tp)
3862 {
3863         u32 i;
3864
3865         /* Free up all the SKBs. */
3866         tg3_free_rings(tp);
3867
3868         /* Zero out all descriptors. */
3869         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3870         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3871         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3872         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3873
3874         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3875         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) &&
3876             (tp->dev->mtu > ETH_DATA_LEN))
3877                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3878
3879         /* Initialize invariants of the rings, we only set this
3880          * stuff once.  This works because the card does not
3881          * write into the rx buffer posting rings.
3882          */
3883         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3884                 struct tg3_rx_buffer_desc *rxd;
3885
3886                 rxd = &tp->rx_std[i];
3887                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
3888                         << RXD_LEN_SHIFT;
3889                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3890                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3891                                (i << RXD_OPAQUE_INDEX_SHIFT));
3892         }
3893
3894         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3895                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3896                         struct tg3_rx_buffer_desc *rxd;
3897
3898                         rxd = &tp->rx_jumbo[i];
3899                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3900                                 << RXD_LEN_SHIFT;
3901                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3902                                 RXD_FLAG_JUMBO;
3903                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3904                                (i << RXD_OPAQUE_INDEX_SHIFT));
3905                 }
3906         }
3907
3908         /* Now allocate fresh SKBs for each rx ring. */
3909         for (i = 0; i < tp->rx_pending; i++) {
3910                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3911                                      -1, i) < 0)
3912                         break;
3913         }
3914
3915         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3916                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3917                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3918                                              -1, i) < 0)
3919                                 break;
3920                 }
3921         }
3922 }
3923
3924 /*
3925  * Must not be invoked with interrupt sources disabled and
3926  * the hardware shutdown down.
3927  */
3928 static void tg3_free_consistent(struct tg3 *tp)
3929 {
3930         if (tp->rx_std_buffers) {
3931                 kfree(tp->rx_std_buffers);
3932                 tp->rx_std_buffers = NULL;
3933         }
3934         if (tp->rx_std) {
3935                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3936                                     tp->rx_std, tp->rx_std_mapping);
3937                 tp->rx_std = NULL;
3938         }
3939         if (tp->rx_jumbo) {
3940                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3941                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3942                 tp->rx_jumbo = NULL;
3943         }
3944         if (tp->rx_rcb) {
3945                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3946                                     tp->rx_rcb, tp->rx_rcb_mapping);
3947                 tp->rx_rcb = NULL;
3948         }
3949         if (tp->tx_ring) {
3950                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3951                         tp->tx_ring, tp->tx_desc_mapping);
3952                 tp->tx_ring = NULL;
3953         }
3954         if (tp->hw_status) {
3955                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3956                                     tp->hw_status, tp->status_mapping);
3957                 tp->hw_status = NULL;
3958         }
3959         if (tp->hw_stats) {
3960                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3961                                     tp->hw_stats, tp->stats_mapping);
3962                 tp->hw_stats = NULL;
3963         }
3964 }
3965
3966 /*
3967  * Must not be invoked with interrupt sources disabled and
3968  * the hardware shutdown down.  Can sleep.
3969  */
3970 static int tg3_alloc_consistent(struct tg3 *tp)
3971 {
3972         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3973                                       (TG3_RX_RING_SIZE +
3974                                        TG3_RX_JUMBO_RING_SIZE)) +
3975                                      (sizeof(struct tx_ring_info) *
3976                                       TG3_TX_RING_SIZE),
3977                                      GFP_KERNEL);
3978         if (!tp->rx_std_buffers)
3979                 return -ENOMEM;
3980
3981         memset(tp->rx_std_buffers, 0,
3982                (sizeof(struct ring_info) *
3983                 (TG3_RX_RING_SIZE +
3984                  TG3_RX_JUMBO_RING_SIZE)) +
3985                (sizeof(struct tx_ring_info) *
3986                 TG3_TX_RING_SIZE));
3987
3988         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3989         tp->tx_buffers = (struct tx_ring_info *)
3990                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3991
3992         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3993                                           &tp->rx_std_mapping);
3994         if (!tp->rx_std)
3995                 goto err_out;
3996
3997         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3998                                             &tp->rx_jumbo_mapping);
3999
4000         if (!tp->rx_jumbo)
4001                 goto err_out;
4002
4003         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4004                                           &tp->rx_rcb_mapping);
4005         if (!tp->rx_rcb)
4006                 goto err_out;
4007
4008         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4009                                            &tp->tx_desc_mapping);
4010         if (!tp->tx_ring)
4011                 goto err_out;
4012
4013         tp->hw_status = pci_alloc_consistent(tp->pdev,
4014                                              TG3_HW_STATUS_SIZE,
4015                                              &tp->status_mapping);
4016         if (!tp->hw_status)
4017                 goto err_out;
4018
4019         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4020                                             sizeof(struct tg3_hw_stats),
4021                                             &tp->stats_mapping);
4022         if (!tp->hw_stats)
4023                 goto err_out;
4024
4025         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4026         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4027
4028         return 0;
4029
4030 err_out:
4031         tg3_free_consistent(tp);
4032         return -ENOMEM;
4033 }
4034
4035 #define MAX_WAIT_CNT 1000
4036
4037 /* To stop a block, clear the enable bit and poll till it
4038  * clears.  tp->lock is held.
4039  */
4040 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4041 {
4042         unsigned int i;
4043         u32 val;
4044
4045         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4046                 switch (ofs) {
4047                 case RCVLSC_MODE:
4048                 case DMAC_MODE:
4049                 case MBFREE_MODE:
4050                 case BUFMGR_MODE:
4051                 case MEMARB_MODE:
4052                         /* We can't enable/disable these bits of the
4053                          * 5705/5750, just say success.
4054                          */
4055                         return 0;
4056
4057                 default:
4058                         break;
4059                 };
4060         }
4061
4062         val = tr32(ofs);
4063         val &= ~enable_bit;
4064         tw32_f(ofs, val);
4065
4066         for (i = 0; i < MAX_WAIT_CNT; i++) {
4067                 udelay(100);
4068                 val = tr32(ofs);
4069                 if ((val & enable_bit) == 0)
4070                         break;
4071         }
4072
4073         if (i == MAX_WAIT_CNT && !silent) {
4074                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4075                        "ofs=%lx enable_bit=%x\n",
4076                        ofs, enable_bit);
4077                 return -ENODEV;
4078         }
4079
4080         return 0;
4081 }
4082
4083 /* tp->lock is held. */
4084 static int tg3_abort_hw(struct tg3 *tp, int silent)
4085 {
4086         int i, err;
4087
4088         tg3_disable_ints(tp);
4089
4090         tp->rx_mode &= ~RX_MODE_ENABLE;
4091         tw32_f(MAC_RX_MODE, tp->rx_mode);
4092         udelay(10);
4093
4094         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4095         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4096         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4097         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4098         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4099         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4100
4101         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4102         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4103         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4104         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4105         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4106         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4107         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4108
4109         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4110         tw32_f(MAC_MODE, tp->mac_mode);
4111         udelay(40);
4112
4113         tp->tx_mode &= ~TX_MODE_ENABLE;
4114         tw32_f(MAC_TX_MODE, tp->tx_mode);
4115
4116         for (i = 0; i < MAX_WAIT_CNT; i++) {
4117                 udelay(100);
4118                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4119                         break;
4120         }
4121         if (i >= MAX_WAIT_CNT) {
4122                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4123                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4124                        tp->dev->name, tr32(MAC_TX_MODE));
4125                 err |= -ENODEV;
4126         }
4127
4128         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4129         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4130         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4131
4132         tw32(FTQ_RESET, 0xffffffff);
4133         tw32(FTQ_RESET, 0x00000000);
4134
4135         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4136         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4137
4138         if (tp->hw_status)
4139                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4140         if (tp->hw_stats)
4141                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4142
4143         return err;
4144 }
4145
4146 /* tp->lock is held. */
4147 static int tg3_nvram_lock(struct tg3 *tp)
4148 {
4149         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4150                 int i;
4151
4152                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4153                 for (i = 0; i < 8000; i++) {
4154                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4155                                 break;
4156                         udelay(20);
4157                 }
4158                 if (i == 8000)
4159                         return -ENODEV;
4160         }
4161         return 0;
4162 }
4163
4164 /* tp->lock is held. */
4165 static void tg3_nvram_unlock(struct tg3 *tp)
4166 {
4167         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4168                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4169 }
4170
4171 /* tp->lock is held. */
4172 static void tg3_enable_nvram_access(struct tg3 *tp)
4173 {
4174         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4175             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4176                 u32 nvaccess = tr32(NVRAM_ACCESS);
4177
4178                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4179         }
4180 }
4181
4182 /* tp->lock is held. */
4183 static void tg3_disable_nvram_access(struct tg3 *tp)
4184 {
4185         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4186             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4187                 u32 nvaccess = tr32(NVRAM_ACCESS);
4188
4189                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4190         }
4191 }
4192
4193 /* tp->lock is held. */
4194 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4195 {
4196         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4197                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4198                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4199
4200         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4201                 switch (kind) {
4202                 case RESET_KIND_INIT:
4203                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4204                                       DRV_STATE_START);
4205                         break;
4206
4207                 case RESET_KIND_SHUTDOWN:
4208                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4209                                       DRV_STATE_UNLOAD);
4210                         break;
4211
4212                 case RESET_KIND_SUSPEND:
4213                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4214                                       DRV_STATE_SUSPEND);
4215                         break;
4216
4217                 default:
4218                         break;
4219                 };
4220         }
4221 }
4222
4223 /* tp->lock is held. */
4224 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4225 {
4226         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4227                 switch (kind) {
4228                 case RESET_KIND_INIT:
4229                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4230                                       DRV_STATE_START_DONE);
4231                         break;
4232
4233                 case RESET_KIND_SHUTDOWN:
4234                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4235                                       DRV_STATE_UNLOAD_DONE);
4236                         break;
4237
4238                 default:
4239                         break;
4240                 };
4241         }
4242 }
4243
4244 /* tp->lock is held. */
4245 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4246 {
4247         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4248                 switch (kind) {
4249                 case RESET_KIND_INIT:
4250                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4251                                       DRV_STATE_START);
4252                         break;
4253
4254                 case RESET_KIND_SHUTDOWN:
4255                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4256                                       DRV_STATE_UNLOAD);
4257                         break;
4258
4259                 case RESET_KIND_SUSPEND:
4260                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4261                                       DRV_STATE_SUSPEND);
4262                         break;
4263
4264                 default:
4265                         break;
4266                 };
4267         }
4268 }
4269
4270 static void tg3_stop_fw(struct tg3 *);
4271
4272 /* tp->lock is held. */
4273 static int tg3_chip_reset(struct tg3 *tp)
4274 {
4275         u32 val;
4276         void (*write_op)(struct tg3 *, u32, u32);
4277         int i;
4278
4279         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4280                 tg3_nvram_lock(tp);
4281
4282         /*
4283          * We must avoid the readl() that normally takes place.
4284          * It locks machines, causes machine checks, and other
4285          * fun things.  So, temporarily disable the 5701
4286          * hardware workaround, while we do the reset.
4287          */
4288         write_op = tp->write32;
4289         if (write_op == tg3_write_flush_reg32)
4290                 tp->write32 = tg3_write32;
4291
4292         /* do the reset */
4293         val = GRC_MISC_CFG_CORECLK_RESET;
4294
4295         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4296                 if (tr32(0x7e2c) == 0x60) {
4297                         tw32(0x7e2c, 0x20);
4298                 }
4299                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4300                         tw32(GRC_MISC_CFG, (1 << 29));
4301                         val |= (1 << 29);
4302                 }
4303         }
4304
4305         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4306                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4307         tw32(GRC_MISC_CFG, val);
4308
4309         /* restore 5701 hardware bug workaround write method */
4310         tp->write32 = write_op;
4311
4312         /* Unfortunately, we have to delay before the PCI read back.
4313          * Some 575X chips even will not respond to a PCI cfg access
4314          * when the reset command is given to the chip.
4315          *
4316          * How do these hardware designers expect things to work
4317          * properly if the PCI write is posted for a long period
4318          * of time?  It is always necessary to have some method by
4319          * which a register read back can occur to push the write
4320          * out which does the reset.
4321          *
4322          * For most tg3 variants the trick below was working.
4323          * Ho hum...
4324          */
4325         udelay(120);
4326
4327         /* Flush PCI posted writes.  The normal MMIO registers
4328          * are inaccessible at this time so this is the only
4329          * way to make this reliably (actually, this is no longer
4330          * the case, see above).  I tried to use indirect
4331          * register read/write but this upset some 5701 variants.
4332          */
4333         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4334
4335         udelay(120);
4336
4337         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4338                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4339                         int i;
4340                         u32 cfg_val;
4341
4342                         /* Wait for link training to complete.  */
4343                         for (i = 0; i < 5000; i++)
4344                                 udelay(100);
4345
4346                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4347                         pci_write_config_dword(tp->pdev, 0xc4,
4348                                                cfg_val | (1 << 15));
4349                 }
4350                 /* Set PCIE max payload size and clear error status.  */
4351                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4352         }
4353
4354         /* Re-enable indirect register accesses. */
4355         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4356                                tp->misc_host_ctrl);
4357
4358         /* Set MAX PCI retry to zero. */
4359         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4360         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4361             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4362                 val |= PCISTATE_RETRY_SAME_DMA;
4363         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4364
4365         pci_restore_state(tp->pdev);
4366
4367         /* Make sure PCI-X relaxed ordering bit is clear. */
4368         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4369         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4370         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4371
4372         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
4373                 u32 val;
4374
4375                 /* Chip reset on 5780 will reset MSI enable bit,
4376                  * so need to restore it.
4377                  */
4378                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4379                         u16 ctrl;
4380
4381                         pci_read_config_word(tp->pdev,
4382                                              tp->msi_cap + PCI_MSI_FLAGS,
4383                                              &ctrl);
4384                         pci_write_config_word(tp->pdev,
4385                                               tp->msi_cap + PCI_MSI_FLAGS,
4386                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4387                         val = tr32(MSGINT_MODE);
4388                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4389                 }
4390
4391                 val = tr32(MEMARB_MODE);
4392                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4393
4394         } else
4395                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4396
4397         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4398                 tg3_stop_fw(tp);
4399                 tw32(0x5000, 0x400);
4400         }
4401
4402         tw32(GRC_MODE, tp->grc_mode);
4403
4404         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4405                 u32 val = tr32(0xc4);
4406
4407                 tw32(0xc4, val | (1 << 15));
4408         }
4409
4410         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4411             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4412                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4413                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4414                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4415                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4416         }
4417
4418         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4419                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4420                 tw32_f(MAC_MODE, tp->mac_mode);
4421         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4422                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4423                 tw32_f(MAC_MODE, tp->mac_mode);
4424         } else
4425                 tw32_f(MAC_MODE, 0);
4426         udelay(40);
4427
4428         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4429                 /* Wait for firmware initialization to complete. */
4430                 for (i = 0; i < 100000; i++) {
4431                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4432                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4433                                 break;
4434                         udelay(10);
4435                 }
4436                 if (i >= 100000) {
4437                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4438                                "firmware will not restart magic=%08x\n",
4439                                tp->dev->name, val);
4440                         return -ENODEV;
4441                 }
4442         }
4443
4444         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4445             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4446                 u32 val = tr32(0x7c00);
4447
4448                 tw32(0x7c00, val | (1 << 25));
4449         }
4450
4451         /* Reprobe ASF enable state.  */
4452         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4453         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4454         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4455         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4456                 u32 nic_cfg;
4457
4458                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4459                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4460                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4461                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4462                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4463                 }
4464         }
4465
4466         return 0;
4467 }
4468
4469 /* tp->lock is held. */
4470 static void tg3_stop_fw(struct tg3 *tp)
4471 {
4472         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4473                 u32 val;
4474                 int i;
4475
4476                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4477                 val = tr32(GRC_RX_CPU_EVENT);
4478                 val |= (1 << 14);
4479                 tw32(GRC_RX_CPU_EVENT, val);
4480
4481                 /* Wait for RX cpu to ACK the event.  */
4482                 for (i = 0; i < 100; i++) {
4483                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4484                                 break;
4485                         udelay(1);
4486                 }
4487         }
4488 }
4489
4490 /* tp->lock is held. */
4491 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4492 {
4493         int err;
4494
4495         tg3_stop_fw(tp);
4496
4497         tg3_write_sig_pre_reset(tp, kind);
4498
4499         tg3_abort_hw(tp, silent);
4500         err = tg3_chip_reset(tp);
4501
4502         tg3_write_sig_legacy(tp, kind);
4503         tg3_write_sig_post_reset(tp, kind);
4504
4505         if (err)
4506                 return err;
4507
4508         return 0;
4509 }
4510
4511 #define TG3_FW_RELEASE_MAJOR    0x0
4512 #define TG3_FW_RELASE_MINOR     0x0
4513 #define TG3_FW_RELEASE_FIX      0x0
4514 #define TG3_FW_START_ADDR       0x08000000
4515 #define TG3_FW_TEXT_ADDR        0x08000000
4516 #define TG3_FW_TEXT_LEN         0x9c0
4517 #define TG3_FW_RODATA_ADDR      0x080009c0
4518 #define TG3_FW_RODATA_LEN       0x60
4519 #define TG3_FW_DATA_ADDR        0x08000a40
4520 #define TG3_FW_DATA_LEN         0x20
4521 #define TG3_FW_SBSS_ADDR        0x08000a60
4522 #define TG3_FW_SBSS_LEN         0xc
4523 #define TG3_FW_BSS_ADDR         0x08000a70
4524 #define TG3_FW_BSS_LEN          0x10
4525
4526 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4527         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4528         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4529         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4530         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4531         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4532         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4533         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4534         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4535         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4536         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4537         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4538         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4539         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4540         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4541         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4542         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4543         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4544         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4545         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4546         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4547         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4548         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4549         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4550         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4551         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4552         0, 0, 0, 0, 0, 0,
4553         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4554         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4555         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4556         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4557         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4558         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4559         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4560         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4561         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4562         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4563         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4564         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4565         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4566         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4567         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4568         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4569         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4570         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4571         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4572         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4573         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4574         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4575         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4576         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4577         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4578         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4579         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4580         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4581         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4582         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4583         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4584         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4585         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4586         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4587         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4588         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4589         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4590         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4591         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4592         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4593         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4594         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4595         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4596         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4597         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4598         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4599         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4600         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4601         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4602         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4603         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4604         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4605         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4606         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4607         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4608         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4609         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4610         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4611         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4612         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4613         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4614         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4615         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4616         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4617         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4618 };
4619
4620 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4621         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4622         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4623         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4624         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4625         0x00000000
4626 };
4627
4628 #if 0 /* All zeros, don't eat up space with it. */
4629 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4630         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4631         0x00000000, 0x00000000, 0x00000000, 0x00000000
4632 };
4633 #endif
4634
4635 #define RX_CPU_SCRATCH_BASE     0x30000
4636 #define RX_CPU_SCRATCH_SIZE     0x04000
4637 #define TX_CPU_SCRATCH_BASE     0x34000
4638 #define TX_CPU_SCRATCH_SIZE     0x04000
4639
4640 /* tp->lock is held. */
4641 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4642 {
4643         int i;
4644
4645         if (offset == TX_CPU_BASE &&
4646             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4647                 BUG();
4648
4649         if (offset == RX_CPU_BASE) {
4650                 for (i = 0; i < 10000; i++) {
4651                         tw32(offset + CPU_STATE, 0xffffffff);
4652                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4653                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4654                                 break;
4655                 }
4656
4657                 tw32(offset + CPU_STATE, 0xffffffff);
4658                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4659                 udelay(10);
4660         } else {
4661                 for (i = 0; i < 10000; i++) {
4662                         tw32(offset + CPU_STATE, 0xffffffff);
4663                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4664                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4665                                 break;
4666                 }
4667         }
4668
4669         if (i >= 10000) {
4670                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4671                        "and %s CPU\n",
4672                        tp->dev->name,
4673                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4674                 return -ENODEV;
4675         }
4676         return 0;
4677 }
4678
4679 struct fw_info {
4680         unsigned int text_base;
4681         unsigned int text_len;
4682         u32 *text_data;
4683         unsigned int rodata_base;
4684         unsigned int rodata_len;
4685         u32 *rodata_data;
4686         unsigned int data_base;
4687         unsigned int data_len;
4688         u32 *data_data;
4689 };
4690
4691 /* tp->lock is held. */
4692 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4693                                  int cpu_scratch_size, struct fw_info *info)
4694 {
4695         int err, i;
4696         void (*write_op)(struct tg3 *, u32, u32);
4697
4698         if (cpu_base == TX_CPU_BASE &&
4699             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4700                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4701                        "TX cpu firmware on %s which is 5705.\n",
4702                        tp->dev->name);
4703                 return -EINVAL;
4704         }
4705
4706         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4707                 write_op = tg3_write_mem;
4708         else
4709                 write_op = tg3_write_indirect_reg32;
4710
4711         /* It is possible that bootcode is still loading at this point.
4712          * Get the nvram lock first before halting the cpu.
4713          */
4714         tg3_nvram_lock(tp);
4715         err = tg3_halt_cpu(tp, cpu_base);
4716         tg3_nvram_unlock(tp);
4717         if (err)
4718                 goto out;
4719
4720         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4721                 write_op(tp, cpu_scratch_base + i, 0);
4722         tw32(cpu_base + CPU_STATE, 0xffffffff);
4723         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4724         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4725                 write_op(tp, (cpu_scratch_base +
4726                               (info->text_base & 0xffff) +
4727                               (i * sizeof(u32))),
4728                          (info->text_data ?
4729                           info->text_data[i] : 0));
4730         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4731                 write_op(tp, (cpu_scratch_base +
4732                               (info->rodata_base & 0xffff) +
4733                               (i * sizeof(u32))),
4734                          (info->rodata_data ?
4735                           info->rodata_data[i] : 0));
4736         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4737                 write_op(tp, (cpu_scratch_base +
4738                               (info->data_base & 0xffff) +
4739                               (i * sizeof(u32))),
4740                          (info->data_data ?
4741                           info->data_data[i] : 0));
4742
4743         err = 0;
4744
4745 out:
4746         return err;
4747 }
4748
4749 /* tp->lock is held. */
4750 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4751 {
4752         struct fw_info info;
4753         int err, i;
4754
4755         info.text_base = TG3_FW_TEXT_ADDR;
4756         info.text_len = TG3_FW_TEXT_LEN;
4757         info.text_data = &tg3FwText[0];
4758         info.rodata_base = TG3_FW_RODATA_ADDR;
4759         info.rodata_len = TG3_FW_RODATA_LEN;
4760         info.rodata_data = &tg3FwRodata[0];
4761         info.data_base = TG3_FW_DATA_ADDR;
4762         info.data_len = TG3_FW_DATA_LEN;
4763         info.data_data = NULL;
4764
4765         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4766                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4767                                     &info);
4768         if (err)
4769                 return err;
4770
4771         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4772                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4773                                     &info);
4774         if (err)
4775                 return err;
4776
4777         /* Now startup only the RX cpu. */
4778         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4779         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4780
4781         for (i = 0; i < 5; i++) {
4782                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4783                         break;
4784                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4785                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4786                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4787                 udelay(1000);
4788         }
4789         if (i >= 5) {
4790                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4791                        "to set RX CPU PC, is %08x should be %08x\n",
4792                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4793                        TG3_FW_TEXT_ADDR);
4794                 return -ENODEV;
4795         }
4796         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4797         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4798
4799         return 0;
4800 }
4801
4802 #if TG3_TSO_SUPPORT != 0
4803
4804 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4805 #define TG3_TSO_FW_RELASE_MINOR         0x6
4806 #define TG3_TSO_FW_RELEASE_FIX          0x0
4807 #define TG3_TSO_FW_START_ADDR           0x08000000
4808 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4809 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4810 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4811 #define TG3_TSO_FW_RODATA_LEN           0x60
4812 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4813 #define TG3_TSO_FW_DATA_LEN             0x30
4814 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4815 #define TG3_TSO_FW_SBSS_LEN             0x2c
4816 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4817 #define TG3_TSO_FW_BSS_LEN              0x894
4818
4819 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4820         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4821         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4822         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4823         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4824         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4825         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4826         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4827         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4828         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4829         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4830         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4831         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4832         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4833         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4834         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4835         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4836         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4837         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4838         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4839         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4840         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4841         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4842         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4843         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4844         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4845         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4846         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4847         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4848         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4849         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4850         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4851         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4852         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4853         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4854         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4855         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4856         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4857         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4858         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4859         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4860         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4861         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4862         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4863         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4864         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4865         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4866         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4867         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4868         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4869         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4870         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4871         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4872         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4873         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4874         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4875         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4876         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4877         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4878         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4879         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4880         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4881         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4882         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4883         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4884         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4885         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4886         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4887         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4888         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4889         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4890         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4891         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4892         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4893         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4894         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4895         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4896         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4897         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4898         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4899         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4900         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4901         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4902         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4903         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4904         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4905         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4906         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4907         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4908         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4909         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4910         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4911         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4912         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4913         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4914         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4915         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4916         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4917         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4918         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4919         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4920         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4921         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4922         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4923         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4924         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4925         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4926         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4927         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4928         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4929         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4930         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4931         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4932         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4933         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4934         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4935         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4936         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4937         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4938         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4939         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4940         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4941         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4942         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4943         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4944         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4945         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4946         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4947         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4948         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4949         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4950         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4951         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4952         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4953         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4954         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4955         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4956         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4957         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4958         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4959         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4960         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4961         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4962         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4963         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4964         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4965         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4966         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4967         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4968         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4969         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4970         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4971         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4972         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4973         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4974         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4975         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4976         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4977         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4978         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4979         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4980         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4981         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4982         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4983         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4984         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4985         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4986         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4987         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4988         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4989         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4990         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4991         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4992         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4993         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4994         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4995         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4996         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4997         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4998         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4999         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5000         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5001         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5002         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5003         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5004         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5005         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5006         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5007         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5008         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5009         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5010         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5011         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5012         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5013         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5014         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5015         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5016         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5017         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5018         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5019         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5020         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5021         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5022         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5023         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5024         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5025         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5026         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5027         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5028         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5029         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5030         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5031         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5032         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5033         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5034         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5035         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5036         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5037         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5038         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5039         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5040         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5041         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5042         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5043         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5044         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5045         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5046         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5047         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5048         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5049         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5050         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5051         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5052         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5053         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5054         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5055         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5056         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5057         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5058         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5059         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5060         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5061         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5062         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5063         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5064         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5065         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5066         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5067         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5068         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5069         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5070         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5071         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5072         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5073         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5074         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5075         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5076         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5077         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5078         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5079         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5080         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5081         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5082         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5083         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5084         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5085         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5086         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5087         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5088         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5089         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5090         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5091         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5092         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5093         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5094         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5095         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5096         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5097         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5098         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5099         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5100         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5101         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5102         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5103         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5104 };
5105
5106 static u32 tg3TsoFwRodata[] = {
5107         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5108         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5109         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5110         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5111         0x00000000,
5112 };
5113
5114 static u32 tg3TsoFwData[] = {
5115         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5116         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5117         0x00000000,
5118 };
5119
5120 /* 5705 needs a special version of the TSO firmware.  */
5121 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5122 #define TG3_TSO5_FW_RELASE_MINOR        0x2
5123 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5124 #define TG3_TSO5_FW_START_ADDR          0x00010000
5125 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5126 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5127 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5128 #define TG3_TSO5_FW_RODATA_LEN          0x50
5129 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5130 #define TG3_TSO5_FW_DATA_LEN            0x20
5131 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5132 #define TG3_TSO5_FW_SBSS_LEN            0x28
5133 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5134 #define TG3_TSO5_FW_BSS_LEN             0x88
5135
5136 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5137         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5138         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5139         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5140         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5141         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5142         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5143         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5144         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5145         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5146         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5147         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5148         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5149         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5150         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5151         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5152         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5153         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5154         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5155         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5156         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5157         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5158         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5159         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5160         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5161         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5162         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5163         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5164         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5165         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5166         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5167         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5168         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5169         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5170         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5171         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5172         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5173         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5174         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5175         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5176         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5177         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5178         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5179         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5180         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5181         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5182         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5183         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5184         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5185         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5186         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5187         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5188         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5189         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5190         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5191         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5192         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5193         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5194         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5195         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5196         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5197         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5198         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5199         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5200         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5201         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5202         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5203         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5204         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5205         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5206         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5207         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5208         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5209         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5210         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5211         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5212         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5213         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5214         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5215         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5216         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5217         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5218         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5219         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5220         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5221         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5222         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5223         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5224         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5225         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5226         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5227         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5228         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5229         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5230         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5231         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5232         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5233         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5234         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5235         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5236         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5237         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5238         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5239         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5240         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5241         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5242         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5243         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5244         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5245         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5246         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5247         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5248         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5249         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5250         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5251         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5252         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5253         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5254         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5255         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5256         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5257         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5258         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5259         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5260         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5261         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5262         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5263         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5264         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5265         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5266         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5267         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5268         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5269         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5270         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5271         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5272         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5273         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5274         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5275         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5276         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5277         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5278         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5279         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5280         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5281         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5282         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5283         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5284         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5285         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5286         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5287         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5288         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5289         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5290         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5291         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5292         0x00000000, 0x00000000, 0x00000000,
5293 };
5294
5295 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5296         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5297         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5298         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5299         0x00000000, 0x00000000, 0x00000000,
5300 };
5301
5302 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5303         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5304         0x00000000, 0x00000000, 0x00000000,
5305 };
5306
5307 /* tp->lock is held. */
5308 static int tg3_load_tso_firmware(struct tg3 *tp)
5309 {
5310         struct fw_info info;
5311         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5312         int err, i;
5313
5314         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5315                 return 0;
5316
5317         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5318                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5319                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5320                 info.text_data = &tg3Tso5FwText[0];
5321                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5322                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5323                 info.rodata_data = &tg3Tso5FwRodata[0];
5324                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5325                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5326                 info.data_data = &tg3Tso5FwData[0];
5327                 cpu_base = RX_CPU_BASE;
5328                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5329                 cpu_scratch_size = (info.text_len +
5330                                     info.rodata_len +
5331                                     info.data_len +
5332                                     TG3_TSO5_FW_SBSS_LEN +
5333                                     TG3_TSO5_FW_BSS_LEN);
5334         } else {
5335                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5336                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5337                 info.text_data = &tg3TsoFwText[0];
5338                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5339                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5340                 info.rodata_data = &tg3TsoFwRodata[0];
5341                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5342                 info.data_len = TG3_TSO_FW_DATA_LEN;
5343                 info.data_data = &tg3TsoFwData[0];
5344                 cpu_base = TX_CPU_BASE;
5345                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5346                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5347         }
5348
5349         err = tg3_load_firmware_cpu(tp, cpu_base,
5350                                     cpu_scratch_base, cpu_scratch_size,
5351                                     &info);
5352         if (err)
5353                 return err;
5354
5355         /* Now startup the cpu. */
5356         tw32(cpu_base + CPU_STATE, 0xffffffff);
5357         tw32_f(cpu_base + CPU_PC,    info.text_base);
5358
5359         for (i = 0; i < 5; i++) {
5360                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5361                         break;
5362                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5363                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5364                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5365                 udelay(1000);
5366         }
5367         if (i >= 5) {
5368                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5369                        "to set CPU PC, is %08x should be %08x\n",
5370                        tp->dev->name, tr32(cpu_base + CPU_PC),
5371                        info.text_base);
5372                 return -ENODEV;
5373         }
5374         tw32(cpu_base + CPU_STATE, 0xffffffff);
5375         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5376         return 0;
5377 }
5378
5379 #endif /* TG3_TSO_SUPPORT != 0 */
5380
5381 /* tp->lock is held. */
5382 static void __tg3_set_mac_addr(struct tg3 *tp)
5383 {
5384         u32 addr_high, addr_low;
5385         int i;
5386
5387         addr_high = ((tp->dev->dev_addr[0] << 8) |
5388                      tp->dev->dev_addr[1]);
5389         addr_low = ((tp->dev->dev_addr[2] << 24) |
5390                     (tp->dev->dev_addr[3] << 16) |
5391                     (tp->dev->dev_addr[4] <<  8) |
5392                     (tp->dev->dev_addr[5] <<  0));
5393         for (i = 0; i < 4; i++) {
5394                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5395                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5396         }
5397
5398         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5399             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5400                 for (i = 0; i < 12; i++) {
5401                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5402                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5403                 }
5404         }
5405
5406         addr_high = (tp->dev->dev_addr[0] +
5407                      tp->dev->dev_addr[1] +
5408                      tp->dev->dev_addr[2] +
5409                      tp->dev->dev_addr[3] +
5410                      tp->dev->dev_addr[4] +
5411                      tp->dev->dev_addr[5]) &
5412                 TX_BACKOFF_SEED_MASK;
5413         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5414 }
5415
5416 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5417 {
5418         struct tg3 *tp = netdev_priv(dev);
5419         struct sockaddr *addr = p;
5420
5421         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5422
5423         spin_lock_bh(&tp->lock);
5424         __tg3_set_mac_addr(tp);
5425         spin_unlock_bh(&tp->lock);
5426
5427         return 0;
5428 }
5429
5430 /* tp->lock is held. */
5431 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5432                            dma_addr_t mapping, u32 maxlen_flags,
5433                            u32 nic_addr)
5434 {
5435         tg3_write_mem(tp,
5436                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5437                       ((u64) mapping >> 32));
5438         tg3_write_mem(tp,
5439                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5440                       ((u64) mapping & 0xffffffff));
5441         tg3_write_mem(tp,
5442                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5443                        maxlen_flags);
5444
5445         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5446                 tg3_write_mem(tp,
5447                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5448                               nic_addr);
5449 }
5450
5451 static void __tg3_set_rx_mode(struct net_device *);
5452 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5453 {
5454         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5455         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5456         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5457         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5458         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5459                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5460                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5461         }
5462         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5463         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5464         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5465                 u32 val = ec->stats_block_coalesce_usecs;
5466
5467                 if (!netif_carrier_ok(tp->dev))
5468                         val = 0;
5469
5470                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5471         }
5472 }
5473
5474 /* tp->lock is held. */
5475 static int tg3_reset_hw(struct tg3 *tp)
5476 {
5477         u32 val, rdmac_mode;
5478         int i, err, limit;
5479
5480         tg3_disable_ints(tp);
5481
5482         tg3_stop_fw(tp);
5483
5484         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5485
5486         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5487                 tg3_abort_hw(tp, 1);
5488         }
5489
5490         err = tg3_chip_reset(tp);
5491         if (err)
5492                 return err;
5493
5494         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5495
5496         /* This works around an issue with Athlon chipsets on
5497          * B3 tigon3 silicon.  This bit has no effect on any
5498          * other revision.  But do not set this on PCI Express
5499          * chips.
5500          */
5501         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5502                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5503         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5504
5505         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5506             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5507                 val = tr32(TG3PCI_PCISTATE);
5508                 val |= PCISTATE_RETRY_SAME_DMA;
5509                 tw32(TG3PCI_PCISTATE, val);
5510         }
5511
5512         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5513                 /* Enable some hw fixes.  */
5514                 val = tr32(TG3PCI_MSI_DATA);
5515                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5516                 tw32(TG3PCI_MSI_DATA, val);
5517         }
5518
5519         /* Descriptor ring init may make accesses to the
5520          * NIC SRAM area to setup the TX descriptors, so we
5521          * can only do this after the hardware has been
5522          * successfully reset.
5523          */
5524         tg3_init_rings(tp);
5525
5526         /* This value is determined during the probe time DMA
5527          * engine test, tg3_test_dma.
5528          */
5529         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5530
5531         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5532                           GRC_MODE_4X_NIC_SEND_RINGS |
5533                           GRC_MODE_NO_TX_PHDR_CSUM |
5534                           GRC_MODE_NO_RX_PHDR_CSUM);
5535         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5536         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5537                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5538         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5539                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5540
5541         tw32(GRC_MODE,
5542              tp->grc_mode |
5543              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5544
5545         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
5546         val = tr32(GRC_MISC_CFG);
5547         val &= ~0xff;
5548         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5549         tw32(GRC_MISC_CFG, val);
5550
5551         /* Initialize MBUF/DESC pool. */
5552         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5553                 /* Do nothing.  */
5554         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5555                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5556                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5557                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5558                 else
5559                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5560                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5561                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5562         }
5563 #if TG3_TSO_SUPPORT != 0
5564         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5565                 int fw_len;
5566
5567                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5568                           TG3_TSO5_FW_RODATA_LEN +
5569                           TG3_TSO5_FW_DATA_LEN +
5570                           TG3_TSO5_FW_SBSS_LEN +
5571                           TG3_TSO5_FW_BSS_LEN);
5572                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5573                 tw32(BUFMGR_MB_POOL_ADDR,
5574                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5575                 tw32(BUFMGR_MB_POOL_SIZE,
5576                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5577         }
5578 #endif
5579
5580         if (tp->dev->mtu <= ETH_DATA_LEN) {
5581                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5582                      tp->bufmgr_config.mbuf_read_dma_low_water);
5583                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5584                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5585                 tw32(BUFMGR_MB_HIGH_WATER,
5586                      tp->bufmgr_config.mbuf_high_water);
5587         } else {
5588                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5589                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5590                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5591                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5592                 tw32(BUFMGR_MB_HIGH_WATER,
5593                      tp->bufmgr_config.mbuf_high_water_jumbo);
5594         }
5595         tw32(BUFMGR_DMA_LOW_WATER,
5596              tp->bufmgr_config.dma_low_water);
5597         tw32(BUFMGR_DMA_HIGH_WATER,
5598              tp->bufmgr_config.dma_high_water);
5599
5600         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5601         for (i = 0; i < 2000; i++) {
5602                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5603                         break;
5604                 udelay(10);
5605         }
5606         if (i >= 2000) {
5607                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5608                        tp->dev->name);
5609                 return -ENODEV;
5610         }
5611
5612         /* Setup replenish threshold. */
5613         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5614
5615         /* Initialize TG3_BDINFO's at:
5616          *  RCVDBDI_STD_BD:     standard eth size rx ring
5617          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5618          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5619          *
5620          * like so:
5621          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5622          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5623          *                              ring attribute flags
5624          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5625          *
5626          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5627          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5628          *
5629          * The size of each ring is fixed in the firmware, but the location is
5630          * configurable.
5631          */
5632         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5633              ((u64) tp->rx_std_mapping >> 32));
5634         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5635              ((u64) tp->rx_std_mapping & 0xffffffff));
5636         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5637              NIC_SRAM_RX_BUFFER_DESC);
5638
5639         /* Don't even try to program the JUMBO/MINI buffer descriptor
5640          * configs on 5705.
5641          */
5642         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5643                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5644                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5645         } else {
5646                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5647                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5648
5649                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5650                      BDINFO_FLAGS_DISABLED);
5651
5652                 /* Setup replenish threshold. */
5653                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5654
5655                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5656                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5657                              ((u64) tp->rx_jumbo_mapping >> 32));
5658                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5659                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5660                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5661                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5662                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5663                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5664                 } else {
5665                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5666                              BDINFO_FLAGS_DISABLED);
5667                 }
5668
5669         }
5670
5671         /* There is only one send ring on 5705/5750, no need to explicitly
5672          * disable the others.
5673          */
5674         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5675                 /* Clear out send RCB ring in SRAM. */
5676                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5677                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5678                                       BDINFO_FLAGS_DISABLED);
5679         }
5680
5681         tp->tx_prod = 0;
5682         tp->tx_cons = 0;
5683         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5684         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5685
5686         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5687                        tp->tx_desc_mapping,
5688                        (TG3_TX_RING_SIZE <<
5689                         BDINFO_FLAGS_MAXLEN_SHIFT),
5690                        NIC_SRAM_TX_BUFFER_DESC);
5691
5692         /* There is only one receive return ring on 5705/5750, no need
5693          * to explicitly disable the others.
5694          */
5695         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5696                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5697                      i += TG3_BDINFO_SIZE) {
5698                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5699                                       BDINFO_FLAGS_DISABLED);
5700                 }
5701         }
5702
5703         tp->rx_rcb_ptr = 0;
5704         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5705
5706         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5707                        tp->rx_rcb_mapping,
5708                        (TG3_RX_RCB_RING_SIZE(tp) <<
5709                         BDINFO_FLAGS_MAXLEN_SHIFT),
5710                        0);
5711
5712         tp->rx_std_ptr = tp->rx_pending;
5713         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5714                      tp->rx_std_ptr);
5715
5716         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5717                                                 tp->rx_jumbo_pending : 0;
5718         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5719                      tp->rx_jumbo_ptr);
5720
5721         /* Initialize MAC address and backoff seed. */
5722         __tg3_set_mac_addr(tp);
5723
5724         /* MTU + ethernet header + FCS + optional VLAN tag */
5725         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5726
5727         /* The slot time is changed by tg3_setup_phy if we
5728          * run at gigabit with half duplex.
5729          */
5730         tw32(MAC_TX_LENGTHS,
5731              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5732              (6 << TX_LENGTHS_IPG_SHIFT) |
5733              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5734
5735         /* Receive rules. */
5736         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5737         tw32(RCVLPC_CONFIG, 0x0181);
5738
5739         /* Calculate RDMAC_MODE setting early, we need it to determine
5740          * the RCVLPC_STATE_ENABLE mask.
5741          */
5742         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5743                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5744                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5745                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5746                       RDMAC_MODE_LNGREAD_ENAB);
5747         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5748                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5749
5750         /* If statement applies to 5705 and 5750 PCI devices only */
5751         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5752              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5753             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5754                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5755                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5756                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5757                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5758                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5759                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5760                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5761                 }
5762         }
5763
5764         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5765                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5766
5767 #if TG3_TSO_SUPPORT != 0
5768         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5769                 rdmac_mode |= (1 << 27);
5770 #endif
5771
5772         /* Receive/send statistics. */
5773         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5774             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5775                 val = tr32(RCVLPC_STATS_ENABLE);
5776                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5777                 tw32(RCVLPC_STATS_ENABLE, val);
5778         } else {
5779                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5780         }
5781         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5782         tw32(SNDDATAI_STATSENAB, 0xffffff);
5783         tw32(SNDDATAI_STATSCTRL,
5784              (SNDDATAI_SCTRL_ENABLE |
5785               SNDDATAI_SCTRL_FASTUPD));
5786
5787         /* Setup host coalescing engine. */
5788         tw32(HOSTCC_MODE, 0);
5789         for (i = 0; i < 2000; i++) {
5790                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5791                         break;
5792                 udelay(10);
5793         }
5794
5795         __tg3_set_coalesce(tp, &tp->coal);
5796
5797         /* set status block DMA address */
5798         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5799              ((u64) tp->status_mapping >> 32));
5800         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5801              ((u64) tp->status_mapping & 0xffffffff));
5802
5803         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5804                 /* Status/statistics block address.  See tg3_timer,
5805                  * the tg3_periodic_fetch_stats call there, and
5806                  * tg3_get_stats to see how this works for 5705/5750 chips.
5807                  */
5808                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5809                      ((u64) tp->stats_mapping >> 32));
5810                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5811                      ((u64) tp->stats_mapping & 0xffffffff));
5812                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5813                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5814         }
5815
5816         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5817
5818         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5819         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5820         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5821                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5822
5823         /* Clear statistics/status block in chip, and status block in ram. */
5824         for (i = NIC_SRAM_STATS_BLK;
5825              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5826              i += sizeof(u32)) {
5827                 tg3_write_mem(tp, i, 0);
5828                 udelay(40);
5829         }
5830         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5831
5832         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5833                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5834         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5835         udelay(40);
5836
5837         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5838          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5839          * register to preserve the GPIO settings for LOMs. The GPIOs,
5840          * whether used as inputs or outputs, are set by boot code after
5841          * reset.
5842          */
5843         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5844                 u32 gpio_mask;
5845
5846                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5847                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5848
5849                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5850                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5851                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5852
5853                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5854
5855                 /* GPIO1 must be driven high for eeprom write protect */
5856                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5857                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5858         }
5859         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5860         udelay(100);
5861
5862         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5863         tp->last_tag = 0;
5864
5865         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5866                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5867                 udelay(40);
5868         }
5869
5870         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5871                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5872                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5873                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5874                WDMAC_MODE_LNGREAD_ENAB);
5875
5876         /* If statement applies to 5705 and 5750 PCI devices only */
5877         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5878              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5879             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5880                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
5881                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5882                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5883                         /* nothing */
5884                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5885                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5886                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5887                         val |= WDMAC_MODE_RX_ACCEL;
5888                 }
5889         }
5890
5891         tw32_f(WDMAC_MODE, val);
5892         udelay(40);
5893
5894         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5895                 val = tr32(TG3PCI_X_CAPS);
5896                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5897                         val &= ~PCIX_CAPS_BURST_MASK;
5898                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5899                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5900                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5901                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5902                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5903                                 val |= (tp->split_mode_max_reqs <<
5904                                         PCIX_CAPS_SPLIT_SHIFT);
5905                 }
5906                 tw32(TG3PCI_X_CAPS, val);
5907         }
5908
5909         tw32_f(RDMAC_MODE, rdmac_mode);
5910         udelay(40);
5911
5912         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5913         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5914                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5915         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5916         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5917         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5918         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5919         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5920 #if TG3_TSO_SUPPORT != 0
5921         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5922                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5923 #endif
5924         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5925         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5926
5927         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5928                 err = tg3_load_5701_a0_firmware_fix(tp);
5929                 if (err)
5930                         return err;
5931         }
5932
5933 #if TG3_TSO_SUPPORT != 0
5934         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5935                 err = tg3_load_tso_firmware(tp);
5936                 if (err)
5937                         return err;
5938         }
5939 #endif
5940
5941         tp->tx_mode = TX_MODE_ENABLE;
5942         tw32_f(MAC_TX_MODE, tp->tx_mode);
5943         udelay(100);
5944
5945         tp->rx_mode = RX_MODE_ENABLE;
5946         tw32_f(MAC_RX_MODE, tp->rx_mode);
5947         udelay(10);
5948
5949         if (tp->link_config.phy_is_low_power) {
5950                 tp->link_config.phy_is_low_power = 0;
5951                 tp->link_config.speed = tp->link_config.orig_speed;
5952                 tp->link_config.duplex = tp->link_config.orig_duplex;
5953                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5954         }
5955
5956         tp->mi_mode = MAC_MI_MODE_BASE;
5957         tw32_f(MAC_MI_MODE, tp->mi_mode);
5958         udelay(80);
5959
5960         tw32(MAC_LED_CTRL, tp->led_ctrl);
5961
5962         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5963         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5964                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5965                 udelay(10);
5966         }
5967         tw32_f(MAC_RX_MODE, tp->rx_mode);
5968         udelay(10);
5969
5970         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5971                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5972                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5973                         /* Set drive transmission level to 1.2V  */
5974                         /* only if the signal pre-emphasis bit is not set  */
5975                         val = tr32(MAC_SERDES_CFG);
5976                         val &= 0xfffff000;
5977                         val |= 0x880;
5978                         tw32(MAC_SERDES_CFG, val);
5979                 }
5980                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5981                         tw32(MAC_SERDES_CFG, 0x616000);
5982         }
5983
5984         /* Prevent chip from dropping frames when flow control
5985          * is enabled.
5986          */
5987         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5988
5989         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5990             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5991                 /* Use hardware link auto-negotiation */
5992                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5993         }
5994
5995         err = tg3_setup_phy(tp, 1);
5996         if (err)
5997                 return err;
5998
5999         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6000                 u32 tmp;
6001
6002                 /* Clear CRC stats. */
6003                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6004                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6005                         tg3_readphy(tp, 0x14, &tmp);
6006                 }
6007         }
6008
6009         __tg3_set_rx_mode(tp->dev);
6010
6011         /* Initialize receive rules. */
6012         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6013         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6014         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6015         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6016
6017         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6018             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780))
6019                 limit = 8;
6020         else
6021                 limit = 16;
6022         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6023                 limit -= 4;
6024         switch (limit) {
6025         case 16:
6026                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6027         case 15:
6028                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6029         case 14:
6030                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6031         case 13:
6032                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6033         case 12:
6034                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6035         case 11:
6036                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6037         case 10:
6038                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6039         case 9:
6040                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6041         case 8:
6042                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6043         case 7:
6044                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6045         case 6:
6046                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6047         case 5:
6048                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6049         case 4:
6050                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6051         case 3:
6052                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6053         case 2:
6054         case 1:
6055
6056         default:
6057                 break;
6058         };
6059
6060         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6061
6062         return 0;
6063 }
6064
6065 /* Called at device open time to get the chip ready for
6066  * packet processing.  Invoked with tp->lock held.
6067  */
6068 static int tg3_init_hw(struct tg3 *tp)
6069 {
6070         int err;
6071
6072         /* Force the chip into D0. */
6073         err = tg3_set_power_state(tp, 0);
6074         if (err)
6075                 goto out;
6076
6077         tg3_switch_clocks(tp);
6078
6079         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6080
6081         err = tg3_reset_hw(tp);
6082
6083 out:
6084         return err;
6085 }
6086
6087 #define TG3_STAT_ADD32(PSTAT, REG) \
6088 do {    u32 __val = tr32(REG); \
6089         (PSTAT)->low += __val; \
6090         if ((PSTAT)->low < __val) \
6091                 (PSTAT)->high += 1; \
6092 } while (0)
6093
6094 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6095 {
6096         struct tg3_hw_stats *sp = tp->hw_stats;
6097
6098         if (!netif_carrier_ok(tp->dev))
6099                 return;
6100
6101         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6102         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6103         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6104         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6105         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6106         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6107         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6108         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6109         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6110         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6111         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6112         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6113         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6114
6115         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6116         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6117         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6118         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6119         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6120         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6121         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6122         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6123         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6124         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6125         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6126         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6127         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6128         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6129 }
6130
6131 static void tg3_timer(unsigned long __opaque)
6132 {
6133         struct tg3 *tp = (struct tg3 *) __opaque;
6134
6135         spin_lock(&tp->lock);
6136
6137         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6138                 /* All of this garbage is because when using non-tagged
6139                  * IRQ status the mailbox/status_block protocol the chip
6140                  * uses with the cpu is race prone.
6141                  */
6142                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6143                         tw32(GRC_LOCAL_CTRL,
6144                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6145                 } else {
6146                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6147                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6148                 }
6149
6150                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6151                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6152                         spin_unlock(&tp->lock);
6153                         schedule_work(&tp->reset_task);
6154                         return;
6155                 }
6156         }
6157
6158         /* This part only runs once per second. */
6159         if (!--tp->timer_counter) {
6160                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6161                         tg3_periodic_fetch_stats(tp);
6162
6163                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6164                         u32 mac_stat;
6165                         int phy_event;
6166
6167                         mac_stat = tr32(MAC_STATUS);
6168
6169                         phy_event = 0;
6170                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6171                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6172                                         phy_event = 1;
6173                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6174                                 phy_event = 1;
6175
6176                         if (phy_event)
6177                                 tg3_setup_phy(tp, 0);
6178                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6179                         u32 mac_stat = tr32(MAC_STATUS);
6180                         int need_setup = 0;
6181
6182                         if (netif_carrier_ok(tp->dev) &&
6183                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6184                                 need_setup = 1;
6185                         }
6186                         if (! netif_carrier_ok(tp->dev) &&
6187                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6188                                          MAC_STATUS_SIGNAL_DET))) {
6189                                 need_setup = 1;
6190                         }
6191                         if (need_setup) {
6192                                 tw32_f(MAC_MODE,
6193                                      (tp->mac_mode &
6194                                       ~MAC_MODE_PORT_MODE_MASK));
6195                                 udelay(40);
6196                                 tw32_f(MAC_MODE, tp->mac_mode);
6197                                 udelay(40);
6198                                 tg3_setup_phy(tp, 0);
6199                         }
6200                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6201                         tg3_serdes_parallel_detect(tp);
6202
6203                 tp->timer_counter = tp->timer_multiplier;
6204         }
6205
6206         /* Heartbeat is only sent once every 120 seconds.  */
6207         if (!--tp->asf_counter) {
6208                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6209                         u32 val;
6210
6211                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
6212                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6213                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
6214                         val = tr32(GRC_RX_CPU_EVENT);
6215                         val |= (1 << 14);
6216                         tw32(GRC_RX_CPU_EVENT, val);
6217                 }
6218                 tp->asf_counter = tp->asf_multiplier;
6219         }
6220
6221         spin_unlock(&tp->lock);
6222
6223         tp->timer.expires = jiffies + tp->timer_offset;
6224         add_timer(&tp->timer);
6225 }
6226
6227 static int tg3_test_interrupt(struct tg3 *tp)
6228 {
6229         struct net_device *dev = tp->dev;
6230         int err, i;
6231         u32 int_mbox = 0;
6232
6233         if (!netif_running(dev))
6234                 return -ENODEV;
6235
6236         tg3_disable_ints(tp);
6237
6238         free_irq(tp->pdev->irq, dev);
6239
6240         err = request_irq(tp->pdev->irq, tg3_test_isr,
6241                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6242         if (err)
6243                 return err;
6244
6245         tg3_enable_ints(tp);
6246
6247         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6248                HOSTCC_MODE_NOW);
6249
6250         for (i = 0; i < 5; i++) {
6251                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6252                                         TG3_64BIT_REG_LOW);
6253                 if (int_mbox != 0)
6254                         break;
6255                 msleep(10);
6256         }
6257
6258         tg3_disable_ints(tp);
6259
6260         free_irq(tp->pdev->irq, dev);
6261         
6262         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6263                 err = request_irq(tp->pdev->irq, tg3_msi,
6264                                   SA_SAMPLE_RANDOM, dev->name, dev);
6265         else {
6266                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6267                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6268                         fn = tg3_interrupt_tagged;
6269                 err = request_irq(tp->pdev->irq, fn,
6270                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6271         }
6272
6273         if (err)
6274                 return err;
6275
6276         if (int_mbox != 0)
6277                 return 0;
6278
6279         return -EIO;
6280 }
6281
6282 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
6283  * successfully restored
6284  */
6285 static int tg3_test_msi(struct tg3 *tp)
6286 {
6287         struct net_device *dev = tp->dev;
6288         int err;
6289         u16 pci_cmd;
6290
6291         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6292                 return 0;
6293
6294         /* Turn off SERR reporting in case MSI terminates with Master
6295          * Abort.
6296          */
6297         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6298         pci_write_config_word(tp->pdev, PCI_COMMAND,
6299                               pci_cmd & ~PCI_COMMAND_SERR);
6300
6301         err = tg3_test_interrupt(tp);
6302
6303         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6304
6305         if (!err)
6306                 return 0;
6307
6308         /* other failures */
6309         if (err != -EIO)
6310                 return err;
6311
6312         /* MSI test failed, go back to INTx mode */
6313         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6314                "switching to INTx mode. Please report this failure to "
6315                "the PCI maintainer and include system chipset information.\n",
6316                        tp->dev->name);
6317
6318         free_irq(tp->pdev->irq, dev);
6319         pci_disable_msi(tp->pdev);
6320
6321         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6322
6323         {
6324                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6325                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6326                         fn = tg3_interrupt_tagged;
6327
6328                 err = request_irq(tp->pdev->irq, fn,
6329                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6330         }
6331         if (err)
6332                 return err;
6333
6334         /* Need to reset the chip because the MSI cycle may have terminated
6335          * with Master Abort.
6336          */
6337         tg3_full_lock(tp, 1);
6338
6339         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6340         err = tg3_init_hw(tp);
6341
6342         tg3_full_unlock(tp);
6343
6344         if (err)
6345                 free_irq(tp->pdev->irq, dev);
6346
6347         return err;
6348 }
6349
6350 static int tg3_open(struct net_device *dev)
6351 {
6352         struct tg3 *tp = netdev_priv(dev);
6353         int err;
6354
6355         tg3_full_lock(tp, 0);
6356
6357         tg3_disable_ints(tp);
6358         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6359
6360         tg3_full_unlock(tp);
6361
6362         /* The placement of this call is tied
6363          * to the setup and use of Host TX descriptors.
6364          */
6365         err = tg3_alloc_consistent(tp);
6366         if (err)
6367                 return err;
6368
6369         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6370             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6371             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
6372                 /* All MSI supporting chips should support tagged
6373                  * status.  Assert that this is the case.
6374                  */
6375                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6376                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6377                                "Not using MSI.\n", tp->dev->name);
6378                 } else if (pci_enable_msi(tp->pdev) == 0) {
6379                         u32 msi_mode;
6380
6381                         msi_mode = tr32(MSGINT_MODE);
6382                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6383                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6384                 }
6385         }
6386         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6387                 err = request_irq(tp->pdev->irq, tg3_msi,
6388                                   SA_SAMPLE_RANDOM, dev->name, dev);
6389         else {
6390                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6391                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6392                         fn = tg3_interrupt_tagged;
6393
6394                 err = request_irq(tp->pdev->irq, fn,
6395                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6396         }
6397
6398         if (err) {
6399                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6400                         pci_disable_msi(tp->pdev);
6401                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6402                 }
6403                 tg3_free_consistent(tp);
6404                 return err;
6405         }
6406
6407         tg3_full_lock(tp, 0);
6408
6409         err = tg3_init_hw(tp);
6410         if (err) {
6411                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6412                 tg3_free_rings(tp);
6413         } else {
6414                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6415                         tp->timer_offset = HZ;
6416                 else
6417                         tp->timer_offset = HZ / 10;
6418
6419                 BUG_ON(tp->timer_offset > HZ);
6420                 tp->timer_counter = tp->timer_multiplier =
6421                         (HZ / tp->timer_offset);
6422                 tp->asf_counter = tp->asf_multiplier =
6423                         ((HZ / tp->timer_offset) * 120);
6424
6425                 init_timer(&tp->timer);
6426                 tp->timer.expires = jiffies + tp->timer_offset;
6427                 tp->timer.data = (unsigned long) tp;
6428                 tp->timer.function = tg3_timer;
6429         }
6430
6431         tg3_full_unlock(tp);
6432
6433         if (err) {
6434                 free_irq(tp->pdev->irq, dev);
6435                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6436                         pci_disable_msi(tp->pdev);
6437                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6438                 }
6439                 tg3_free_consistent(tp);
6440                 return err;
6441         }
6442
6443         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6444                 err = tg3_test_msi(tp);
6445
6446                 if (err) {
6447                         tg3_full_lock(tp, 0);
6448
6449                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6450                                 pci_disable_msi(tp->pdev);
6451                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6452                         }
6453                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6454                         tg3_free_rings(tp);
6455                         tg3_free_consistent(tp);
6456
6457                         tg3_full_unlock(tp);
6458
6459                         return err;
6460                 }
6461         }
6462
6463         tg3_full_lock(tp, 0);
6464
6465         add_timer(&tp->timer);
6466         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6467         tg3_enable_ints(tp);
6468
6469         tg3_full_unlock(tp);
6470
6471         netif_start_queue(dev);
6472
6473         return 0;
6474 }
6475
6476 #if 0
6477 /*static*/ void tg3_dump_state(struct tg3 *tp)
6478 {
6479         u32 val32, val32_2, val32_3, val32_4, val32_5;
6480         u16 val16;
6481         int i;
6482
6483         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6484         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6485         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6486                val16, val32);
6487
6488         /* MAC block */
6489         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6490                tr32(MAC_MODE), tr32(MAC_STATUS));
6491         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6492                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6493         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6494                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6495         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6496                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6497
6498         /* Send data initiator control block */
6499         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6500                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6501         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6502                tr32(SNDDATAI_STATSCTRL));
6503
6504         /* Send data completion control block */
6505         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6506
6507         /* Send BD ring selector block */
6508         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6509                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6510
6511         /* Send BD initiator control block */
6512         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6513                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6514
6515         /* Send BD completion control block */
6516         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6517
6518         /* Receive list placement control block */
6519         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6520                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6521         printk("       RCVLPC_STATSCTRL[%08x]\n",
6522                tr32(RCVLPC_STATSCTRL));
6523
6524         /* Receive data and receive BD initiator control block */
6525         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6526                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6527
6528         /* Receive data completion control block */
6529         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6530                tr32(RCVDCC_MODE));
6531
6532         /* Receive BD initiator control block */
6533         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6534                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6535
6536         /* Receive BD completion control block */
6537         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6538                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6539
6540         /* Receive list selector control block */
6541         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6542                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6543
6544         /* Mbuf cluster free block */
6545         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6546                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6547
6548         /* Host coalescing control block */
6549         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6550                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6551         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6552                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6553                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6554         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6555                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6556                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6557         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6558                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6559         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6560                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6561
6562         /* Memory arbiter control block */
6563         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6564                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6565
6566         /* Buffer manager control block */
6567         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6568                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6569         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6570                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6571         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6572                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6573                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6574                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6575
6576         /* Read DMA control block */
6577         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6578                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6579
6580         /* Write DMA control block */
6581         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6582                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6583
6584         /* DMA completion block */
6585         printk("DEBUG: DMAC_MODE[%08x]\n",
6586                tr32(DMAC_MODE));
6587
6588         /* GRC block */
6589         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6590                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6591         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6592                tr32(GRC_LOCAL_CTRL));
6593
6594         /* TG3_BDINFOs */
6595         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6596                tr32(RCVDBDI_JUMBO_BD + 0x0),
6597                tr32(RCVDBDI_JUMBO_BD + 0x4),
6598                tr32(RCVDBDI_JUMBO_BD + 0x8),
6599                tr32(RCVDBDI_JUMBO_BD + 0xc));
6600         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6601                tr32(RCVDBDI_STD_BD + 0x0),
6602                tr32(RCVDBDI_STD_BD + 0x4),
6603                tr32(RCVDBDI_STD_BD + 0x8),
6604                tr32(RCVDBDI_STD_BD + 0xc));
6605         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6606                tr32(RCVDBDI_MINI_BD + 0x0),
6607                tr32(RCVDBDI_MINI_BD + 0x4),
6608                tr32(RCVDBDI_MINI_BD + 0x8),
6609                tr32(RCVDBDI_MINI_BD + 0xc));
6610
6611         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6612         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6613         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6614         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6615         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6616                val32, val32_2, val32_3, val32_4);
6617
6618         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6619         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6620         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6621         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6622         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6623                val32, val32_2, val32_3, val32_4);
6624
6625         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6626         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6627         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6628         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6629         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6630         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6631                val32, val32_2, val32_3, val32_4, val32_5);
6632
6633         /* SW status block */
6634         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6635                tp->hw_status->status,
6636                tp->hw_status->status_tag,
6637                tp->hw_status->rx_jumbo_consumer,
6638                tp->hw_status->rx_consumer,
6639                tp->hw_status->rx_mini_consumer,
6640                tp->hw_status->idx[0].rx_producer,
6641                tp->hw_status->idx[0].tx_consumer);
6642
6643         /* SW statistics block */
6644         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6645                ((u32 *)tp->hw_stats)[0],
6646                ((u32 *)tp->hw_stats)[1],
6647                ((u32 *)tp->hw_stats)[2],
6648                ((u32 *)tp->hw_stats)[3]);
6649
6650         /* Mailboxes */
6651         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6652                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6653                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6654                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6655                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6656
6657         /* NIC side send descriptors. */
6658         for (i = 0; i < 6; i++) {
6659                 unsigned long txd;
6660
6661                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6662                         + (i * sizeof(struct tg3_tx_buffer_desc));
6663                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6664                        i,
6665                        readl(txd + 0x0), readl(txd + 0x4),
6666                        readl(txd + 0x8), readl(txd + 0xc));
6667         }
6668
6669         /* NIC side RX descriptors. */
6670         for (i = 0; i < 6; i++) {
6671                 unsigned long rxd;
6672
6673                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6674                         + (i * sizeof(struct tg3_rx_buffer_desc));
6675                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6676                        i,
6677                        readl(rxd + 0x0), readl(rxd + 0x4),
6678                        readl(rxd + 0x8), readl(rxd + 0xc));
6679                 rxd += (4 * sizeof(u32));
6680                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6681                        i,
6682                        readl(rxd + 0x0), readl(rxd + 0x4),
6683                        readl(rxd + 0x8), readl(rxd + 0xc));
6684         }
6685
6686         for (i = 0; i < 6; i++) {
6687                 unsigned long rxd;
6688
6689                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6690                         + (i * sizeof(struct tg3_rx_buffer_desc));
6691                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6692                        i,
6693                        readl(rxd + 0x0), readl(rxd + 0x4),
6694                        readl(rxd + 0x8), readl(rxd + 0xc));
6695                 rxd += (4 * sizeof(u32));
6696                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6697                        i,
6698                        readl(rxd + 0x0), readl(rxd + 0x4),
6699                        readl(rxd + 0x8), readl(rxd + 0xc));
6700         }
6701 }
6702 #endif
6703
6704 static struct net_device_stats *tg3_get_stats(struct net_device *);
6705 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6706
6707 static int tg3_close(struct net_device *dev)
6708 {
6709         struct tg3 *tp = netdev_priv(dev);
6710
6711         netif_stop_queue(dev);
6712
6713         del_timer_sync(&tp->timer);
6714
6715         tg3_full_lock(tp, 1);
6716 #if 0
6717         tg3_dump_state(tp);
6718 #endif
6719
6720         tg3_disable_ints(tp);
6721
6722         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6723         tg3_free_rings(tp);
6724         tp->tg3_flags &=
6725                 ~(TG3_FLAG_INIT_COMPLETE |
6726                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6727         netif_carrier_off(tp->dev);
6728
6729         tg3_full_unlock(tp);
6730
6731         free_irq(tp->pdev->irq, dev);
6732         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6733                 pci_disable_msi(tp->pdev);
6734                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6735         }
6736
6737         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6738                sizeof(tp->net_stats_prev));
6739         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6740                sizeof(tp->estats_prev));
6741
6742         tg3_free_consistent(tp);
6743
6744         return 0;
6745 }
6746
6747 static inline unsigned long get_stat64(tg3_stat64_t *val)
6748 {
6749         unsigned long ret;
6750
6751 #if (BITS_PER_LONG == 32)
6752         ret = val->low;
6753 #else
6754         ret = ((u64)val->high << 32) | ((u64)val->low);
6755 #endif
6756         return ret;
6757 }
6758
6759 static unsigned long calc_crc_errors(struct tg3 *tp)
6760 {
6761         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6762
6763         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6764             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6765              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6766                 u32 val;
6767
6768                 spin_lock_bh(&tp->lock);
6769                 if (!tg3_readphy(tp, 0x1e, &val)) {
6770                         tg3_writephy(tp, 0x1e, val | 0x8000);
6771                         tg3_readphy(tp, 0x14, &val);
6772                 } else
6773                         val = 0;
6774                 spin_unlock_bh(&tp->lock);
6775
6776                 tp->phy_crc_errors += val;
6777
6778                 return tp->phy_crc_errors;
6779         }
6780
6781         return get_stat64(&hw_stats->rx_fcs_errors);
6782 }
6783
6784 #define ESTAT_ADD(member) \
6785         estats->member =        old_estats->member + \
6786                                 get_stat64(&hw_stats->member)
6787
6788 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6789 {
6790         struct tg3_ethtool_stats *estats = &tp->estats;
6791         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6792         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6793
6794         if (!hw_stats)
6795                 return old_estats;
6796
6797         ESTAT_ADD(rx_octets);
6798         ESTAT_ADD(rx_fragments);
6799         ESTAT_ADD(rx_ucast_packets);
6800         ESTAT_ADD(rx_mcast_packets);
6801         ESTAT_ADD(rx_bcast_packets);
6802         ESTAT_ADD(rx_fcs_errors);
6803         ESTAT_ADD(rx_align_errors);
6804         ESTAT_ADD(rx_xon_pause_rcvd);
6805         ESTAT_ADD(rx_xoff_pause_rcvd);
6806         ESTAT_ADD(rx_mac_ctrl_rcvd);
6807         ESTAT_ADD(rx_xoff_entered);
6808         ESTAT_ADD(rx_frame_too_long_errors);
6809         ESTAT_ADD(rx_jabbers);
6810         ESTAT_ADD(rx_undersize_packets);
6811         ESTAT_ADD(rx_in_length_errors);
6812         ESTAT_ADD(rx_out_length_errors);
6813         ESTAT_ADD(rx_64_or_less_octet_packets);
6814         ESTAT_ADD(rx_65_to_127_octet_packets);
6815         ESTAT_ADD(rx_128_to_255_octet_packets);
6816         ESTAT_ADD(rx_256_to_511_octet_packets);
6817         ESTAT_ADD(rx_512_to_1023_octet_packets);
6818         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6819         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6820         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6821         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6822         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6823
6824         ESTAT_ADD(tx_octets);
6825         ESTAT_ADD(tx_collisions);
6826         ESTAT_ADD(tx_xon_sent);
6827         ESTAT_ADD(tx_xoff_sent);
6828         ESTAT_ADD(tx_flow_control);
6829         ESTAT_ADD(tx_mac_errors);
6830         ESTAT_ADD(tx_single_collisions);
6831         ESTAT_ADD(tx_mult_collisions);
6832         ESTAT_ADD(tx_deferred);
6833         ESTAT_ADD(tx_excessive_collisions);
6834         ESTAT_ADD(tx_late_collisions);
6835         ESTAT_ADD(tx_collide_2times);
6836         ESTAT_ADD(tx_collide_3times);
6837         ESTAT_ADD(tx_collide_4times);
6838         ESTAT_ADD(tx_collide_5times);
6839         ESTAT_ADD(tx_collide_6times);
6840         ESTAT_ADD(tx_collide_7times);
6841         ESTAT_ADD(tx_collide_8times);
6842         ESTAT_ADD(tx_collide_9times);
6843         ESTAT_ADD(tx_collide_10times);
6844         ESTAT_ADD(tx_collide_11times);
6845         ESTAT_ADD(tx_collide_12times);
6846         ESTAT_ADD(tx_collide_13times);
6847         ESTAT_ADD(tx_collide_14times);
6848         ESTAT_ADD(tx_collide_15times);
6849         ESTAT_ADD(tx_ucast_packets);
6850         ESTAT_ADD(tx_mcast_packets);
6851         ESTAT_ADD(tx_bcast_packets);
6852         ESTAT_ADD(tx_carrier_sense_errors);
6853         ESTAT_ADD(tx_discards);
6854         ESTAT_ADD(tx_errors);
6855
6856         ESTAT_ADD(dma_writeq_full);
6857         ESTAT_ADD(dma_write_prioq_full);
6858         ESTAT_ADD(rxbds_empty);
6859         ESTAT_ADD(rx_discards);
6860         ESTAT_ADD(rx_errors);
6861         ESTAT_ADD(rx_threshold_hit);
6862
6863         ESTAT_ADD(dma_readq_full);
6864         ESTAT_ADD(dma_read_prioq_full);
6865         ESTAT_ADD(tx_comp_queue_full);
6866
6867         ESTAT_ADD(ring_set_send_prod_index);
6868         ESTAT_ADD(ring_status_update);
6869         ESTAT_ADD(nic_irqs);
6870         ESTAT_ADD(nic_avoided_irqs);
6871         ESTAT_ADD(nic_tx_threshold_hit);
6872
6873         return estats;
6874 }
6875
6876 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6877 {
6878         struct tg3 *tp = netdev_priv(dev);
6879         struct net_device_stats *stats = &tp->net_stats;
6880         struct net_device_stats *old_stats = &tp->net_stats_prev;
6881         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6882
6883         if (!hw_stats)
6884                 return old_stats;
6885
6886         stats->rx_packets = old_stats->rx_packets +
6887                 get_stat64(&hw_stats->rx_ucast_packets) +
6888                 get_stat64(&hw_stats->rx_mcast_packets) +
6889                 get_stat64(&hw_stats->rx_bcast_packets);
6890                 
6891         stats->tx_packets = old_stats->tx_packets +
6892                 get_stat64(&hw_stats->tx_ucast_packets) +
6893                 get_stat64(&hw_stats->tx_mcast_packets) +
6894                 get_stat64(&hw_stats->tx_bcast_packets);
6895
6896         stats->rx_bytes = old_stats->rx_bytes +
6897                 get_stat64(&hw_stats->rx_octets);
6898         stats->tx_bytes = old_stats->tx_bytes +
6899                 get_stat64(&hw_stats->tx_octets);
6900
6901         stats->rx_errors = old_stats->rx_errors +
6902                 get_stat64(&hw_stats->rx_errors) +
6903                 get_stat64(&hw_stats->rx_discards);
6904         stats->tx_errors = old_stats->tx_errors +
6905                 get_stat64(&hw_stats->tx_errors) +
6906                 get_stat64(&hw_stats->tx_mac_errors) +
6907                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6908                 get_stat64(&hw_stats->tx_discards);
6909
6910         stats->multicast = old_stats->multicast +
6911                 get_stat64(&hw_stats->rx_mcast_packets);
6912         stats->collisions = old_stats->collisions +
6913                 get_stat64(&hw_stats->tx_collisions);
6914
6915         stats->rx_length_errors = old_stats->rx_length_errors +
6916                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6917                 get_stat64(&hw_stats->rx_undersize_packets);
6918
6919         stats->rx_over_errors = old_stats->rx_over_errors +
6920                 get_stat64(&hw_stats->rxbds_empty);
6921         stats->rx_frame_errors = old_stats->rx_frame_errors +
6922                 get_stat64(&hw_stats->rx_align_errors);
6923         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6924                 get_stat64(&hw_stats->tx_discards);
6925         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6926                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6927
6928         stats->rx_crc_errors = old_stats->rx_crc_errors +
6929                 calc_crc_errors(tp);
6930
6931         return stats;
6932 }
6933
6934 static inline u32 calc_crc(unsigned char *buf, int len)
6935 {
6936         u32 reg;
6937         u32 tmp;
6938         int j, k;
6939
6940         reg = 0xffffffff;
6941
6942         for (j = 0; j < len; j++) {
6943                 reg ^= buf[j];
6944
6945                 for (k = 0; k < 8; k++) {
6946                         tmp = reg & 0x01;
6947
6948                         reg >>= 1;
6949
6950                         if (tmp) {
6951                                 reg ^= 0xedb88320;
6952                         }
6953                 }
6954         }
6955
6956         return ~reg;
6957 }
6958
6959 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6960 {
6961         /* accept or reject all multicast frames */
6962         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6963         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6964         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6965         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6966 }
6967
6968 static void __tg3_set_rx_mode(struct net_device *dev)
6969 {
6970         struct tg3 *tp = netdev_priv(dev);
6971         u32 rx_mode;
6972
6973         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6974                                   RX_MODE_KEEP_VLAN_TAG);
6975
6976         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6977          * flag clear.
6978          */
6979 #if TG3_VLAN_TAG_USED
6980         if (!tp->vlgrp &&
6981             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6982                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6983 #else
6984         /* By definition, VLAN is disabled always in this
6985          * case.
6986          */
6987         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6988                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6989 #endif
6990
6991         if (dev->flags & IFF_PROMISC) {
6992                 /* Promiscuous mode. */
6993                 rx_mode |= RX_MODE_PROMISC;
6994         } else if (dev->flags & IFF_ALLMULTI) {
6995                 /* Accept all multicast. */
6996                 tg3_set_multi (tp, 1);
6997         } else if (dev->mc_count < 1) {
6998                 /* Reject all multicast. */
6999                 tg3_set_multi (tp, 0);
7000         } else {
7001                 /* Accept one or more multicast(s). */
7002                 struct dev_mc_list *mclist;
7003                 unsigned int i;
7004                 u32 mc_filter[4] = { 0, };
7005                 u32 regidx;
7006                 u32 bit;
7007                 u32 crc;
7008
7009                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7010                      i++, mclist = mclist->next) {
7011
7012                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7013                         bit = ~crc & 0x7f;
7014                         regidx = (bit & 0x60) >> 5;
7015                         bit &= 0x1f;
7016                         mc_filter[regidx] |= (1 << bit);
7017                 }
7018
7019                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7020                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7021                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7022                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7023         }
7024
7025         if (rx_mode != tp->rx_mode) {
7026                 tp->rx_mode = rx_mode;
7027                 tw32_f(MAC_RX_MODE, rx_mode);
7028                 udelay(10);
7029         }
7030 }
7031
7032 static void tg3_set_rx_mode(struct net_device *dev)
7033 {
7034         struct tg3 *tp = netdev_priv(dev);
7035
7036         tg3_full_lock(tp, 0);
7037         __tg3_set_rx_mode(dev);
7038         tg3_full_unlock(tp);
7039 }
7040
7041 #define TG3_REGDUMP_LEN         (32 * 1024)
7042
7043 static int tg3_get_regs_len(struct net_device *dev)
7044 {
7045         return TG3_REGDUMP_LEN;
7046 }
7047
7048 static void tg3_get_regs(struct net_device *dev,
7049                 struct ethtool_regs *regs, void *_p)
7050 {
7051         u32 *p = _p;
7052         struct tg3 *tp = netdev_priv(dev);
7053         u8 *orig_p = _p;
7054         int i;
7055
7056         regs->version = 0;
7057
7058         memset(p, 0, TG3_REGDUMP_LEN);
7059
7060         tg3_full_lock(tp, 0);
7061
7062 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7063 #define GET_REG32_LOOP(base,len)                \
7064 do {    p = (u32 *)(orig_p + (base));           \
7065         for (i = 0; i < len; i += 4)            \
7066                 __GET_REG32((base) + i);        \
7067 } while (0)
7068 #define GET_REG32_1(reg)                        \
7069 do {    p = (u32 *)(orig_p + (reg));            \
7070         __GET_REG32((reg));                     \
7071 } while (0)
7072
7073         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7074         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7075         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7076         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7077         GET_REG32_1(SNDDATAC_MODE);
7078         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7079         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7080         GET_REG32_1(SNDBDC_MODE);
7081         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7082         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7083         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7084         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7085         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7086         GET_REG32_1(RCVDCC_MODE);
7087         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7088         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7089         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7090         GET_REG32_1(MBFREE_MODE);
7091         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7092         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7093         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7094         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7095         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7096         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
7097         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
7098         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7099         GET_REG32_LOOP(FTQ_RESET, 0x120);
7100         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7101         GET_REG32_1(DMAC_MODE);
7102         GET_REG32_LOOP(GRC_MODE, 0x4c);
7103         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7104                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7105
7106 #undef __GET_REG32
7107 #undef GET_REG32_LOOP
7108 #undef GET_REG32_1
7109
7110         tg3_full_unlock(tp);
7111 }
7112
7113 static int tg3_get_eeprom_len(struct net_device *dev)
7114 {
7115         struct tg3 *tp = netdev_priv(dev);
7116
7117         return tp->nvram_size;
7118 }
7119
7120 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7121
7122 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7123 {
7124         struct tg3 *tp = netdev_priv(dev);
7125         int ret;
7126         u8  *pd;
7127         u32 i, offset, len, val, b_offset, b_count;
7128
7129         offset = eeprom->offset;
7130         len = eeprom->len;
7131         eeprom->len = 0;
7132
7133         eeprom->magic = TG3_EEPROM_MAGIC;
7134
7135         if (offset & 3) {
7136                 /* adjustments to start on required 4 byte boundary */
7137                 b_offset = offset & 3;
7138                 b_count = 4 - b_offset;
7139                 if (b_count > len) {
7140                         /* i.e. offset=1 len=2 */
7141                         b_count = len;
7142                 }
7143                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7144                 if (ret)
7145                         return ret;
7146                 val = cpu_to_le32(val);
7147                 memcpy(data, ((char*)&val) + b_offset, b_count);
7148                 len -= b_count;
7149                 offset += b_count;
7150                 eeprom->len += b_count;
7151         }
7152
7153         /* read bytes upto the last 4 byte boundary */
7154         pd = &data[eeprom->len];
7155         for (i = 0; i < (len - (len & 3)); i += 4) {
7156                 ret = tg3_nvram_read(tp, offset + i, &val);
7157                 if (ret) {
7158                         eeprom->len += i;
7159                         return ret;
7160                 }
7161                 val = cpu_to_le32(val);
7162                 memcpy(pd + i, &val, 4);
7163         }
7164         eeprom->len += i;
7165
7166         if (len & 3) {
7167                 /* read last bytes not ending on 4 byte boundary */
7168                 pd = &data[eeprom->len];
7169                 b_count = len & 3;
7170                 b_offset = offset + len - b_count;
7171                 ret = tg3_nvram_read(tp, b_offset, &val);
7172                 if (ret)
7173                         return ret;
7174                 val = cpu_to_le32(val);
7175                 memcpy(pd, ((char*)&val), b_count);
7176                 eeprom->len += b_count;
7177         }
7178         return 0;
7179 }
7180
7181 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7182
7183 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7184 {
7185         struct tg3 *tp = netdev_priv(dev);
7186         int ret;
7187         u32 offset, len, b_offset, odd_len, start, end;
7188         u8 *buf;
7189
7190         if (eeprom->magic != TG3_EEPROM_MAGIC)
7191                 return -EINVAL;
7192
7193         offset = eeprom->offset;
7194         len = eeprom->len;
7195
7196         if ((b_offset = (offset & 3))) {
7197                 /* adjustments to start on required 4 byte boundary */
7198                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7199                 if (ret)
7200                         return ret;
7201                 start = cpu_to_le32(start);
7202                 len += b_offset;
7203                 offset &= ~3;
7204                 if (len < 4)
7205                         len = 4;
7206         }
7207
7208         odd_len = 0;
7209         if (len & 3) {
7210                 /* adjustments to end on required 4 byte boundary */
7211                 odd_len = 1;
7212                 len = (len + 3) & ~3;
7213                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7214                 if (ret)
7215                         return ret;
7216                 end = cpu_to_le32(end);
7217         }
7218
7219         buf = data;
7220         if (b_offset || odd_len) {
7221                 buf = kmalloc(len, GFP_KERNEL);
7222                 if (buf == 0)
7223                         return -ENOMEM;
7224                 if (b_offset)
7225                         memcpy(buf, &start, 4);
7226                 if (odd_len)
7227                         memcpy(buf+len-4, &end, 4);
7228                 memcpy(buf + b_offset, data, eeprom->len);
7229         }
7230
7231         ret = tg3_nvram_write_block(tp, offset, len, buf);
7232
7233         if (buf != data)
7234                 kfree(buf);
7235
7236         return ret;
7237 }
7238
7239 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7240 {
7241         struct tg3 *tp = netdev_priv(dev);
7242   
7243         cmd->supported = (SUPPORTED_Autoneg);
7244
7245         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7246                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7247                                    SUPPORTED_1000baseT_Full);
7248
7249         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
7250                 cmd->supported |= (SUPPORTED_100baseT_Half |
7251                                   SUPPORTED_100baseT_Full |
7252                                   SUPPORTED_10baseT_Half |
7253                                   SUPPORTED_10baseT_Full |
7254                                   SUPPORTED_MII);
7255         else
7256                 cmd->supported |= SUPPORTED_FIBRE;
7257   
7258         cmd->advertising = tp->link_config.advertising;
7259         if (netif_running(dev)) {
7260                 cmd->speed = tp->link_config.active_speed;
7261                 cmd->duplex = tp->link_config.active_duplex;
7262         }
7263         cmd->port = 0;
7264         cmd->phy_address = PHY_ADDR;
7265         cmd->transceiver = 0;
7266         cmd->autoneg = tp->link_config.autoneg;
7267         cmd->maxtxpkt = 0;
7268         cmd->maxrxpkt = 0;
7269         return 0;
7270 }
7271   
7272 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7273 {
7274         struct tg3 *tp = netdev_priv(dev);
7275   
7276         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7277                 /* These are the only valid advertisement bits allowed.  */
7278                 if (cmd->autoneg == AUTONEG_ENABLE &&
7279                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7280                                           ADVERTISED_1000baseT_Full |
7281                                           ADVERTISED_Autoneg |
7282                                           ADVERTISED_FIBRE)))
7283                         return -EINVAL;
7284         }
7285
7286         tg3_full_lock(tp, 0);
7287
7288         tp->link_config.autoneg = cmd->autoneg;
7289         if (cmd->autoneg == AUTONEG_ENABLE) {
7290                 tp->link_config.advertising = cmd->advertising;
7291                 tp->link_config.speed = SPEED_INVALID;
7292                 tp->link_config.duplex = DUPLEX_INVALID;
7293         } else {
7294                 tp->link_config.advertising = 0;
7295                 tp->link_config.speed = cmd->speed;
7296                 tp->link_config.duplex = cmd->duplex;
7297         }
7298   
7299         if (netif_running(dev))
7300                 tg3_setup_phy(tp, 1);
7301
7302         tg3_full_unlock(tp);
7303   
7304         return 0;
7305 }
7306   
7307 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7308 {
7309         struct tg3 *tp = netdev_priv(dev);
7310   
7311         strcpy(info->driver, DRV_MODULE_NAME);
7312         strcpy(info->version, DRV_MODULE_VERSION);
7313         strcpy(info->bus_info, pci_name(tp->pdev));
7314 }
7315   
7316 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7317 {
7318         struct tg3 *tp = netdev_priv(dev);
7319   
7320         wol->supported = WAKE_MAGIC;
7321         wol->wolopts = 0;
7322         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7323                 wol->wolopts = WAKE_MAGIC;
7324         memset(&wol->sopass, 0, sizeof(wol->sopass));
7325 }
7326   
7327 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7328 {
7329         struct tg3 *tp = netdev_priv(dev);
7330   
7331         if (wol->wolopts & ~WAKE_MAGIC)
7332                 return -EINVAL;
7333         if ((wol->wolopts & WAKE_MAGIC) &&
7334             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7335             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7336                 return -EINVAL;
7337   
7338         spin_lock_bh(&tp->lock);
7339         if (wol->wolopts & WAKE_MAGIC)
7340                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7341         else
7342                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7343         spin_unlock_bh(&tp->lock);
7344   
7345         return 0;
7346 }
7347   
7348 static u32 tg3_get_msglevel(struct net_device *dev)
7349 {
7350         struct tg3 *tp = netdev_priv(dev);
7351         return tp->msg_enable;
7352 }
7353   
7354 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7355 {
7356         struct tg3 *tp = netdev_priv(dev);
7357         tp->msg_enable = value;
7358 }
7359   
7360 #if TG3_TSO_SUPPORT != 0
7361 static int tg3_set_tso(struct net_device *dev, u32 value)
7362 {
7363         struct tg3 *tp = netdev_priv(dev);
7364
7365         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7366                 if (value)
7367                         return -EINVAL;
7368                 return 0;
7369         }
7370         return ethtool_op_set_tso(dev, value);
7371 }
7372 #endif
7373   
7374 static int tg3_nway_reset(struct net_device *dev)
7375 {
7376         struct tg3 *tp = netdev_priv(dev);
7377         u32 bmcr;
7378         int r;
7379   
7380         if (!netif_running(dev))
7381                 return -EAGAIN;
7382
7383         spin_lock_bh(&tp->lock);
7384         r = -EINVAL;
7385         tg3_readphy(tp, MII_BMCR, &bmcr);
7386         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7387             (bmcr & BMCR_ANENABLE)) {
7388                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
7389                 r = 0;
7390         }
7391         spin_unlock_bh(&tp->lock);
7392   
7393         return r;
7394 }
7395   
7396 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7397 {
7398         struct tg3 *tp = netdev_priv(dev);
7399   
7400         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7401         ering->rx_mini_max_pending = 0;
7402         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7403
7404         ering->rx_pending = tp->rx_pending;
7405         ering->rx_mini_pending = 0;
7406         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7407         ering->tx_pending = tp->tx_pending;
7408 }
7409   
7410 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7411 {
7412         struct tg3 *tp = netdev_priv(dev);
7413         int irq_sync = 0;
7414   
7415         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7416             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7417             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7418                 return -EINVAL;
7419   
7420         if (netif_running(dev)) {
7421                 tg3_netif_stop(tp);
7422                 irq_sync = 1;
7423         }
7424
7425         tg3_full_lock(tp, irq_sync);
7426   
7427         tp->rx_pending = ering->rx_pending;
7428
7429         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7430             tp->rx_pending > 63)
7431                 tp->rx_pending = 63;
7432         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7433         tp->tx_pending = ering->tx_pending;
7434
7435         if (netif_running(dev)) {
7436                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7437                 tg3_init_hw(tp);
7438                 tg3_netif_start(tp);
7439         }
7440
7441         tg3_full_unlock(tp);
7442   
7443         return 0;
7444 }
7445   
7446 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7447 {
7448         struct tg3 *tp = netdev_priv(dev);
7449   
7450         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7451         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7452         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7453 }
7454   
7455 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7456 {
7457         struct tg3 *tp = netdev_priv(dev);
7458         int irq_sync = 0;
7459   
7460         if (netif_running(dev)) {
7461                 tg3_netif_stop(tp);
7462                 irq_sync = 1;
7463         }
7464
7465         tg3_full_lock(tp, irq_sync);
7466
7467         if (epause->autoneg)
7468                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7469         else
7470                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7471         if (epause->rx_pause)
7472                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7473         else
7474                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7475         if (epause->tx_pause)
7476                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7477         else
7478                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7479
7480         if (netif_running(dev)) {
7481                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7482                 tg3_init_hw(tp);
7483                 tg3_netif_start(tp);
7484         }
7485
7486         tg3_full_unlock(tp);
7487   
7488         return 0;
7489 }
7490   
7491 static u32 tg3_get_rx_csum(struct net_device *dev)
7492 {
7493         struct tg3 *tp = netdev_priv(dev);
7494         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7495 }
7496   
7497 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7498 {
7499         struct tg3 *tp = netdev_priv(dev);
7500   
7501         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7502                 if (data != 0)
7503                         return -EINVAL;
7504                 return 0;
7505         }
7506   
7507         spin_lock_bh(&tp->lock);
7508         if (data)
7509                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7510         else
7511                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7512         spin_unlock_bh(&tp->lock);
7513   
7514         return 0;
7515 }
7516   
7517 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7518 {
7519         struct tg3 *tp = netdev_priv(dev);
7520   
7521         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7522                 if (data != 0)
7523                         return -EINVAL;
7524                 return 0;
7525         }
7526   
7527         if (data)
7528                 dev->features |= NETIF_F_IP_CSUM;
7529         else
7530                 dev->features &= ~NETIF_F_IP_CSUM;
7531
7532         return 0;
7533 }
7534
7535 static int tg3_get_stats_count (struct net_device *dev)
7536 {
7537         return TG3_NUM_STATS;
7538 }
7539
7540 static int tg3_get_test_count (struct net_device *dev)
7541 {
7542         return TG3_NUM_TEST;
7543 }
7544
7545 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7546 {
7547         switch (stringset) {
7548         case ETH_SS_STATS:
7549                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7550                 break;
7551         case ETH_SS_TEST:
7552                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7553                 break;
7554         default:
7555                 WARN_ON(1);     /* we need a WARN() */
7556                 break;
7557         }
7558 }
7559
7560 static void tg3_get_ethtool_stats (struct net_device *dev,
7561                                    struct ethtool_stats *estats, u64 *tmp_stats)
7562 {
7563         struct tg3 *tp = netdev_priv(dev);
7564         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7565 }
7566
7567 #define NVRAM_TEST_SIZE 0x100
7568
7569 static int tg3_test_nvram(struct tg3 *tp)
7570 {
7571         u32 *buf, csum;
7572         int i, j, err = 0;
7573
7574         buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7575         if (buf == NULL)
7576                 return -ENOMEM;
7577
7578         for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7579                 u32 val;
7580
7581                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7582                         break;
7583                 buf[j] = cpu_to_le32(val);
7584         }
7585         if (i < NVRAM_TEST_SIZE)
7586                 goto out;
7587
7588         err = -EIO;
7589         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7590                 goto out;
7591
7592         /* Bootstrap checksum at offset 0x10 */
7593         csum = calc_crc((unsigned char *) buf, 0x10);
7594         if(csum != cpu_to_le32(buf[0x10/4]))
7595                 goto out;
7596
7597         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7598         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7599         if (csum != cpu_to_le32(buf[0xfc/4]))
7600                  goto out;
7601
7602         err = 0;
7603
7604 out:
7605         kfree(buf);
7606         return err;
7607 }
7608
7609 #define TG3_SERDES_TIMEOUT_SEC  2
7610 #define TG3_COPPER_TIMEOUT_SEC  6
7611
7612 static int tg3_test_link(struct tg3 *tp)
7613 {
7614         int i, max;
7615
7616         if (!netif_running(tp->dev))
7617                 return -ENODEV;
7618
7619         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7620                 max = TG3_SERDES_TIMEOUT_SEC;
7621         else
7622                 max = TG3_COPPER_TIMEOUT_SEC;
7623
7624         for (i = 0; i < max; i++) {
7625                 if (netif_carrier_ok(tp->dev))
7626                         return 0;
7627
7628                 if (msleep_interruptible(1000))
7629                         break;
7630         }
7631
7632         return -EIO;
7633 }
7634
7635 /* Only test the commonly used registers */
7636 static int tg3_test_registers(struct tg3 *tp)
7637 {
7638         int i, is_5705;
7639         u32 offset, read_mask, write_mask, val, save_val, read_val;
7640         static struct {
7641                 u16 offset;
7642                 u16 flags;
7643 #define TG3_FL_5705     0x1
7644 #define TG3_FL_NOT_5705 0x2
7645 #define TG3_FL_NOT_5788 0x4
7646                 u32 read_mask;
7647                 u32 write_mask;
7648         } reg_tbl[] = {
7649                 /* MAC Control Registers */
7650                 { MAC_MODE, TG3_FL_NOT_5705,
7651                         0x00000000, 0x00ef6f8c },
7652                 { MAC_MODE, TG3_FL_5705,
7653                         0x00000000, 0x01ef6b8c },
7654                 { MAC_STATUS, TG3_FL_NOT_5705,
7655                         0x03800107, 0x00000000 },
7656                 { MAC_STATUS, TG3_FL_5705,
7657                         0x03800100, 0x00000000 },
7658                 { MAC_ADDR_0_HIGH, 0x0000,
7659                         0x00000000, 0x0000ffff },
7660                 { MAC_ADDR_0_LOW, 0x0000,
7661                         0x00000000, 0xffffffff },
7662                 { MAC_RX_MTU_SIZE, 0x0000,
7663                         0x00000000, 0x0000ffff },
7664                 { MAC_TX_MODE, 0x0000,
7665                         0x00000000, 0x00000070 },
7666                 { MAC_TX_LENGTHS, 0x0000,
7667                         0x00000000, 0x00003fff },
7668                 { MAC_RX_MODE, TG3_FL_NOT_5705,
7669                         0x00000000, 0x000007fc },
7670                 { MAC_RX_MODE, TG3_FL_5705,
7671                         0x00000000, 0x000007dc },
7672                 { MAC_HASH_REG_0, 0x0000,
7673                         0x00000000, 0xffffffff },
7674                 { MAC_HASH_REG_1, 0x0000,
7675                         0x00000000, 0xffffffff },
7676                 { MAC_HASH_REG_2, 0x0000,
7677                         0x00000000, 0xffffffff },
7678                 { MAC_HASH_REG_3, 0x0000,
7679                         0x00000000, 0xffffffff },
7680
7681                 /* Receive Data and Receive BD Initiator Control Registers. */
7682                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7683                         0x00000000, 0xffffffff },
7684                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7685                         0x00000000, 0xffffffff },
7686                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7687                         0x00000000, 0x00000003 },
7688                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7689                         0x00000000, 0xffffffff },
7690                 { RCVDBDI_STD_BD+0, 0x0000,
7691                         0x00000000, 0xffffffff },
7692                 { RCVDBDI_STD_BD+4, 0x0000,
7693                         0x00000000, 0xffffffff },
7694                 { RCVDBDI_STD_BD+8, 0x0000,
7695                         0x00000000, 0xffff0002 },
7696                 { RCVDBDI_STD_BD+0xc, 0x0000,
7697                         0x00000000, 0xffffffff },
7698         
7699                 /* Receive BD Initiator Control Registers. */
7700                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7701                         0x00000000, 0xffffffff },
7702                 { RCVBDI_STD_THRESH, TG3_FL_5705,
7703                         0x00000000, 0x000003ff },
7704                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7705                         0x00000000, 0xffffffff },
7706         
7707                 /* Host Coalescing Control Registers. */
7708                 { HOSTCC_MODE, TG3_FL_NOT_5705,
7709                         0x00000000, 0x00000004 },
7710                 { HOSTCC_MODE, TG3_FL_5705,
7711                         0x00000000, 0x000000f6 },
7712                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7713                         0x00000000, 0xffffffff },
7714                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7715                         0x00000000, 0x000003ff },
7716                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7717                         0x00000000, 0xffffffff },
7718                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7719                         0x00000000, 0x000003ff },
7720                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7721                         0x00000000, 0xffffffff },
7722                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7723                         0x00000000, 0x000000ff },
7724                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7725                         0x00000000, 0xffffffff },
7726                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7727                         0x00000000, 0x000000ff },
7728                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7729                         0x00000000, 0xffffffff },
7730                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7731                         0x00000000, 0xffffffff },
7732                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7733                         0x00000000, 0xffffffff },
7734                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7735                         0x00000000, 0x000000ff },
7736                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7737                         0x00000000, 0xffffffff },
7738                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7739                         0x00000000, 0x000000ff },
7740                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7741                         0x00000000, 0xffffffff },
7742                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7743                         0x00000000, 0xffffffff },
7744                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7745                         0x00000000, 0xffffffff },
7746                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7747                         0x00000000, 0xffffffff },
7748                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7749                         0x00000000, 0xffffffff },
7750                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7751                         0xffffffff, 0x00000000 },
7752                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7753                         0xffffffff, 0x00000000 },
7754
7755                 /* Buffer Manager Control Registers. */
7756                 { BUFMGR_MB_POOL_ADDR, 0x0000,
7757                         0x00000000, 0x007fff80 },
7758                 { BUFMGR_MB_POOL_SIZE, 0x0000,
7759                         0x00000000, 0x007fffff },
7760                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7761                         0x00000000, 0x0000003f },
7762                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7763                         0x00000000, 0x000001ff },
7764                 { BUFMGR_MB_HIGH_WATER, 0x0000,
7765                         0x00000000, 0x000001ff },
7766                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7767                         0xffffffff, 0x00000000 },
7768                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7769                         0xffffffff, 0x00000000 },
7770         
7771                 /* Mailbox Registers */
7772                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7773                         0x00000000, 0x000001ff },
7774                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7775                         0x00000000, 0x000001ff },
7776                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7777                         0x00000000, 0x000007ff },
7778                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7779                         0x00000000, 0x000001ff },
7780
7781                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7782         };
7783
7784         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7785                 is_5705 = 1;
7786         else
7787                 is_5705 = 0;
7788
7789         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7790                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7791                         continue;
7792
7793                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7794                         continue;
7795
7796                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7797                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
7798                         continue;
7799
7800                 offset = (u32) reg_tbl[i].offset;
7801                 read_mask = reg_tbl[i].read_mask;
7802                 write_mask = reg_tbl[i].write_mask;
7803
7804                 /* Save the original register content */
7805                 save_val = tr32(offset);
7806
7807                 /* Determine the read-only value. */
7808                 read_val = save_val & read_mask;
7809
7810                 /* Write zero to the register, then make sure the read-only bits
7811                  * are not changed and the read/write bits are all zeros.
7812                  */
7813                 tw32(offset, 0);
7814
7815                 val = tr32(offset);
7816
7817                 /* Test the read-only and read/write bits. */
7818                 if (((val & read_mask) != read_val) || (val & write_mask))
7819                         goto out;
7820
7821                 /* Write ones to all the bits defined by RdMask and WrMask, then
7822                  * make sure the read-only bits are not changed and the
7823                  * read/write bits are all ones.
7824                  */
7825                 tw32(offset, read_mask | write_mask);
7826
7827                 val = tr32(offset);
7828
7829                 /* Test the read-only bits. */
7830                 if ((val & read_mask) != read_val)
7831                         goto out;
7832
7833                 /* Test the read/write bits. */
7834                 if ((val & write_mask) != write_mask)
7835                         goto out;
7836
7837                 tw32(offset, save_val);
7838         }
7839
7840         return 0;
7841
7842 out:
7843         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7844         tw32(offset, save_val);
7845         return -EIO;
7846 }
7847
7848 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7849 {
7850         static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7851         int i;
7852         u32 j;
7853
7854         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
7855                 for (j = 0; j < len; j += 4) {
7856                         u32 val;
7857
7858                         tg3_write_mem(tp, offset + j, test_pattern[i]);
7859                         tg3_read_mem(tp, offset + j, &val);
7860                         if (val != test_pattern[i])
7861                                 return -EIO;
7862                 }
7863         }
7864         return 0;
7865 }
7866
7867 static int tg3_test_memory(struct tg3 *tp)
7868 {
7869         static struct mem_entry {
7870                 u32 offset;
7871                 u32 len;
7872         } mem_tbl_570x[] = {
7873                 { 0x00000000, 0x01000},
7874                 { 0x00002000, 0x1c000},
7875                 { 0xffffffff, 0x00000}
7876         }, mem_tbl_5705[] = {
7877                 { 0x00000100, 0x0000c},
7878                 { 0x00000200, 0x00008},
7879                 { 0x00000b50, 0x00400},
7880                 { 0x00004000, 0x00800},
7881                 { 0x00006000, 0x01000},
7882                 { 0x00008000, 0x02000},
7883                 { 0x00010000, 0x0e000},
7884                 { 0xffffffff, 0x00000}
7885         };
7886         struct mem_entry *mem_tbl;
7887         int err = 0;
7888         int i;
7889
7890         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7891                 mem_tbl = mem_tbl_5705;
7892         else
7893                 mem_tbl = mem_tbl_570x;
7894
7895         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7896                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7897                     mem_tbl[i].len)) != 0)
7898                         break;
7899         }
7900         
7901         return err;
7902 }
7903
7904 static int tg3_test_loopback(struct tg3 *tp)
7905 {
7906         u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key;
7907         u32 desc_idx;
7908         struct sk_buff *skb, *rx_skb;
7909         u8 *tx_data;
7910         dma_addr_t map;
7911         int num_pkts, tx_len, rx_len, i, err;
7912         struct tg3_rx_buffer_desc *desc;
7913
7914         if (!netif_running(tp->dev))
7915                 return -ENODEV;
7916
7917         err = -EIO;
7918
7919         tg3_reset_hw(tp);
7920
7921         mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7922                    MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7923                    MAC_MODE_PORT_MODE_GMII;
7924         tw32(MAC_MODE, mac_mode);
7925
7926         tx_len = 1514;
7927         skb = dev_alloc_skb(tx_len);
7928         tx_data = skb_put(skb, tx_len);
7929         memcpy(tx_data, tp->dev->dev_addr, 6);
7930         memset(tx_data + 6, 0x0, 8);
7931
7932         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
7933
7934         for (i = 14; i < tx_len; i++)
7935                 tx_data[i] = (u8) (i & 0xff);
7936
7937         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
7938
7939         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7940              HOSTCC_MODE_NOW);
7941
7942         udelay(10);
7943
7944         rx_start_idx = tp->hw_status->idx[0].rx_producer;
7945
7946         send_idx = 0;
7947         num_pkts = 0;
7948
7949         tg3_set_txd(tp, send_idx, map, tx_len, 0, 1);
7950
7951         send_idx++;
7952         num_pkts++;
7953
7954         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
7955         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
7956
7957         udelay(10);
7958
7959         for (i = 0; i < 10; i++) {
7960                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7961                        HOSTCC_MODE_NOW);
7962
7963                 udelay(10);
7964
7965                 tx_idx = tp->hw_status->idx[0].tx_consumer;
7966                 rx_idx = tp->hw_status->idx[0].rx_producer;
7967                 if ((tx_idx == send_idx) &&
7968                     (rx_idx == (rx_start_idx + num_pkts)))
7969                         break;
7970         }
7971
7972         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
7973         dev_kfree_skb(skb);
7974
7975         if (tx_idx != send_idx)
7976                 goto out;
7977
7978         if (rx_idx != rx_start_idx + num_pkts)
7979                 goto out;
7980
7981         desc = &tp->rx_rcb[rx_start_idx];
7982         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
7983         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
7984         if (opaque_key != RXD_OPAQUE_RING_STD)
7985                 goto out;
7986
7987         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
7988             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
7989                 goto out;
7990
7991         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
7992         if (rx_len != tx_len)
7993                 goto out;
7994
7995         rx_skb = tp->rx_std_buffers[desc_idx].skb;
7996
7997         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
7998         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
7999
8000         for (i = 14; i < tx_len; i++) {
8001                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8002                         goto out;
8003         }
8004         err = 0;
8005         
8006         /* tg3_free_rings will unmap and free the rx_skb */
8007 out:
8008         return err;
8009 }
8010
8011 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8012                           u64 *data)
8013 {
8014         struct tg3 *tp = netdev_priv(dev);
8015
8016         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8017
8018         if (tg3_test_nvram(tp) != 0) {
8019                 etest->flags |= ETH_TEST_FL_FAILED;
8020                 data[0] = 1;
8021         }
8022         if (tg3_test_link(tp) != 0) {
8023                 etest->flags |= ETH_TEST_FL_FAILED;
8024                 data[1] = 1;
8025         }
8026         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8027                 int irq_sync = 0;
8028
8029                 if (netif_running(dev)) {
8030                         tg3_netif_stop(tp);
8031                         irq_sync = 1;
8032                 }
8033
8034                 tg3_full_lock(tp, irq_sync);
8035
8036                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8037                 tg3_nvram_lock(tp);
8038                 tg3_halt_cpu(tp, RX_CPU_BASE);
8039                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8040                         tg3_halt_cpu(tp, TX_CPU_BASE);
8041                 tg3_nvram_unlock(tp);
8042
8043                 if (tg3_test_registers(tp) != 0) {
8044                         etest->flags |= ETH_TEST_FL_FAILED;
8045                         data[2] = 1;
8046                 }
8047                 if (tg3_test_memory(tp) != 0) {
8048                         etest->flags |= ETH_TEST_FL_FAILED;
8049                         data[3] = 1;
8050                 }
8051                 if (tg3_test_loopback(tp) != 0) {
8052                         etest->flags |= ETH_TEST_FL_FAILED;
8053                         data[4] = 1;
8054                 }
8055
8056                 tg3_full_unlock(tp);
8057
8058                 if (tg3_test_interrupt(tp) != 0) {
8059                         etest->flags |= ETH_TEST_FL_FAILED;
8060                         data[5] = 1;
8061                 }
8062
8063                 tg3_full_lock(tp, 0);
8064
8065                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8066                 if (netif_running(dev)) {
8067                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8068                         tg3_init_hw(tp);
8069                         tg3_netif_start(tp);
8070                 }
8071
8072                 tg3_full_unlock(tp);
8073         }
8074 }
8075
8076 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8077 {
8078         struct mii_ioctl_data *data = if_mii(ifr);
8079         struct tg3 *tp = netdev_priv(dev);
8080         int err;
8081
8082         switch(cmd) {
8083         case SIOCGMIIPHY:
8084                 data->phy_id = PHY_ADDR;
8085
8086                 /* fallthru */
8087         case SIOCGMIIREG: {
8088                 u32 mii_regval;
8089
8090                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8091                         break;                  /* We have no PHY */
8092
8093                 spin_lock_bh(&tp->lock);
8094                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8095                 spin_unlock_bh(&tp->lock);
8096
8097                 data->val_out = mii_regval;
8098
8099                 return err;
8100         }
8101
8102         case SIOCSMIIREG:
8103                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8104                         break;                  /* We have no PHY */
8105
8106                 if (!capable(CAP_NET_ADMIN))
8107                         return -EPERM;
8108
8109                 spin_lock_bh(&tp->lock);
8110                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8111                 spin_unlock_bh(&tp->lock);
8112
8113                 return err;
8114
8115         default:
8116                 /* do nothing */
8117                 break;
8118         }
8119         return -EOPNOTSUPP;
8120 }
8121
8122 #if TG3_VLAN_TAG_USED
8123 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8124 {
8125         struct tg3 *tp = netdev_priv(dev);
8126
8127         tg3_full_lock(tp, 0);
8128
8129         tp->vlgrp = grp;
8130
8131         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8132         __tg3_set_rx_mode(dev);
8133
8134         tg3_full_unlock(tp);
8135 }
8136
8137 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8138 {
8139         struct tg3 *tp = netdev_priv(dev);
8140
8141         tg3_full_lock(tp, 0);
8142         if (tp->vlgrp)
8143                 tp->vlgrp->vlan_devices[vid] = NULL;
8144         tg3_full_unlock(tp);
8145 }
8146 #endif
8147
8148 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8149 {
8150         struct tg3 *tp = netdev_priv(dev);
8151
8152         memcpy(ec, &tp->coal, sizeof(*ec));
8153         return 0;
8154 }
8155
8156 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8157 {
8158         struct tg3 *tp = netdev_priv(dev);
8159         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8160         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8161
8162         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8163                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8164                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8165                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8166                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8167         }
8168
8169         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8170             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8171             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8172             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8173             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8174             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8175             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8176             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8177             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8178             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8179                 return -EINVAL;
8180
8181         /* No rx interrupts will be generated if both are zero */
8182         if ((ec->rx_coalesce_usecs == 0) &&
8183             (ec->rx_max_coalesced_frames == 0))
8184                 return -EINVAL;
8185
8186         /* No tx interrupts will be generated if both are zero */
8187         if ((ec->tx_coalesce_usecs == 0) &&
8188             (ec->tx_max_coalesced_frames == 0))
8189                 return -EINVAL;
8190
8191         /* Only copy relevant parameters, ignore all others. */
8192         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8193         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8194         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8195         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8196         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8197         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8198         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8199         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8200         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8201
8202         if (netif_running(dev)) {
8203                 tg3_full_lock(tp, 0);
8204                 __tg3_set_coalesce(tp, &tp->coal);
8205                 tg3_full_unlock(tp);
8206         }
8207         return 0;
8208 }
8209
8210 static struct ethtool_ops tg3_ethtool_ops = {
8211         .get_settings           = tg3_get_settings,
8212         .set_settings           = tg3_set_settings,
8213         .get_drvinfo            = tg3_get_drvinfo,
8214         .get_regs_len           = tg3_get_regs_len,
8215         .get_regs               = tg3_get_regs,
8216         .get_wol                = tg3_get_wol,
8217         .set_wol                = tg3_set_wol,
8218         .get_msglevel           = tg3_get_msglevel,
8219         .set_msglevel           = tg3_set_msglevel,
8220         .nway_reset             = tg3_nway_reset,
8221         .get_link               = ethtool_op_get_link,
8222         .get_eeprom_len         = tg3_get_eeprom_len,
8223         .get_eeprom             = tg3_get_eeprom,
8224         .set_eeprom             = tg3_set_eeprom,
8225         .get_ringparam          = tg3_get_ringparam,
8226         .set_ringparam          = tg3_set_ringparam,
8227         .get_pauseparam         = tg3_get_pauseparam,
8228         .set_pauseparam         = tg3_set_pauseparam,
8229         .get_rx_csum            = tg3_get_rx_csum,
8230         .set_rx_csum            = tg3_set_rx_csum,
8231         .get_tx_csum            = ethtool_op_get_tx_csum,
8232         .set_tx_csum            = tg3_set_tx_csum,
8233         .get_sg                 = ethtool_op_get_sg,
8234         .set_sg                 = ethtool_op_set_sg,
8235 #if TG3_TSO_SUPPORT != 0
8236         .get_tso                = ethtool_op_get_tso,
8237         .set_tso                = tg3_set_tso,
8238 #endif
8239         .self_test_count        = tg3_get_test_count,
8240         .self_test              = tg3_self_test,
8241         .get_strings            = tg3_get_strings,
8242         .get_stats_count        = tg3_get_stats_count,
8243         .get_ethtool_stats      = tg3_get_ethtool_stats,
8244         .get_coalesce           = tg3_get_coalesce,
8245         .set_coalesce           = tg3_set_coalesce,
8246 };
8247
8248 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8249 {
8250         u32 cursize, val;
8251
8252         tp->nvram_size = EEPROM_CHIP_SIZE;
8253
8254         if (tg3_nvram_read(tp, 0, &val) != 0)
8255                 return;
8256
8257         if (swab32(val) != TG3_EEPROM_MAGIC)
8258                 return;
8259
8260         /*
8261          * Size the chip by reading offsets at increasing powers of two.
8262          * When we encounter our validation signature, we know the addressing
8263          * has wrapped around, and thus have our chip size.
8264          */
8265         cursize = 0x800;
8266
8267         while (cursize < tp->nvram_size) {
8268                 if (tg3_nvram_read(tp, cursize, &val) != 0)
8269                         return;
8270
8271                 if (swab32(val) == TG3_EEPROM_MAGIC)
8272                         break;
8273
8274                 cursize <<= 1;
8275         }
8276
8277         tp->nvram_size = cursize;
8278 }
8279                 
8280 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8281 {
8282         u32 val;
8283
8284         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8285                 if (val != 0) {
8286                         tp->nvram_size = (val >> 16) * 1024;
8287                         return;
8288                 }
8289         }
8290         tp->nvram_size = 0x20000;
8291 }
8292
8293 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8294 {
8295         u32 nvcfg1;
8296
8297         nvcfg1 = tr32(NVRAM_CFG1);
8298         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8299                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8300         }
8301         else {
8302                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8303                 tw32(NVRAM_CFG1, nvcfg1);
8304         }
8305
8306         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8307                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8308                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8309                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8310                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8311                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8312                                 break;
8313                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8314                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8315                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8316                                 break;
8317                         case FLASH_VENDOR_ATMEL_EEPROM:
8318                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8319                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8320                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8321                                 break;
8322                         case FLASH_VENDOR_ST:
8323                                 tp->nvram_jedecnum = JEDEC_ST;
8324                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8325                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8326                                 break;
8327                         case FLASH_VENDOR_SAIFUN:
8328                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8329                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8330                                 break;
8331                         case FLASH_VENDOR_SST_SMALL:
8332                         case FLASH_VENDOR_SST_LARGE:
8333                                 tp->nvram_jedecnum = JEDEC_SST;
8334                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8335                                 break;
8336                 }
8337         }
8338         else {
8339                 tp->nvram_jedecnum = JEDEC_ATMEL;
8340                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8341                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8342         }
8343 }
8344
8345 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8346 {
8347         u32 nvcfg1;
8348
8349         nvcfg1 = tr32(NVRAM_CFG1);
8350
8351         /* NVRAM protection for TPM */
8352         if (nvcfg1 & (1 << 27))
8353                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8354
8355         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8356                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8357                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8358                         tp->nvram_jedecnum = JEDEC_ATMEL;
8359                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8360                         break;
8361                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8362                         tp->nvram_jedecnum = JEDEC_ATMEL;
8363                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8364                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8365                         break;
8366                 case FLASH_5752VENDOR_ST_M45PE10:
8367                 case FLASH_5752VENDOR_ST_M45PE20:
8368                 case FLASH_5752VENDOR_ST_M45PE40:
8369                         tp->nvram_jedecnum = JEDEC_ST;
8370                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8371                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8372                         break;
8373         }
8374
8375         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8376                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8377                         case FLASH_5752PAGE_SIZE_256:
8378                                 tp->nvram_pagesize = 256;
8379                                 break;
8380                         case FLASH_5752PAGE_SIZE_512:
8381                                 tp->nvram_pagesize = 512;
8382                                 break;
8383                         case FLASH_5752PAGE_SIZE_1K:
8384                                 tp->nvram_pagesize = 1024;
8385                                 break;
8386                         case FLASH_5752PAGE_SIZE_2K:
8387                                 tp->nvram_pagesize = 2048;
8388                                 break;
8389                         case FLASH_5752PAGE_SIZE_4K:
8390                                 tp->nvram_pagesize = 4096;
8391                                 break;
8392                         case FLASH_5752PAGE_SIZE_264:
8393                                 tp->nvram_pagesize = 264;
8394                                 break;
8395                 }
8396         }
8397         else {
8398                 /* For eeprom, set pagesize to maximum eeprom size */
8399                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8400
8401                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8402                 tw32(NVRAM_CFG1, nvcfg1);
8403         }
8404 }
8405
8406 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8407 static void __devinit tg3_nvram_init(struct tg3 *tp)
8408 {
8409         int j;
8410
8411         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8412                 return;
8413
8414         tw32_f(GRC_EEPROM_ADDR,
8415              (EEPROM_ADDR_FSM_RESET |
8416               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8417                EEPROM_ADDR_CLKPERD_SHIFT)));
8418
8419         /* XXX schedule_timeout() ... */
8420         for (j = 0; j < 100; j++)
8421                 udelay(10);
8422
8423         /* Enable seeprom accesses. */
8424         tw32_f(GRC_LOCAL_CTRL,
8425              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8426         udelay(100);
8427
8428         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8429             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8430                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8431
8432                 tg3_enable_nvram_access(tp);
8433
8434                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8435                         tg3_get_5752_nvram_info(tp);
8436                 else
8437                         tg3_get_nvram_info(tp);
8438
8439                 tg3_get_nvram_size(tp);
8440
8441                 tg3_disable_nvram_access(tp);
8442
8443         } else {
8444                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8445
8446                 tg3_get_eeprom_size(tp);
8447         }
8448 }
8449
8450 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8451                                         u32 offset, u32 *val)
8452 {
8453         u32 tmp;
8454         int i;
8455
8456         if (offset > EEPROM_ADDR_ADDR_MASK ||
8457             (offset % 4) != 0)
8458                 return -EINVAL;
8459
8460         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8461                                         EEPROM_ADDR_DEVID_MASK |
8462                                         EEPROM_ADDR_READ);
8463         tw32(GRC_EEPROM_ADDR,
8464              tmp |
8465              (0 << EEPROM_ADDR_DEVID_SHIFT) |
8466              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8467               EEPROM_ADDR_ADDR_MASK) |
8468              EEPROM_ADDR_READ | EEPROM_ADDR_START);
8469
8470         for (i = 0; i < 10000; i++) {
8471                 tmp = tr32(GRC_EEPROM_ADDR);
8472
8473                 if (tmp & EEPROM_ADDR_COMPLETE)
8474                         break;
8475                 udelay(100);
8476         }
8477         if (!(tmp & EEPROM_ADDR_COMPLETE))
8478                 return -EBUSY;
8479
8480         *val = tr32(GRC_EEPROM_DATA);
8481         return 0;
8482 }
8483
8484 #define NVRAM_CMD_TIMEOUT 10000
8485
8486 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8487 {
8488         int i;
8489
8490         tw32(NVRAM_CMD, nvram_cmd);
8491         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8492                 udelay(10);
8493                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8494                         udelay(10);
8495                         break;
8496                 }
8497         }
8498         if (i == NVRAM_CMD_TIMEOUT) {
8499                 return -EBUSY;
8500         }
8501         return 0;
8502 }
8503
8504 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8505 {
8506         int ret;
8507
8508         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8509                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8510                 return -EINVAL;
8511         }
8512
8513         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8514                 return tg3_nvram_read_using_eeprom(tp, offset, val);
8515
8516         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8517                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8518                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8519
8520                 offset = ((offset / tp->nvram_pagesize) <<
8521                           ATMEL_AT45DB0X1B_PAGE_POS) +
8522                         (offset % tp->nvram_pagesize);
8523         }
8524
8525         if (offset > NVRAM_ADDR_MSK)
8526                 return -EINVAL;
8527
8528         tg3_nvram_lock(tp);
8529
8530         tg3_enable_nvram_access(tp);
8531
8532         tw32(NVRAM_ADDR, offset);
8533         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8534                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8535
8536         if (ret == 0)
8537                 *val = swab32(tr32(NVRAM_RDDATA));
8538
8539         tg3_nvram_unlock(tp);
8540
8541         tg3_disable_nvram_access(tp);
8542
8543         return ret;
8544 }
8545
8546 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8547                                     u32 offset, u32 len, u8 *buf)
8548 {
8549         int i, j, rc = 0;
8550         u32 val;
8551
8552         for (i = 0; i < len; i += 4) {
8553                 u32 addr, data;
8554
8555                 addr = offset + i;
8556
8557                 memcpy(&data, buf + i, 4);
8558
8559                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8560
8561                 val = tr32(GRC_EEPROM_ADDR);
8562                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8563
8564                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8565                         EEPROM_ADDR_READ);
8566                 tw32(GRC_EEPROM_ADDR, val |
8567                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
8568                         (addr & EEPROM_ADDR_ADDR_MASK) |
8569                         EEPROM_ADDR_START |
8570                         EEPROM_ADDR_WRITE);
8571                 
8572                 for (j = 0; j < 10000; j++) {
8573                         val = tr32(GRC_EEPROM_ADDR);
8574
8575                         if (val & EEPROM_ADDR_COMPLETE)
8576                                 break;
8577                         udelay(100);
8578                 }
8579                 if (!(val & EEPROM_ADDR_COMPLETE)) {
8580                         rc = -EBUSY;
8581                         break;
8582                 }
8583         }
8584
8585         return rc;
8586 }
8587
8588 /* offset and length are dword aligned */
8589 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8590                 u8 *buf)
8591 {
8592         int ret = 0;
8593         u32 pagesize = tp->nvram_pagesize;
8594         u32 pagemask = pagesize - 1;
8595         u32 nvram_cmd;
8596         u8 *tmp;
8597
8598         tmp = kmalloc(pagesize, GFP_KERNEL);
8599         if (tmp == NULL)
8600                 return -ENOMEM;
8601
8602         while (len) {
8603                 int j;
8604                 u32 phy_addr, page_off, size;
8605
8606                 phy_addr = offset & ~pagemask;
8607         
8608                 for (j = 0; j < pagesize; j += 4) {
8609                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
8610                                                 (u32 *) (tmp + j))))
8611                                 break;
8612                 }
8613                 if (ret)
8614                         break;
8615
8616                 page_off = offset & pagemask;
8617                 size = pagesize;
8618                 if (len < size)
8619                         size = len;
8620
8621                 len -= size;
8622
8623                 memcpy(tmp + page_off, buf, size);
8624
8625                 offset = offset + (pagesize - page_off);
8626
8627                 tg3_enable_nvram_access(tp);
8628
8629                 /*
8630                  * Before we can erase the flash page, we need
8631                  * to issue a special "write enable" command.
8632                  */
8633                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8634
8635                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8636                         break;
8637
8638                 /* Erase the target page */
8639                 tw32(NVRAM_ADDR, phy_addr);
8640
8641                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8642                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8643
8644                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8645                         break;
8646
8647                 /* Issue another write enable to start the write. */
8648                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8649
8650                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8651                         break;
8652
8653                 for (j = 0; j < pagesize; j += 4) {
8654                         u32 data;
8655
8656                         data = *((u32 *) (tmp + j));
8657                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
8658
8659                         tw32(NVRAM_ADDR, phy_addr + j);
8660
8661                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8662                                 NVRAM_CMD_WR;
8663
8664                         if (j == 0)
8665                                 nvram_cmd |= NVRAM_CMD_FIRST;
8666                         else if (j == (pagesize - 4))
8667                                 nvram_cmd |= NVRAM_CMD_LAST;
8668
8669                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8670                                 break;
8671                 }
8672                 if (ret)
8673                         break;
8674         }
8675
8676         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8677         tg3_nvram_exec_cmd(tp, nvram_cmd);
8678
8679         kfree(tmp);
8680
8681         return ret;
8682 }
8683
8684 /* offset and length are dword aligned */
8685 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8686                 u8 *buf)
8687 {
8688         int i, ret = 0;
8689
8690         for (i = 0; i < len; i += 4, offset += 4) {
8691                 u32 data, page_off, phy_addr, nvram_cmd;
8692
8693                 memcpy(&data, buf + i, 4);
8694                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8695
8696                 page_off = offset % tp->nvram_pagesize;
8697
8698                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8699                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8700
8701                         phy_addr = ((offset / tp->nvram_pagesize) <<
8702                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8703                 }
8704                 else {
8705                         phy_addr = offset;
8706                 }
8707
8708                 tw32(NVRAM_ADDR, phy_addr);
8709
8710                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8711
8712                 if ((page_off == 0) || (i == 0))
8713                         nvram_cmd |= NVRAM_CMD_FIRST;
8714                 else if (page_off == (tp->nvram_pagesize - 4))
8715                         nvram_cmd |= NVRAM_CMD_LAST;
8716
8717                 if (i == (len - 4))
8718                         nvram_cmd |= NVRAM_CMD_LAST;
8719
8720                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
8721                         (nvram_cmd & NVRAM_CMD_FIRST)) {
8722
8723                         if ((ret = tg3_nvram_exec_cmd(tp,
8724                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8725                                 NVRAM_CMD_DONE)))
8726
8727                                 break;
8728                 }
8729                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8730                         /* We always do complete word writes to eeprom. */
8731                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8732                 }
8733
8734                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8735                         break;
8736         }
8737         return ret;
8738 }
8739
8740 /* offset and length are dword aligned */
8741 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8742 {
8743         int ret;
8744
8745         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8746                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8747                 return -EINVAL;
8748         }
8749
8750         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8751                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8752                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
8753                 udelay(40);
8754         }
8755
8756         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8757                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8758         }
8759         else {
8760                 u32 grc_mode;
8761
8762                 tg3_nvram_lock(tp);
8763
8764                 tg3_enable_nvram_access(tp);
8765                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8766                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
8767                         tw32(NVRAM_WRITE1, 0x406);
8768
8769                 grc_mode = tr32(GRC_MODE);
8770                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8771
8772                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8773                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8774
8775                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
8776                                 buf);
8777                 }
8778                 else {
8779                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8780                                 buf);
8781                 }
8782
8783                 grc_mode = tr32(GRC_MODE);
8784                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8785
8786                 tg3_disable_nvram_access(tp);
8787                 tg3_nvram_unlock(tp);
8788         }
8789
8790         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8791                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8792                 udelay(40);
8793         }
8794
8795         return ret;
8796 }
8797
8798 struct subsys_tbl_ent {
8799         u16 subsys_vendor, subsys_devid;
8800         u32 phy_id;
8801 };
8802
8803 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8804         /* Broadcom boards. */
8805         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8806         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8807         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8808         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
8809         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8810         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8811         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
8812         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8813         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8814         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8815         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8816
8817         /* 3com boards. */
8818         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8819         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8820         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
8821         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8822         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8823
8824         /* DELL boards. */
8825         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8826         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8827         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8828         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8829
8830         /* Compaq boards. */
8831         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8832         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8833         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
8834         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
8835         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
8836
8837         /* IBM boards. */
8838         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
8839 };
8840
8841 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8842 {
8843         int i;
8844
8845         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8846                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8847                      tp->pdev->subsystem_vendor) &&
8848                     (subsys_id_to_phy_id[i].subsys_devid ==
8849                      tp->pdev->subsystem_device))
8850                         return &subsys_id_to_phy_id[i];
8851         }
8852         return NULL;
8853 }
8854
8855 /* Since this function may be called in D3-hot power state during
8856  * tg3_init_one(), only config cycles are allowed.
8857  */
8858 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
8859 {
8860         u32 val;
8861
8862         /* Make sure register accesses (indirect or otherwise)
8863          * will function correctly.
8864          */
8865         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8866                                tp->misc_host_ctrl);
8867
8868         tp->phy_id = PHY_ID_INVALID;
8869         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8870
8871         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8872         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8873                 u32 nic_cfg, led_cfg;
8874                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
8875                 int eeprom_phy_serdes = 0;
8876
8877                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8878                 tp->nic_sram_data_cfg = nic_cfg;
8879
8880                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
8881                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
8882                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8883                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8884                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
8885                     (ver > 0) && (ver < 0x100))
8886                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
8887
8888                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
8889                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
8890                         eeprom_phy_serdes = 1;
8891
8892                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
8893                 if (nic_phy_id != 0) {
8894                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
8895                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
8896
8897                         eeprom_phy_id  = (id1 >> 16) << 10;
8898                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
8899                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
8900                 } else
8901                         eeprom_phy_id = 0;
8902
8903                 tp->phy_id = eeprom_phy_id;
8904                 if (eeprom_phy_serdes) {
8905                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8906                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
8907                         else
8908                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8909                 }
8910
8911                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8912                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
8913                                     SHASTA_EXT_LED_MODE_MASK);
8914                 else
8915                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
8916
8917                 switch (led_cfg) {
8918                 default:
8919                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
8920                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8921                         break;
8922
8923                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
8924                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8925                         break;
8926
8927                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
8928                         tp->led_ctrl = LED_CTRL_MODE_MAC;
8929
8930                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
8931                          * read on some older 5700/5701 bootcode.
8932                          */
8933                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
8934                             ASIC_REV_5700 ||
8935                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
8936                             ASIC_REV_5701)
8937                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8938
8939                         break;
8940
8941                 case SHASTA_EXT_LED_SHARED:
8942                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
8943                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8944                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
8945                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8946                                                  LED_CTRL_MODE_PHY_2);
8947                         break;
8948
8949                 case SHASTA_EXT_LED_MAC:
8950                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
8951                         break;
8952
8953                 case SHASTA_EXT_LED_COMBO:
8954                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
8955                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
8956                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8957                                                  LED_CTRL_MODE_PHY_2);
8958                         break;
8959
8960                 };
8961
8962                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8963                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
8964                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
8965                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8966
8967                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8968                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8969                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
8970                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
8971
8972                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8973                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
8974                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8975                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
8976                 }
8977                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
8978                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
8979
8980                 if (cfg2 & (1 << 17))
8981                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
8982
8983                 /* serdes signal pre-emphasis in register 0x590 set by */
8984                 /* bootcode if bit 18 is set */
8985                 if (cfg2 & (1 << 18))
8986                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
8987         }
8988 }
8989
8990 static int __devinit tg3_phy_probe(struct tg3 *tp)
8991 {
8992         u32 hw_phy_id_1, hw_phy_id_2;
8993         u32 hw_phy_id, hw_phy_id_masked;
8994         int err;
8995
8996         /* Reading the PHY ID register can conflict with ASF
8997          * firwmare access to the PHY hardware.
8998          */
8999         err = 0;
9000         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9001                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9002         } else {
9003                 /* Now read the physical PHY_ID from the chip and verify
9004                  * that it is sane.  If it doesn't look good, we fall back
9005                  * to either the hard-coded table based PHY_ID and failing
9006                  * that the value found in the eeprom area.
9007                  */
9008                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9009                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9010
9011                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9012                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9013                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9014
9015                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9016         }
9017
9018         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9019                 tp->phy_id = hw_phy_id;
9020                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9021                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9022                 else
9023                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9024         } else {
9025                 if (tp->phy_id != PHY_ID_INVALID) {
9026                         /* Do nothing, phy ID already set up in
9027                          * tg3_get_eeprom_hw_cfg().
9028                          */
9029                 } else {
9030                         struct subsys_tbl_ent *p;
9031
9032                         /* No eeprom signature?  Try the hardcoded
9033                          * subsys device table.
9034                          */
9035                         p = lookup_by_subsys(tp);
9036                         if (!p)
9037                                 return -ENODEV;
9038
9039                         tp->phy_id = p->phy_id;
9040                         if (!tp->phy_id ||
9041                             tp->phy_id == PHY_ID_BCM8002)
9042                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9043                 }
9044         }
9045
9046         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9047             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9048                 u32 bmsr, adv_reg, tg3_ctrl;
9049
9050                 tg3_readphy(tp, MII_BMSR, &bmsr);
9051                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9052                     (bmsr & BMSR_LSTATUS))
9053                         goto skip_phy_reset;
9054                     
9055                 err = tg3_phy_reset(tp);
9056                 if (err)
9057                         return err;
9058
9059                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9060                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9061                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9062                 tg3_ctrl = 0;
9063                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9064                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9065                                     MII_TG3_CTRL_ADV_1000_FULL);
9066                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9067                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9068                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9069                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9070                 }
9071
9072                 if (!tg3_copper_is_advertising_all(tp)) {
9073                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9074
9075                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9076                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9077
9078                         tg3_writephy(tp, MII_BMCR,
9079                                      BMCR_ANENABLE | BMCR_ANRESTART);
9080                 }
9081                 tg3_phy_set_wirespeed(tp);
9082
9083                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9084                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9085                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9086         }
9087
9088 skip_phy_reset:
9089         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9090                 err = tg3_init_5401phy_dsp(tp);
9091                 if (err)
9092                         return err;
9093         }
9094
9095         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9096                 err = tg3_init_5401phy_dsp(tp);
9097         }
9098
9099         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9100                 tp->link_config.advertising =
9101                         (ADVERTISED_1000baseT_Half |
9102                          ADVERTISED_1000baseT_Full |
9103                          ADVERTISED_Autoneg |
9104                          ADVERTISED_FIBRE);
9105         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9106                 tp->link_config.advertising &=
9107                         ~(ADVERTISED_1000baseT_Half |
9108                           ADVERTISED_1000baseT_Full);
9109
9110         return err;
9111 }
9112
9113 static void __devinit tg3_read_partno(struct tg3 *tp)
9114 {
9115         unsigned char vpd_data[256];
9116         int i;
9117
9118         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9119                 /* Sun decided not to put the necessary bits in the
9120                  * NVRAM of their onboard tg3 parts :(
9121                  */
9122                 strcpy(tp->board_part_number, "Sun 570X");
9123                 return;
9124         }
9125
9126         for (i = 0; i < 256; i += 4) {
9127                 u32 tmp;
9128
9129                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9130                         goto out_not_found;
9131
9132                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9133                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9134                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9135                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9136         }
9137
9138         /* Now parse and find the part number. */
9139         for (i = 0; i < 256; ) {
9140                 unsigned char val = vpd_data[i];
9141                 int block_end;
9142
9143                 if (val == 0x82 || val == 0x91) {
9144                         i = (i + 3 +
9145                              (vpd_data[i + 1] +
9146                               (vpd_data[i + 2] << 8)));
9147                         continue;
9148                 }
9149
9150                 if (val != 0x90)
9151                         goto out_not_found;
9152
9153                 block_end = (i + 3 +
9154                              (vpd_data[i + 1] +
9155                               (vpd_data[i + 2] << 8)));
9156                 i += 3;
9157                 while (i < block_end) {
9158                         if (vpd_data[i + 0] == 'P' &&
9159                             vpd_data[i + 1] == 'N') {
9160                                 int partno_len = vpd_data[i + 2];
9161
9162                                 if (partno_len > 24)
9163                                         goto out_not_found;
9164
9165                                 memcpy(tp->board_part_number,
9166                                        &vpd_data[i + 3],
9167                                        partno_len);
9168
9169                                 /* Success. */
9170                                 return;
9171                         }
9172                 }
9173
9174                 /* Part number not found. */
9175                 goto out_not_found;
9176         }
9177
9178 out_not_found:
9179         strcpy(tp->board_part_number, "none");
9180 }
9181
9182 #ifdef CONFIG_SPARC64
9183 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9184 {
9185         struct pci_dev *pdev = tp->pdev;
9186         struct pcidev_cookie *pcp = pdev->sysdata;
9187
9188         if (pcp != NULL) {
9189                 int node = pcp->prom_node;
9190                 u32 venid;
9191                 int err;
9192
9193                 err = prom_getproperty(node, "subsystem-vendor-id",
9194                                        (char *) &venid, sizeof(venid));
9195                 if (err == 0 || err == -1)
9196                         return 0;
9197                 if (venid == PCI_VENDOR_ID_SUN)
9198                         return 1;
9199         }
9200         return 0;
9201 }
9202 #endif
9203
9204 static int __devinit tg3_get_invariants(struct tg3 *tp)
9205 {
9206         static struct pci_device_id write_reorder_chipsets[] = {
9207                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9208                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9209                 { },
9210         };
9211         u32 misc_ctrl_reg;
9212         u32 cacheline_sz_reg;
9213         u32 pci_state_reg, grc_misc_cfg;
9214         u32 val;
9215         u16 pci_cmd;
9216         int err;
9217
9218 #ifdef CONFIG_SPARC64
9219         if (tg3_is_sun_570X(tp))
9220                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9221 #endif
9222
9223         /* If we have an AMD 762 chipset, write
9224          * reordering to the mailbox registers done by the host
9225          * controller can cause major troubles.  We read back from
9226          * every mailbox register write to force the writes to be
9227          * posted to the chip in order.
9228          */
9229         if (pci_dev_present(write_reorder_chipsets))
9230                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9231
9232         /* Force memory write invalidate off.  If we leave it on,
9233          * then on 5700_BX chips we have to enable a workaround.
9234          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9235          * to match the cacheline size.  The Broadcom driver have this
9236          * workaround but turns MWI off all the times so never uses
9237          * it.  This seems to suggest that the workaround is insufficient.
9238          */
9239         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9240         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9241         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9242
9243         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9244          * has the register indirect write enable bit set before
9245          * we try to access any of the MMIO registers.  It is also
9246          * critical that the PCI-X hw workaround situation is decided
9247          * before that as well.
9248          */
9249         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9250                               &misc_ctrl_reg);
9251
9252         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9253                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9254
9255         /* Wrong chip ID in 5752 A0. This code can be removed later
9256          * as A0 is not in production.
9257          */
9258         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9259                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9260
9261         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9262          * we need to disable memory and use config. cycles
9263          * only to access all registers. The 5702/03 chips
9264          * can mistakenly decode the special cycles from the
9265          * ICH chipsets as memory write cycles, causing corruption
9266          * of register and memory space. Only certain ICH bridges
9267          * will drive special cycles with non-zero data during the
9268          * address phase which can fall within the 5703's address
9269          * range. This is not an ICH bug as the PCI spec allows
9270          * non-zero address during special cycles. However, only
9271          * these ICH bridges are known to drive non-zero addresses
9272          * during special cycles.
9273          *
9274          * Since special cycles do not cross PCI bridges, we only
9275          * enable this workaround if the 5703 is on the secondary
9276          * bus of these ICH bridges.
9277          */
9278         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9279             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9280                 static struct tg3_dev_id {
9281                         u32     vendor;
9282                         u32     device;
9283                         u32     rev;
9284                 } ich_chipsets[] = {
9285                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9286                           PCI_ANY_ID },
9287                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9288                           PCI_ANY_ID },
9289                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9290                           0xa },
9291                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9292                           PCI_ANY_ID },
9293                         { },
9294                 };
9295                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9296                 struct pci_dev *bridge = NULL;
9297
9298                 while (pci_id->vendor != 0) {
9299                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
9300                                                 bridge);
9301                         if (!bridge) {
9302                                 pci_id++;
9303                                 continue;
9304                         }
9305                         if (pci_id->rev != PCI_ANY_ID) {
9306                                 u8 rev;
9307
9308                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
9309                                                      &rev);
9310                                 if (rev > pci_id->rev)
9311                                         continue;
9312                         }
9313                         if (bridge->subordinate &&
9314                             (bridge->subordinate->number ==
9315                              tp->pdev->bus->number)) {
9316
9317                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9318                                 pci_dev_put(bridge);
9319                                 break;
9320                         }
9321                 }
9322         }
9323
9324         /* Find msi capability. */
9325         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9326                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9327
9328         /* Initialize misc host control in PCI block. */
9329         tp->misc_host_ctrl |= (misc_ctrl_reg &
9330                                MISC_HOST_CTRL_CHIPREV);
9331         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9332                                tp->misc_host_ctrl);
9333
9334         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9335                               &cacheline_sz_reg);
9336
9337         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
9338         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
9339         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
9340         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
9341
9342         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9343             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9344             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9345                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9346
9347         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9348             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9349                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9350
9351         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9352                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9353
9354         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9355             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9356             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9357                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9358
9359         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9360                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9361
9362         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9363             tp->pci_lat_timer < 64) {
9364                 tp->pci_lat_timer = 64;
9365
9366                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
9367                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
9368                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
9369                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
9370
9371                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9372                                        cacheline_sz_reg);
9373         }
9374
9375         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9376                               &pci_state_reg);
9377
9378         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9379                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9380
9381                 /* If this is a 5700 BX chipset, and we are in PCI-X
9382                  * mode, enable register write workaround.
9383                  *
9384                  * The workaround is to use indirect register accesses
9385                  * for all chip writes not to mailbox registers.
9386                  */
9387                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9388                         u32 pm_reg;
9389                         u16 pci_cmd;
9390
9391                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9392
9393                         /* The chip can have it's power management PCI config
9394                          * space registers clobbered due to this bug.
9395                          * So explicitly force the chip into D0 here.
9396                          */
9397                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9398                                               &pm_reg);
9399                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9400                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9401                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9402                                                pm_reg);
9403
9404                         /* Also, force SERR#/PERR# in PCI command. */
9405                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9406                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9407                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9408                 }
9409         }
9410
9411         /* 5700 BX chips need to have their TX producer index mailboxes
9412          * written twice to workaround a bug.
9413          */
9414         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9415                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9416
9417         /* Back to back register writes can cause problems on this chip,
9418          * the workaround is to read back all reg writes except those to
9419          * mailbox regs.  See tg3_write_indirect_reg32().
9420          *
9421          * PCI Express 5750_A0 rev chips need this workaround too.
9422          */
9423         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9424             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9425              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9426                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9427
9428         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9429                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9430         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9431                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9432
9433         /* Chip-specific fixup from Broadcom driver */
9434         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9435             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9436                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9437                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9438         }
9439
9440         /* Default fast path register access methods */
9441         tp->read32 = tg3_read32;
9442         tp->write32 = tg3_write32;
9443         tp->read32_mbox = tg3_read32;
9444         tp->write32_mbox = tg3_write32;
9445         tp->write32_tx_mbox = tg3_write32;
9446         tp->write32_rx_mbox = tg3_write32;
9447
9448         /* Various workaround register access methods */
9449         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9450                 tp->write32 = tg3_write_indirect_reg32;
9451         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9452                 tp->write32 = tg3_write_flush_reg32;
9453
9454         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9455             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9456                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9457                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9458                         tp->write32_rx_mbox = tg3_write_flush_reg32;
9459         }
9460
9461         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9462                 tp->read32 = tg3_read_indirect_reg32;
9463                 tp->write32 = tg3_write_indirect_reg32;
9464                 tp->read32_mbox = tg3_read_indirect_mbox;
9465                 tp->write32_mbox = tg3_write_indirect_mbox;
9466                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9467                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9468
9469                 iounmap(tp->regs);
9470                 tp->regs = 0;
9471
9472                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9473                 pci_cmd &= ~PCI_COMMAND_MEMORY;
9474                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9475         }
9476
9477         /* Get eeprom hw config before calling tg3_set_power_state().
9478          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9479          * determined before calling tg3_set_power_state() so that
9480          * we know whether or not to switch out of Vaux power.
9481          * When the flag is set, it means that GPIO1 is used for eeprom
9482          * write protect and also implies that it is a LOM where GPIOs
9483          * are not used to switch power.
9484          */ 
9485         tg3_get_eeprom_hw_cfg(tp);
9486
9487         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9488          * GPIO1 driven high will bring 5700's external PHY out of reset.
9489          * It is also used as eeprom write protect on LOMs.
9490          */
9491         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9492         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9493             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9494                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9495                                        GRC_LCLCTRL_GPIO_OUTPUT1);
9496         /* Unused GPIO3 must be driven as output on 5752 because there
9497          * are no pull-up resistors on unused GPIO pins.
9498          */
9499         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9500                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9501
9502         /* Force the chip into D0. */
9503         err = tg3_set_power_state(tp, 0);
9504         if (err) {
9505                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9506                        pci_name(tp->pdev));
9507                 return err;
9508         }
9509
9510         /* 5700 B0 chips do not support checksumming correctly due
9511          * to hardware bugs.
9512          */
9513         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9514                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9515
9516         /* Pseudo-header checksum is done by hardware logic and not
9517          * the offload processers, so make the chip do the pseudo-
9518          * header checksums on receive.  For transmit it is more
9519          * convenient to do the pseudo-header checksum in software
9520          * as Linux does that on transmit for us in all cases.
9521          */
9522         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9523         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9524
9525         /* Derive initial jumbo mode from MTU assigned in
9526          * ether_setup() via the alloc_etherdev() call
9527          */
9528         if (tp->dev->mtu > ETH_DATA_LEN &&
9529             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780)
9530                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9531
9532         /* Determine WakeOnLan speed to use. */
9533         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9534             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9535             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9536             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9537                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9538         } else {
9539                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9540         }
9541
9542         /* A few boards don't want Ethernet@WireSpeed phy feature */
9543         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9544             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9545              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
9546              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9547             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9548                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9549
9550         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9551             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9552                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9553         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9554                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9555
9556         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9557                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9558
9559         tp->coalesce_mode = 0;
9560         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9561             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9562                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9563
9564         /* Initialize MAC MI mode, polling disabled. */
9565         tw32_f(MAC_MI_MODE, tp->mi_mode);
9566         udelay(80);
9567
9568         /* Initialize data/descriptor byte/word swapping. */
9569         val = tr32(GRC_MODE);
9570         val &= GRC_MODE_HOST_STACKUP;
9571         tw32(GRC_MODE, val | tp->grc_mode);
9572
9573         tg3_switch_clocks(tp);
9574
9575         /* Clear this out for sanity. */
9576         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9577
9578         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9579                               &pci_state_reg);
9580         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9581             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9582                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9583
9584                 if (chiprevid == CHIPREV_ID_5701_A0 ||
9585                     chiprevid == CHIPREV_ID_5701_B0 ||
9586                     chiprevid == CHIPREV_ID_5701_B2 ||
9587                     chiprevid == CHIPREV_ID_5701_B5) {
9588                         void __iomem *sram_base;
9589
9590                         /* Write some dummy words into the SRAM status block
9591                          * area, see if it reads back correctly.  If the return
9592                          * value is bad, force enable the PCIX workaround.
9593                          */
9594                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9595
9596                         writel(0x00000000, sram_base);
9597                         writel(0x00000000, sram_base + 4);
9598                         writel(0xffffffff, sram_base + 4);
9599                         if (readl(sram_base) != 0x00000000)
9600                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9601                 }
9602         }
9603
9604         udelay(50);
9605         tg3_nvram_init(tp);
9606
9607         grc_misc_cfg = tr32(GRC_MISC_CFG);
9608         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9609
9610         /* Broadcom's driver says that CIOBE multisplit has a bug */
9611 #if 0
9612         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9613             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9614                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9615                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9616         }
9617 #endif
9618         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9619             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9620              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9621                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9622
9623         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9624             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9625                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9626         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9627                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9628                                       HOSTCC_MODE_CLRTICK_TXBD);
9629
9630                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9631                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9632                                        tp->misc_host_ctrl);
9633         }
9634
9635         /* these are limited to 10/100 only */
9636         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9637              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9638             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9639              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9640              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9641               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9642               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9643             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9644              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9645               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9646                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9647
9648         err = tg3_phy_probe(tp);
9649         if (err) {
9650                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9651                        pci_name(tp->pdev), err);
9652                 /* ... but do not return immediately ... */
9653         }
9654
9655         tg3_read_partno(tp);
9656
9657         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9658                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9659         } else {
9660                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9661                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9662                 else
9663                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9664         }
9665
9666         /* 5700 {AX,BX} chips have a broken status block link
9667          * change bit implementation, so we must use the
9668          * status register in those cases.
9669          */
9670         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9671                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9672         else
9673                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9674
9675         /* The led_ctrl is set during tg3_phy_probe, here we might
9676          * have to force the link status polling mechanism based
9677          * upon subsystem IDs.
9678          */
9679         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9680             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9681                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9682                                   TG3_FLAG_USE_LINKCHG_REG);
9683         }
9684
9685         /* For all SERDES we poll the MAC status register. */
9686         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9687                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9688         else
9689                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9690
9691         /* It seems all chips can get confused if TX buffers
9692          * straddle the 4GB address boundary in some cases.
9693          */
9694         tp->dev->hard_start_xmit = tg3_start_xmit;
9695
9696         tp->rx_offset = 2;
9697         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9698             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9699                 tp->rx_offset = 0;
9700
9701         /* By default, disable wake-on-lan.  User can change this
9702          * using ETHTOOL_SWOL.
9703          */
9704         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9705
9706         return err;
9707 }
9708
9709 #ifdef CONFIG_SPARC64
9710 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9711 {
9712         struct net_device *dev = tp->dev;
9713         struct pci_dev *pdev = tp->pdev;
9714         struct pcidev_cookie *pcp = pdev->sysdata;
9715
9716         if (pcp != NULL) {
9717                 int node = pcp->prom_node;
9718
9719                 if (prom_getproplen(node, "local-mac-address") == 6) {
9720                         prom_getproperty(node, "local-mac-address",
9721                                          dev->dev_addr, 6);
9722                         return 0;
9723                 }
9724         }
9725         return -ENODEV;
9726 }
9727
9728 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9729 {
9730         struct net_device *dev = tp->dev;
9731
9732         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
9733         return 0;
9734 }
9735 #endif
9736
9737 static int __devinit tg3_get_device_address(struct tg3 *tp)
9738 {
9739         struct net_device *dev = tp->dev;
9740         u32 hi, lo, mac_offset;
9741
9742 #ifdef CONFIG_SPARC64
9743         if (!tg3_get_macaddr_sparc(tp))
9744                 return 0;
9745 #endif
9746
9747         mac_offset = 0x7c;
9748         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9749              !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
9750             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
9751                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9752                         mac_offset = 0xcc;
9753                 if (tg3_nvram_lock(tp))
9754                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9755                 else
9756                         tg3_nvram_unlock(tp);
9757         }
9758
9759         /* First try to get it from MAC address mailbox. */
9760         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
9761         if ((hi >> 16) == 0x484b) {
9762                 dev->dev_addr[0] = (hi >>  8) & 0xff;
9763                 dev->dev_addr[1] = (hi >>  0) & 0xff;
9764
9765                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9766                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9767                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9768                 dev->dev_addr[4] = (lo >>  8) & 0xff;
9769                 dev->dev_addr[5] = (lo >>  0) & 0xff;
9770         }
9771         /* Next, try NVRAM. */
9772         else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
9773                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9774                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9775                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9776                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9777                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
9778                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
9779                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9780                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9781         }
9782         /* Finally just fetch it out of the MAC control regs. */
9783         else {
9784                 hi = tr32(MAC_ADDR_0_HIGH);
9785                 lo = tr32(MAC_ADDR_0_LOW);
9786
9787                 dev->dev_addr[5] = lo & 0xff;
9788                 dev->dev_addr[4] = (lo >> 8) & 0xff;
9789                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9790                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9791                 dev->dev_addr[1] = hi & 0xff;
9792                 dev->dev_addr[0] = (hi >> 8) & 0xff;
9793         }
9794
9795         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9796 #ifdef CONFIG_SPARC64
9797                 if (!tg3_get_default_macaddr_sparc(tp))
9798                         return 0;
9799 #endif
9800                 return -EINVAL;
9801         }
9802         return 0;
9803 }
9804
9805 #define BOUNDARY_SINGLE_CACHELINE       1
9806 #define BOUNDARY_MULTI_CACHELINE        2
9807
9808 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9809 {
9810         int cacheline_size;
9811         u8 byte;
9812         int goal;
9813
9814         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9815         if (byte == 0)
9816                 cacheline_size = 1024;
9817         else
9818                 cacheline_size = (int) byte * 4;
9819
9820         /* On 5703 and later chips, the boundary bits have no
9821          * effect.
9822          */
9823         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9824             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9825             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9826                 goto out;
9827
9828 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9829         goal = BOUNDARY_MULTI_CACHELINE;
9830 #else
9831 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9832         goal = BOUNDARY_SINGLE_CACHELINE;
9833 #else
9834         goal = 0;
9835 #endif
9836 #endif
9837
9838         if (!goal)
9839                 goto out;
9840
9841         /* PCI controllers on most RISC systems tend to disconnect
9842          * when a device tries to burst across a cache-line boundary.
9843          * Therefore, letting tg3 do so just wastes PCI bandwidth.
9844          *
9845          * Unfortunately, for PCI-E there are only limited
9846          * write-side controls for this, and thus for reads
9847          * we will still get the disconnects.  We'll also waste
9848          * these PCI cycles for both read and write for chips
9849          * other than 5700 and 5701 which do not implement the
9850          * boundary bits.
9851          */
9852         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9853             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9854                 switch (cacheline_size) {
9855                 case 16:
9856                 case 32:
9857                 case 64:
9858                 case 128:
9859                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9860                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9861                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9862                         } else {
9863                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9864                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9865                         }
9866                         break;
9867
9868                 case 256:
9869                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9870                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9871                         break;
9872
9873                 default:
9874                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9875                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9876                         break;
9877                 };
9878         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9879                 switch (cacheline_size) {
9880                 case 16:
9881                 case 32:
9882                 case 64:
9883                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9884                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9885                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9886                                 break;
9887                         }
9888                         /* fallthrough */
9889                 case 128:
9890                 default:
9891                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9892                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
9893                         break;
9894                 };
9895         } else {
9896                 switch (cacheline_size) {
9897                 case 16:
9898                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9899                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
9900                                         DMA_RWCTRL_WRITE_BNDRY_16);
9901                                 break;
9902                         }
9903                         /* fallthrough */
9904                 case 32:
9905                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9906                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
9907                                         DMA_RWCTRL_WRITE_BNDRY_32);
9908                                 break;
9909                         }
9910                         /* fallthrough */
9911                 case 64:
9912                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9913                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
9914                                         DMA_RWCTRL_WRITE_BNDRY_64);
9915                                 break;
9916                         }
9917                         /* fallthrough */
9918                 case 128:
9919                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9920                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
9921                                         DMA_RWCTRL_WRITE_BNDRY_128);
9922                                 break;
9923                         }
9924                         /* fallthrough */
9925                 case 256:
9926                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
9927                                 DMA_RWCTRL_WRITE_BNDRY_256);
9928                         break;
9929                 case 512:
9930                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
9931                                 DMA_RWCTRL_WRITE_BNDRY_512);
9932                         break;
9933                 case 1024:
9934                 default:
9935                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
9936                                 DMA_RWCTRL_WRITE_BNDRY_1024);
9937                         break;
9938                 };
9939         }
9940
9941 out:
9942         return val;
9943 }
9944
9945 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
9946 {
9947         struct tg3_internal_buffer_desc test_desc;
9948         u32 sram_dma_descs;
9949         int i, ret;
9950
9951         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
9952
9953         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
9954         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
9955         tw32(RDMAC_STATUS, 0);
9956         tw32(WDMAC_STATUS, 0);
9957
9958         tw32(BUFMGR_MODE, 0);
9959         tw32(FTQ_RESET, 0);
9960
9961         test_desc.addr_hi = ((u64) buf_dma) >> 32;
9962         test_desc.addr_lo = buf_dma & 0xffffffff;
9963         test_desc.nic_mbuf = 0x00002100;
9964         test_desc.len = size;
9965
9966         /*
9967          * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
9968          * the *second* time the tg3 driver was getting loaded after an
9969          * initial scan.
9970          *
9971          * Broadcom tells me:
9972          *   ...the DMA engine is connected to the GRC block and a DMA
9973          *   reset may affect the GRC block in some unpredictable way...
9974          *   The behavior of resets to individual blocks has not been tested.
9975          *
9976          * Broadcom noted the GRC reset will also reset all sub-components.
9977          */
9978         if (to_device) {
9979                 test_desc.cqid_sqid = (13 << 8) | 2;
9980
9981                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
9982                 udelay(40);
9983         } else {
9984                 test_desc.cqid_sqid = (16 << 8) | 7;
9985
9986                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
9987                 udelay(40);
9988         }
9989         test_desc.flags = 0x00000005;
9990
9991         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
9992                 u32 val;
9993
9994                 val = *(((u32 *)&test_desc) + i);
9995                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
9996                                        sram_dma_descs + (i * sizeof(u32)));
9997                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
9998         }
9999         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10000
10001         if (to_device) {
10002                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10003         } else {
10004                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10005         }
10006
10007         ret = -ENODEV;
10008         for (i = 0; i < 40; i++) {
10009                 u32 val;
10010
10011                 if (to_device)
10012                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10013                 else
10014                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10015                 if ((val & 0xffff) == sram_dma_descs) {
10016                         ret = 0;
10017                         break;
10018                 }
10019
10020                 udelay(100);
10021         }
10022
10023         return ret;
10024 }
10025
10026 #define TEST_BUFFER_SIZE        0x2000
10027
10028 static int __devinit tg3_test_dma(struct tg3 *tp)
10029 {
10030         dma_addr_t buf_dma;
10031         u32 *buf, saved_dma_rwctrl;
10032         int ret;
10033
10034         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10035         if (!buf) {
10036                 ret = -ENOMEM;
10037                 goto out_nofree;
10038         }
10039
10040         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10041                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10042
10043         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10044
10045         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10046                 /* DMA read watermark not used on PCIE */
10047                 tp->dma_rwctrl |= 0x00180000;
10048         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10049                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10050                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10051                         tp->dma_rwctrl |= 0x003f0000;
10052                 else
10053                         tp->dma_rwctrl |= 0x003f000f;
10054         } else {
10055                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10056                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10057                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10058
10059                         if (ccval == 0x6 || ccval == 0x7)
10060                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10061
10062                         /* Set bit 23 to enable PCIX hw bug fix */
10063                         tp->dma_rwctrl |= 0x009f0000;
10064                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10065                         /* 5780 always in PCIX mode */
10066                         tp->dma_rwctrl |= 0x00144000;
10067                 } else {
10068                         tp->dma_rwctrl |= 0x001b000f;
10069                 }
10070         }
10071
10072         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10073             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10074                 tp->dma_rwctrl &= 0xfffffff0;
10075
10076         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10077             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10078                 /* Remove this if it causes problems for some boards. */
10079                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10080
10081                 /* On 5700/5701 chips, we need to set this bit.
10082                  * Otherwise the chip will issue cacheline transactions
10083                  * to streamable DMA memory with not all the byte
10084                  * enables turned on.  This is an error on several
10085                  * RISC PCI controllers, in particular sparc64.
10086                  *
10087                  * On 5703/5704 chips, this bit has been reassigned
10088                  * a different meaning.  In particular, it is used
10089                  * on those chips to enable a PCI-X workaround.
10090                  */
10091                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10092         }
10093
10094         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10095
10096 #if 0
10097         /* Unneeded, already done by tg3_get_invariants.  */
10098         tg3_switch_clocks(tp);
10099 #endif
10100
10101         ret = 0;
10102         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10103             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10104                 goto out;
10105
10106         /* It is best to perform DMA test with maximum write burst size
10107          * to expose the 5700/5701 write DMA bug.
10108          */
10109         saved_dma_rwctrl = tp->dma_rwctrl;
10110         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10111         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10112
10113         while (1) {
10114                 u32 *p = buf, i;
10115
10116                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10117                         p[i] = i;
10118
10119                 /* Send the buffer to the chip. */
10120                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10121                 if (ret) {
10122                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
10123                         break;
10124                 }
10125
10126 #if 0
10127                 /* validate data reached card RAM correctly. */
10128                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10129                         u32 val;
10130                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10131                         if (le32_to_cpu(val) != p[i]) {
10132                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10133                                 /* ret = -ENODEV here? */
10134                         }
10135                         p[i] = 0;
10136                 }
10137 #endif
10138                 /* Now read it back. */
10139                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10140                 if (ret) {
10141                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
10142
10143                         break;
10144                 }
10145
10146                 /* Verify it. */
10147                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10148                         if (p[i] == i)
10149                                 continue;
10150
10151                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10152                             DMA_RWCTRL_WRITE_BNDRY_16) {
10153                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10154                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10155                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10156                                 break;
10157                         } else {
10158                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10159                                 ret = -ENODEV;
10160                                 goto out;
10161                         }
10162                 }
10163
10164                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10165                         /* Success. */
10166                         ret = 0;
10167                         break;
10168                 }
10169         }
10170         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10171             DMA_RWCTRL_WRITE_BNDRY_16) {
10172                 static struct pci_device_id dma_wait_state_chipsets[] = {
10173                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10174                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10175                         { },
10176                 };
10177
10178                 /* DMA test passed without adjusting DMA boundary,
10179                  * now look for chipsets that are known to expose the
10180                  * DMA bug without failing the test.
10181                  */
10182                 if (pci_dev_present(dma_wait_state_chipsets)) {
10183                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10184                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10185                 }
10186                 else
10187                         /* Safe to use the calculated DMA boundary. */
10188                         tp->dma_rwctrl = saved_dma_rwctrl;
10189
10190                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10191         }
10192
10193 out:
10194         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10195 out_nofree:
10196         return ret;
10197 }
10198
10199 static void __devinit tg3_init_link_config(struct tg3 *tp)
10200 {
10201         tp->link_config.advertising =
10202                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10203                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10204                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10205                  ADVERTISED_Autoneg | ADVERTISED_MII);
10206         tp->link_config.speed = SPEED_INVALID;
10207         tp->link_config.duplex = DUPLEX_INVALID;
10208         tp->link_config.autoneg = AUTONEG_ENABLE;
10209         netif_carrier_off(tp->dev);
10210         tp->link_config.active_speed = SPEED_INVALID;
10211         tp->link_config.active_duplex = DUPLEX_INVALID;
10212         tp->link_config.phy_is_low_power = 0;
10213         tp->link_config.orig_speed = SPEED_INVALID;
10214         tp->link_config.orig_duplex = DUPLEX_INVALID;
10215         tp->link_config.orig_autoneg = AUTONEG_INVALID;
10216 }
10217
10218 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10219 {
10220         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10221                 tp->bufmgr_config.mbuf_read_dma_low_water =
10222                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10223                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10224                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10225                 tp->bufmgr_config.mbuf_high_water =
10226                         DEFAULT_MB_HIGH_WATER_5705;
10227
10228                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10229                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10230                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10231                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10232                 tp->bufmgr_config.mbuf_high_water_jumbo =
10233                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10234         } else {
10235                 tp->bufmgr_config.mbuf_read_dma_low_water =
10236                         DEFAULT_MB_RDMA_LOW_WATER;
10237                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10238                         DEFAULT_MB_MACRX_LOW_WATER;
10239                 tp->bufmgr_config.mbuf_high_water =
10240                         DEFAULT_MB_HIGH_WATER;
10241
10242                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10243                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10244                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10245                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10246                 tp->bufmgr_config.mbuf_high_water_jumbo =
10247                         DEFAULT_MB_HIGH_WATER_JUMBO;
10248         }
10249
10250         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10251         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10252 }
10253
10254 static char * __devinit tg3_phy_string(struct tg3 *tp)
10255 {
10256         switch (tp->phy_id & PHY_ID_MASK) {
10257         case PHY_ID_BCM5400:    return "5400";
10258         case PHY_ID_BCM5401:    return "5401";
10259         case PHY_ID_BCM5411:    return "5411";
10260         case PHY_ID_BCM5701:    return "5701";
10261         case PHY_ID_BCM5703:    return "5703";
10262         case PHY_ID_BCM5704:    return "5704";
10263         case PHY_ID_BCM5705:    return "5705";
10264         case PHY_ID_BCM5750:    return "5750";
10265         case PHY_ID_BCM5752:    return "5752";
10266         case PHY_ID_BCM5780:    return "5780";
10267         case PHY_ID_BCM8002:    return "8002/serdes";
10268         case 0:                 return "serdes";
10269         default:                return "unknown";
10270         };
10271 }
10272
10273 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
10274 {
10275         struct pci_dev *peer;
10276         unsigned int func, devnr = tp->pdev->devfn & ~7;
10277
10278         for (func = 0; func < 8; func++) {
10279                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10280                 if (peer && peer != tp->pdev)
10281                         break;
10282                 pci_dev_put(peer);
10283         }
10284         if (!peer || peer == tp->pdev)
10285                 BUG();
10286
10287         /*
10288          * We don't need to keep the refcount elevated; there's no way
10289          * to remove one half of this device without removing the other
10290          */
10291         pci_dev_put(peer);
10292
10293         return peer;
10294 }
10295
10296 static void __devinit tg3_init_coal(struct tg3 *tp)
10297 {
10298         struct ethtool_coalesce *ec = &tp->coal;
10299
10300         memset(ec, 0, sizeof(*ec));
10301         ec->cmd = ETHTOOL_GCOALESCE;
10302         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10303         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10304         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10305         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10306         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10307         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10308         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10309         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10310         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10311
10312         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10313                                  HOSTCC_MODE_CLRTICK_TXBD)) {
10314                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10315                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10316                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10317                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10318         }
10319
10320         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10321                 ec->rx_coalesce_usecs_irq = 0;
10322                 ec->tx_coalesce_usecs_irq = 0;
10323                 ec->stats_block_coalesce_usecs = 0;
10324         }
10325 }
10326
10327 static int __devinit tg3_init_one(struct pci_dev *pdev,
10328                                   const struct pci_device_id *ent)
10329 {
10330         static int tg3_version_printed = 0;
10331         unsigned long tg3reg_base, tg3reg_len;
10332         struct net_device *dev;
10333         struct tg3 *tp;
10334         int i, err, pci_using_dac, pm_cap;
10335
10336         if (tg3_version_printed++ == 0)
10337                 printk(KERN_INFO "%s", version);
10338
10339         err = pci_enable_device(pdev);
10340         if (err) {
10341                 printk(KERN_ERR PFX "Cannot enable PCI device, "
10342                        "aborting.\n");
10343                 return err;
10344         }
10345
10346         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10347                 printk(KERN_ERR PFX "Cannot find proper PCI device "
10348                        "base address, aborting.\n");
10349                 err = -ENODEV;
10350                 goto err_out_disable_pdev;
10351         }
10352
10353         err = pci_request_regions(pdev, DRV_MODULE_NAME);
10354         if (err) {
10355                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
10356                        "aborting.\n");
10357                 goto err_out_disable_pdev;
10358         }
10359
10360         pci_set_master(pdev);
10361
10362         /* Find power-management capability. */
10363         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10364         if (pm_cap == 0) {
10365                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
10366                        "aborting.\n");
10367                 err = -EIO;
10368                 goto err_out_free_res;
10369         }
10370
10371         /* Configure DMA attributes. */
10372         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
10373         if (!err) {
10374                 pci_using_dac = 1;
10375                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
10376                 if (err < 0) {
10377                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
10378                                "for consistent allocations\n");
10379                         goto err_out_free_res;
10380                 }
10381         } else {
10382                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
10383                 if (err) {
10384                         printk(KERN_ERR PFX "No usable DMA configuration, "
10385                                "aborting.\n");
10386                         goto err_out_free_res;
10387                 }
10388                 pci_using_dac = 0;
10389         }
10390
10391         tg3reg_base = pci_resource_start(pdev, 0);
10392         tg3reg_len = pci_resource_len(pdev, 0);
10393
10394         dev = alloc_etherdev(sizeof(*tp));
10395         if (!dev) {
10396                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10397                 err = -ENOMEM;
10398                 goto err_out_free_res;
10399         }
10400
10401         SET_MODULE_OWNER(dev);
10402         SET_NETDEV_DEV(dev, &pdev->dev);
10403
10404         if (pci_using_dac)
10405                 dev->features |= NETIF_F_HIGHDMA;
10406         dev->features |= NETIF_F_LLTX;
10407 #if TG3_VLAN_TAG_USED
10408         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10409         dev->vlan_rx_register = tg3_vlan_rx_register;
10410         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10411 #endif
10412
10413         tp = netdev_priv(dev);
10414         tp->pdev = pdev;
10415         tp->dev = dev;
10416         tp->pm_cap = pm_cap;
10417         tp->mac_mode = TG3_DEF_MAC_MODE;
10418         tp->rx_mode = TG3_DEF_RX_MODE;
10419         tp->tx_mode = TG3_DEF_TX_MODE;
10420         tp->mi_mode = MAC_MI_MODE_BASE;
10421         if (tg3_debug > 0)
10422                 tp->msg_enable = tg3_debug;
10423         else
10424                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10425
10426         /* The word/byte swap controls here control register access byte
10427          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
10428          * setting below.
10429          */
10430         tp->misc_host_ctrl =
10431                 MISC_HOST_CTRL_MASK_PCI_INT |
10432                 MISC_HOST_CTRL_WORD_SWAP |
10433                 MISC_HOST_CTRL_INDIR_ACCESS |
10434                 MISC_HOST_CTRL_PCISTATE_RW;
10435
10436         /* The NONFRM (non-frame) byte/word swap controls take effect
10437          * on descriptor entries, anything which isn't packet data.
10438          *
10439          * The StrongARM chips on the board (one for tx, one for rx)
10440          * are running in big-endian mode.
10441          */
10442         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10443                         GRC_MODE_WSWAP_NONFRM_DATA);
10444 #ifdef __BIG_ENDIAN
10445         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10446 #endif
10447         spin_lock_init(&tp->lock);
10448         spin_lock_init(&tp->tx_lock);
10449         spin_lock_init(&tp->indirect_lock);
10450         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10451
10452         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10453         if (tp->regs == 0UL) {
10454                 printk(KERN_ERR PFX "Cannot map device registers, "
10455                        "aborting.\n");
10456                 err = -ENOMEM;
10457                 goto err_out_free_dev;
10458         }
10459
10460         tg3_init_link_config(tp);
10461
10462         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10463         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10464         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10465
10466         dev->open = tg3_open;
10467         dev->stop = tg3_close;
10468         dev->get_stats = tg3_get_stats;
10469         dev->set_multicast_list = tg3_set_rx_mode;
10470         dev->set_mac_address = tg3_set_mac_addr;
10471         dev->do_ioctl = tg3_ioctl;
10472         dev->tx_timeout = tg3_tx_timeout;
10473         dev->poll = tg3_poll;
10474         dev->ethtool_ops = &tg3_ethtool_ops;
10475         dev->weight = 64;
10476         dev->watchdog_timeo = TG3_TX_TIMEOUT;
10477         dev->change_mtu = tg3_change_mtu;
10478         dev->irq = pdev->irq;
10479 #ifdef CONFIG_NET_POLL_CONTROLLER
10480         dev->poll_controller = tg3_poll_controller;
10481 #endif
10482
10483         err = tg3_get_invariants(tp);
10484         if (err) {
10485                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10486                        "aborting.\n");
10487                 goto err_out_iounmap;
10488         }
10489
10490         tg3_init_bufmgr_config(tp);
10491
10492 #if TG3_TSO_SUPPORT != 0
10493         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10494                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10495         }
10496         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10497             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10498             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10499             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10500                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10501         } else {
10502                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10503         }
10504
10505         /* TSO is off by default, user can enable using ethtool.  */
10506 #if 0
10507         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10508                 dev->features |= NETIF_F_TSO;
10509 #endif
10510
10511 #endif
10512
10513         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10514             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10515             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10516                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10517                 tp->rx_pending = 63;
10518         }
10519
10520         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10521                 tp->pdev_peer = tg3_find_5704_peer(tp);
10522
10523         err = tg3_get_device_address(tp);
10524         if (err) {
10525                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10526                        "aborting.\n");
10527                 goto err_out_iounmap;
10528         }
10529
10530         /*
10531          * Reset chip in case UNDI or EFI driver did not shutdown
10532          * DMA self test will enable WDMAC and we'll see (spurious)
10533          * pending DMA on the PCI bus at that point.
10534          */
10535         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10536             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10537                 pci_save_state(tp->pdev);
10538                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
10539                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10540         }
10541
10542         err = tg3_test_dma(tp);
10543         if (err) {
10544                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10545                 goto err_out_iounmap;
10546         }
10547
10548         /* Tigon3 can do ipv4 only... and some chips have buggy
10549          * checksumming.
10550          */
10551         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10552                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10553                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10554         } else
10555                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10556
10557         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10558                 dev->features &= ~NETIF_F_HIGHDMA;
10559
10560         /* flow control autonegotiation is default behavior */
10561         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10562
10563         tg3_init_coal(tp);
10564
10565         /* Now that we have fully setup the chip, save away a snapshot
10566          * of the PCI config space.  We need to restore this after
10567          * GRC_MISC_CFG core clock resets and some resume events.
10568          */
10569         pci_save_state(tp->pdev);
10570
10571         err = register_netdev(dev);
10572         if (err) {
10573                 printk(KERN_ERR PFX "Cannot register net device, "
10574                        "aborting.\n");
10575                 goto err_out_iounmap;
10576         }
10577
10578         pci_set_drvdata(pdev, dev);
10579
10580         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
10581                dev->name,
10582                tp->board_part_number,
10583                tp->pci_chip_rev_id,
10584                tg3_phy_string(tp),
10585                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
10586                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
10587                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
10588                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
10589                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
10590                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10591
10592         for (i = 0; i < 6; i++)
10593                 printk("%2.2x%c", dev->dev_addr[i],
10594                        i == 5 ? '\n' : ':');
10595
10596         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
10597                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10598                "TSOcap[%d] \n",
10599                dev->name,
10600                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
10601                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
10602                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
10603                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
10604                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
10605                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
10606                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
10607         printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
10608                dev->name, tp->dma_rwctrl);
10609
10610         return 0;
10611
10612 err_out_iounmap:
10613         if (tp->regs) {
10614                 iounmap(tp->regs);
10615                 tp->regs = 0;
10616         }
10617
10618 err_out_free_dev:
10619         free_netdev(dev);
10620
10621 err_out_free_res:
10622         pci_release_regions(pdev);
10623
10624 err_out_disable_pdev:
10625         pci_disable_device(pdev);
10626         pci_set_drvdata(pdev, NULL);
10627         return err;
10628 }
10629
10630 static void __devexit tg3_remove_one(struct pci_dev *pdev)
10631 {
10632         struct net_device *dev = pci_get_drvdata(pdev);
10633
10634         if (dev) {
10635                 struct tg3 *tp = netdev_priv(dev);
10636
10637                 unregister_netdev(dev);
10638                 if (tp->regs) {
10639                         iounmap(tp->regs);
10640                         tp->regs = 0;
10641                 }
10642                 free_netdev(dev);
10643                 pci_release_regions(pdev);
10644                 pci_disable_device(pdev);
10645                 pci_set_drvdata(pdev, NULL);
10646         }
10647 }
10648
10649 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10650 {
10651         struct net_device *dev = pci_get_drvdata(pdev);
10652         struct tg3 *tp = netdev_priv(dev);
10653         int err;
10654
10655         if (!netif_running(dev))
10656                 return 0;
10657
10658         tg3_netif_stop(tp);
10659
10660         del_timer_sync(&tp->timer);
10661
10662         tg3_full_lock(tp, 1);
10663         tg3_disable_ints(tp);
10664         tg3_full_unlock(tp);
10665
10666         netif_device_detach(dev);
10667
10668         tg3_full_lock(tp, 0);
10669         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10670         tg3_full_unlock(tp);
10671
10672         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
10673         if (err) {
10674                 tg3_full_lock(tp, 0);
10675
10676                 tg3_init_hw(tp);
10677
10678                 tp->timer.expires = jiffies + tp->timer_offset;
10679                 add_timer(&tp->timer);
10680
10681                 netif_device_attach(dev);
10682                 tg3_netif_start(tp);
10683
10684                 tg3_full_unlock(tp);
10685         }
10686
10687         return err;
10688 }
10689
10690 static int tg3_resume(struct pci_dev *pdev)
10691 {
10692         struct net_device *dev = pci_get_drvdata(pdev);
10693         struct tg3 *tp = netdev_priv(dev);
10694         int err;
10695
10696         if (!netif_running(dev))
10697                 return 0;
10698
10699         pci_restore_state(tp->pdev);
10700
10701         err = tg3_set_power_state(tp, 0);
10702         if (err)
10703                 return err;
10704
10705         netif_device_attach(dev);
10706
10707         tg3_full_lock(tp, 0);
10708
10709         tg3_init_hw(tp);
10710
10711         tp->timer.expires = jiffies + tp->timer_offset;
10712         add_timer(&tp->timer);
10713
10714         tg3_netif_start(tp);
10715
10716         tg3_full_unlock(tp);
10717
10718         return 0;
10719 }
10720
10721 static struct pci_driver tg3_driver = {
10722         .name           = DRV_MODULE_NAME,
10723         .id_table       = tg3_pci_tbl,
10724         .probe          = tg3_init_one,
10725         .remove         = __devexit_p(tg3_remove_one),
10726         .suspend        = tg3_suspend,
10727         .resume         = tg3_resume
10728 };
10729
10730 static int __init tg3_init(void)
10731 {
10732         return pci_module_init(&tg3_driver);
10733 }
10734
10735 static void __exit tg3_cleanup(void)
10736 {
10737         pci_unregister_driver(&tg3_driver);
10738 }
10739
10740 module_init(tg3_init);
10741 module_exit(tg3_cleanup);