e129a6a581f646bf6ceea8d7c820a2569637ebb9
[cascardo/linux.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43
44 #include <net/checksum.h>
45 #include <net/ip.h>
46
47 #include <asm/system.h>
48 #include <asm/io.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
51
52 #ifdef CONFIG_SPARC
53 #include <asm/idprom.h>
54 #include <asm/prom.h>
55 #endif
56
57 #define BAR_0   0
58 #define BAR_2   2
59
60 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
61 #define TG3_VLAN_TAG_USED 1
62 #else
63 #define TG3_VLAN_TAG_USED 0
64 #endif
65
66 #define TG3_TSO_SUPPORT 1
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.94"
73 #define DRV_MODULE_RELDATE      "August 14, 2008"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself,
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
128
129 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
130 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
131
132 /* minimum number of free TX descriptors required to wake up TX process */
133 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
134
135 /* number of ETHTOOL_GSTATS u64's */
136 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
137
138 #define TG3_NUM_TEST            6
139
140 static char version[] __devinitdata =
141         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
142
143 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
144 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
145 MODULE_LICENSE("GPL");
146 MODULE_VERSION(DRV_MODULE_VERSION);
147
148 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
149 module_param(tg3_debug, int, 0);
150 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
151
152 static struct pci_device_id tg3_pci_tbl[] = {
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
205         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
206         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
207         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
208         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
209         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
210         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
211         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
212         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
213         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
214         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
215         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
216         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
217         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
218         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
219         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
220         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
221         {}
222 };
223
224 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
225
226 static const struct {
227         const char string[ETH_GSTRING_LEN];
228 } ethtool_stats_keys[TG3_NUM_STATS] = {
229         { "rx_octets" },
230         { "rx_fragments" },
231         { "rx_ucast_packets" },
232         { "rx_mcast_packets" },
233         { "rx_bcast_packets" },
234         { "rx_fcs_errors" },
235         { "rx_align_errors" },
236         { "rx_xon_pause_rcvd" },
237         { "rx_xoff_pause_rcvd" },
238         { "rx_mac_ctrl_rcvd" },
239         { "rx_xoff_entered" },
240         { "rx_frame_too_long_errors" },
241         { "rx_jabbers" },
242         { "rx_undersize_packets" },
243         { "rx_in_length_errors" },
244         { "rx_out_length_errors" },
245         { "rx_64_or_less_octet_packets" },
246         { "rx_65_to_127_octet_packets" },
247         { "rx_128_to_255_octet_packets" },
248         { "rx_256_to_511_octet_packets" },
249         { "rx_512_to_1023_octet_packets" },
250         { "rx_1024_to_1522_octet_packets" },
251         { "rx_1523_to_2047_octet_packets" },
252         { "rx_2048_to_4095_octet_packets" },
253         { "rx_4096_to_8191_octet_packets" },
254         { "rx_8192_to_9022_octet_packets" },
255
256         { "tx_octets" },
257         { "tx_collisions" },
258
259         { "tx_xon_sent" },
260         { "tx_xoff_sent" },
261         { "tx_flow_control" },
262         { "tx_mac_errors" },
263         { "tx_single_collisions" },
264         { "tx_mult_collisions" },
265         { "tx_deferred" },
266         { "tx_excessive_collisions" },
267         { "tx_late_collisions" },
268         { "tx_collide_2times" },
269         { "tx_collide_3times" },
270         { "tx_collide_4times" },
271         { "tx_collide_5times" },
272         { "tx_collide_6times" },
273         { "tx_collide_7times" },
274         { "tx_collide_8times" },
275         { "tx_collide_9times" },
276         { "tx_collide_10times" },
277         { "tx_collide_11times" },
278         { "tx_collide_12times" },
279         { "tx_collide_13times" },
280         { "tx_collide_14times" },
281         { "tx_collide_15times" },
282         { "tx_ucast_packets" },
283         { "tx_mcast_packets" },
284         { "tx_bcast_packets" },
285         { "tx_carrier_sense_errors" },
286         { "tx_discards" },
287         { "tx_errors" },
288
289         { "dma_writeq_full" },
290         { "dma_write_prioq_full" },
291         { "rxbds_empty" },
292         { "rx_discards" },
293         { "rx_errors" },
294         { "rx_threshold_hit" },
295
296         { "dma_readq_full" },
297         { "dma_read_prioq_full" },
298         { "tx_comp_queue_full" },
299
300         { "ring_set_send_prod_index" },
301         { "ring_status_update" },
302         { "nic_irqs" },
303         { "nic_avoided_irqs" },
304         { "nic_tx_threshold_hit" }
305 };
306
307 static const struct {
308         const char string[ETH_GSTRING_LEN];
309 } ethtool_test_keys[TG3_NUM_TEST] = {
310         { "nvram test     (online) " },
311         { "link test      (online) " },
312         { "register test  (offline)" },
313         { "memory test    (offline)" },
314         { "loopback test  (offline)" },
315         { "interrupt test (offline)" },
316 };
317
/* Write a 32-bit device register by direct MMIO.  This is a posted
 * write: no read-back flush is performed.
 */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}
322
323 static u32 tg3_read32(struct tg3 *tp, u32 off)
324 {
325         return (readl(tp->regs + off));
326 }
327
/* Write a 32-bit register in the APE (management processor) register
 * space, which is mapped through a separate BAR (tp->aperegs).
 */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}
332
333 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
334 {
335         return (readl(tp->aperegs + off));
336 }
337
/* Write a device register indirectly through PCI config space: the
 * target offset is latched into TG3PCI_REG_BASE_ADDR and the value is
 * then written to TG3PCI_REG_DATA.  The base/data window pair is a
 * shared resource, so the sequence is serialized by indirect_lock with
 * interrupts disabled.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
347
/* MMIO register write followed by a read-back of the same register to
 * flush the posted write to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
353
/* Read a device register indirectly through the PCI config-space
 * base/data window; counterpart of tg3_write_indirect_reg32().
 * Serialized by indirect_lock because the window is shared.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
365
/* Write a mailbox register when direct MMIO cannot be used.  Two
 * mailboxes have dedicated PCI config-space aliases and are written
 * through those; all others go through the shared indirect register
 * window (mailboxes live at register offset + 0x5600).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        /* RX return ring consumer index has a config-space alias. */
        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        /* Standard RX ring producer index has a config-space alias. */
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
395
/* Read a mailbox register through the shared indirect register window
 * (mailboxes are located at register offset + 0x5600).  Serialized by
 * indirect_lock.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
407
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 *
 * Chooses between the indirect (non-posted) and direct (posted) write
 * paths depending on chip workaround flags, and in the posted case
 * flushes with a read-back so the write has reached the device.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
432
/* Mailbox write with a flushing read-back, skipped on chips where the
 * read-back is unnecessary or unsafe (write-reorder and ICH
 * workaround configurations).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
440
/* Write a TX mailbox register.  On chips with the TXD mailbox hardware
 * bug the value is written twice; on chips prone to write reordering a
 * read-back flushes the write.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}
450
451 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
452 {
453         return (readl(tp->regs + off + GRCMBOX_BASE));
454 }
455
/* 5906 mailbox write through the GRC mailbox window. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}
460
461 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
462 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
463 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
464 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
465 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
466
467 #define tw32(reg,val)           tp->write32(tp, reg, val)
468 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
469 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
470 #define tr32(reg)               tp->read32(tp, reg)
471
/* Write a 32-bit word into NIC on-chip SRAM through the memory window
 * (base-address + data register pair), either via PCI config space or
 * via MMIO depending on TG3_FLAG_SRAM_USE_CONFIG.  Serialized by
 * indirect_lock.  On 5906 the statistics block region is not writable
 * and the access is silently dropped.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
496
/* Read a 32-bit word from NIC on-chip SRAM through the memory window;
 * counterpart of tg3_write_mem().  On 5906 the statistics block region
 * cannot be read and *val is forced to zero instead.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
523
524 static void tg3_ape_lock_init(struct tg3 *tp)
525 {
526         int i;
527
528         /* Make sure the driver hasn't any stale locks. */
529         for (i = 0; i < 8; i++)
530                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
531                                 APE_LOCK_GRANT_DRIVER);
532 }
533
534 static int tg3_ape_lock(struct tg3 *tp, int locknum)
535 {
536         int i, off;
537         int ret = 0;
538         u32 status;
539
540         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
541                 return 0;
542
543         switch (locknum) {
544                 case TG3_APE_LOCK_GRC:
545                 case TG3_APE_LOCK_MEM:
546                         break;
547                 default:
548                         return -EINVAL;
549         }
550
551         off = 4 * locknum;
552
553         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
554
555         /* Wait for up to 1 millisecond to acquire lock. */
556         for (i = 0; i < 100; i++) {
557                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
558                 if (status == APE_LOCK_GRANT_DRIVER)
559                         break;
560                 udelay(10);
561         }
562
563         if (status != APE_LOCK_GRANT_DRIVER) {
564                 /* Revoke the lock request. */
565                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
566                                 APE_LOCK_GRANT_DRIVER);
567
568                 ret = -EBUSY;
569         }
570
571         return ret;
572 }
573
574 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
575 {
576         int off;
577
578         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
579                 return;
580
581         switch (locknum) {
582                 case TG3_APE_LOCK_GRC:
583                 case TG3_APE_LOCK_MEM:
584                         break;
585                 default:
586                         return;
587         }
588
589         off = 4 * locknum;
590         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
591 }
592
/* Mask chip interrupts: set the PCI interrupt mask bit in misc host
 * control, then write 1 to the interrupt mailbox to disable further
 * interrupt generation (flushed write).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
599
600 static inline void tg3_cond_int(struct tg3 *tp)
601 {
602         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
603             (tp->hw_status->status & SD_STATUS_UPDATED))
604                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
605         else
606                 tw32(HOSTCC_MODE, tp->coalesce_mode |
607                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
608 }
609
/* Re-enable chip interrupts.  irq_sync is cleared first and ordered
 * with wmb() before the hardware is touched; the last seen status tag
 * is acknowledged via the interrupt mailbox (twice for 1-shot MSI
 * chips), then tg3_cond_int() forces an interrupt if work is pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}
624
625 static inline unsigned int tg3_has_work(struct tg3 *tp)
626 {
627         struct tg3_hw_status *sblk = tp->hw_status;
628         unsigned int work_exists = 0;
629
630         /* check for phy events */
631         if (!(tp->tg3_flags &
632               (TG3_FLAG_USE_LINKCHG_REG |
633                TG3_FLAG_POLL_SERDES))) {
634                 if (sblk->status & SD_STATUS_LINK_CHG)
635                         work_exists = 1;
636         }
637         /* check for RX/TX work to do */
638         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
639             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
640                 work_exists = 1;
641
642         return work_exists;
643 }
644
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        /* Order the mailbox write before any subsequent MMIO. */
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
665
/* Quiesce the network interface: refresh trans_start so the watchdog
 * does not fire while we are stopped, then disable NAPI polling and
 * TX queueing.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        napi_disable(&tp->napi);
        netif_tx_disable(tp->dev);
}
672
/* Resume the network interface after tg3_netif_stop(): wake the TX
 * queue, re-enable NAPI, mark the status block updated and re-enable
 * chip interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        napi_enable(&tp->napi);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
684
/* Switch the chip core clock to its normal source, stepping through
 * the ALTCLK intermediate setting where the hardware requires it.
 * Each write uses a 40 usec settle delay (tw32_wait_f).  Skipped on
 * CPMU-equipped and 5780-class chips, which manage clocks themselves.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        orig_clock_ctrl = clock_ctrl;
        /* Preserve only the CLKRUN bits and the low divider field. */
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                /* Drop the 44MHz core clock in two steps via ALTCLK. */
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
716
717 #define PHY_BUSY_LOOPS  5000
718
/* Read a PHY register over the MI (MDIO) interface.
 *
 * Auto-polling is temporarily disabled around the transaction when it
 * is active, and restored afterwards.  The MI_COM register is polled
 * (up to PHY_BUSY_LOOPS x 10 usec) for completion.
 *
 * Returns 0 and stores the register value in *val on success, or
 * -EBUSY on timeout (*val is left zeroed).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        /* Build the MI frame: PHY address, register, read command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        /* Re-read after a short settle to latch the data. */
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        /* Restore auto-polling if we turned it off above. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
767
/* Write a PHY register over the MI (MDIO) interface; counterpart of
 * tg3_readphy() with the same auto-poll suspend/restore and busy-poll
 * behavior.
 *
 * On 5906, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are silently
 * ignored (returns 0) — those registers must not be touched there.
 *
 * Returns 0 on success or -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Build the MI frame: PHY address, register, data, write command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        /* Restore auto-polling if we turned it off above. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
816
817 static int tg3_bmcr_reset(struct tg3 *tp)
818 {
819         u32 phy_control;
820         int limit, err;
821
822         /* OK, reset it, and poll the BMCR_RESET bit until it
823          * clears or we time out.
824          */
825         phy_control = BMCR_RESET;
826         err = tg3_writephy(tp, MII_BMCR, phy_control);
827         if (err != 0)
828                 return -EBUSY;
829
830         limit = 5000;
831         while (limit--) {
832                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
833                 if (err != 0)
834                         return -EBUSY;
835
836                 if ((phy_control & BMCR_RESET) == 0) {
837                         udelay(40);
838                         break;
839                 }
840                 udelay(10);
841         }
842         if (limit <= 0)
843                 return -EBUSY;
844
845         return 0;
846 }
847
848 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
849 {
850         struct tg3 *tp = (struct tg3 *)bp->priv;
851         u32 val;
852
853         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
854                 return -EAGAIN;
855
856         if (tg3_readphy(tp, reg, &val))
857                 return -EIO;
858
859         return val;
860 }
861
862 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
863 {
864         struct tg3 *tp = (struct tg3 *)bp->priv;
865
866         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
867                 return -EAGAIN;
868
869         if (tg3_writephy(tp, reg, val))
870                 return -EIO;
871
872         return 0;
873 }
874
/* phylib mii_bus reset hook; the tg3 MDIO bus needs no reset action. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}
879
/* Program the MAC-side RGMII registers (MAC_PHYCFG1/2,
 * MAC_EXT_RGMII_MODE) to match the RGMII in-band/out-of-band status
 * flags in tp->tg3_flags3.  No-op unless the attached PHY interface
 * is RGMII.
 */
static void tg3_mdio_config(struct tg3 *tp)
{
        u32 val;

        if (tp->mdio_bus->phy_map[PHY_ADDR]->interface !=
            PHY_INTERFACE_MODE_RGMII)
                return;

        /* External RX decode / send-status bits only apply when
         * standard in-band signaling is disabled.
         */
        val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
                                    MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

        /* In-band signaling is on unless explicitly disabled. */
        val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
        if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
                val |= MAC_PHYCFG2_INBAND_ENABLE;
        tw32(MAC_PHYCFG2, val);

        /* Clear all RGMII mode bits, then re-enable the RX and/or TX
         * groups requested by the external in-band flags.
         */
        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}
924
/* Resume MDIO access: clear the PAUSED flag (under the bus lock when
 * the phylib bus is registered), disable hardware autopolling, and
 * re-apply the RGMII MAC configuration when the bus is initialized.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
                mutex_lock(&tp->mdio_bus->mdio_lock);
                tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
                mutex_unlock(&tp->mdio_bus->mdio_lock);
        }

        /* Manual MI_COM transactions require autopolling off. */
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
                tg3_mdio_config(tp);
}
940
941 static void tg3_mdio_stop(struct tg3 *tp)
942 {
943         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
944                 mutex_lock(&tp->mdio_bus->mdio_lock);
945                 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
946                 mutex_unlock(&tp->mdio_bus->mdio_lock);
947         }
948 }
949
950 static int tg3_mdio_init(struct tg3 *tp)
951 {
952         int i;
953         u32 reg;
954         struct phy_device *phydev;
955
956         tg3_mdio_start(tp);
957
958         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
959             (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
960                 return 0;
961
962         tp->mdio_bus = mdiobus_alloc();
963         if (tp->mdio_bus == NULL)
964                 return -ENOMEM;
965
966         tp->mdio_bus->name     = "tg3 mdio bus";
967         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
968                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
969         tp->mdio_bus->priv     = tp;
970         tp->mdio_bus->parent   = &tp->pdev->dev;
971         tp->mdio_bus->read     = &tg3_mdio_read;
972         tp->mdio_bus->write    = &tg3_mdio_write;
973         tp->mdio_bus->reset    = &tg3_mdio_reset;
974         tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
975         tp->mdio_bus->irq      = &tp->mdio_irq[0];
976
977         for (i = 0; i < PHY_MAX_ADDR; i++)
978                 tp->mdio_bus->irq[i] = PHY_POLL;
979
980         /* The bus registration will look for all the PHYs on the mdio bus.
981          * Unfortunately, it does not ensure the PHY is powered up before
982          * accessing the PHY ID registers.  A chip reset is the
983          * quickest way to bring the device back to an operational state..
984          */
985         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
986                 tg3_bmcr_reset(tp);
987
988         i = mdiobus_register(tp->mdio_bus);
989         if (i) {
990                 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
991                         tp->dev->name, i);
992                 return i;
993         }
994
995         tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
996
997         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
998
999         switch (phydev->phy_id) {
1000         case TG3_PHY_ID_BCM50610:
1001                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1002                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1003                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1004                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1005                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1006                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1007                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1008                 break;
1009         case TG3_PHY_ID_BCMAC131:
1010                 phydev->interface = PHY_INTERFACE_MODE_MII;
1011                 break;
1012         }
1013
1014         tg3_mdio_config(tp);
1015
1016         return 0;
1017 }
1018
1019 static void tg3_mdio_fini(struct tg3 *tp)
1020 {
1021         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1022                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1023                 mdiobus_unregister(tp->mdio_bus);
1024                 mdiobus_free(tp->mdio_bus);
1025                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1026         }
1027 }
1028
/* tp->lock is held. */
/* Ring the RX CPU "driver event" doorbell to notify firmware that a
 * command has been posted, and timestamp the event so
 * tg3_wait_for_event_ack() can bound its wait.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}
1040
1041 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1042
/* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC) for firmware to ack the
 * previous driver event by clearing GRC_RX_CPU_DRIVER_EVENT.  The wait
 * is shortened by however much time has already elapsed since
 * tp->last_event_jiffies.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;  /* poll in ~8 usec steps */

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}
1069
/* tp->lock is held. */
/* Report the current PHY/link registers to management firmware via the
 * NIC SRAM command mailbox (FWCMD_NICDRV_LINK_UPDATE).  Only applies
 * to 5780-class chips with ASF enabled.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
            !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
                return;

        /* Make sure firmware has consumed the previous event. */
        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        /* Payload length in bytes. */
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        /* Word 0: BMCR in the high half, BMSR in the low half. */
        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        /* Word 1: local advertisement / link partner ability. */
        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        /* Word 2: 1000BASE-T control/status; zero for serdes links. */
        val = 0;
        if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        /* Word 3: PHY address register in the high half. */
        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        /* Ring the doorbell so firmware processes the update. */
        tg3_generate_fw_event(tp);
}
1117
/* Log the current link state (speed/duplex and TX/RX flow control when
 * up) if link messages are enabled, and forward the state to management
 * firmware via tg3_ump_link_report().
 */
static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                if (netif_msg_link(tp))
                        printk(KERN_INFO PFX "%s: Link is down.\n",
                               tp->dev->name);
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       tp->dev->name,
                       (tp->link_config.active_speed == SPEED_1000 ?
                        1000 :
                        (tp->link_config.active_speed == SPEED_100 ?
                         100 : 10)),
                       (tp->link_config.active_duplex == DUPLEX_FULL ?
                        "full" : "half"));

                printk(KERN_INFO PFX
                       "%s: Flow control is %s for TX and %s for RX.\n",
                       tp->dev->name,
                       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
                       "on" : "off",
                       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
                       "on" : "off");
                tg3_ump_link_report(tp);
        }
}
1145
1146 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1147 {
1148         u16 miireg;
1149
1150         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1151                 miireg = ADVERTISE_PAUSE_CAP;
1152         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1153                 miireg = ADVERTISE_PAUSE_ASYM;
1154         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1155                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1156         else
1157                 miireg = 0;
1158
1159         return miireg;
1160 }
1161
1162 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1163 {
1164         u16 miireg;
1165
1166         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1167                 miireg = ADVERTISE_1000XPAUSE;
1168         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1169                 miireg = ADVERTISE_1000XPSE_ASYM;
1170         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1171                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1172         else
1173                 miireg = 0;
1174
1175         return miireg;
1176 }
1177
1178 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1179 {
1180         u8 cap = 0;
1181
1182         if (lcladv & ADVERTISE_PAUSE_CAP) {
1183                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1184                         if (rmtadv & LPA_PAUSE_CAP)
1185                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1186                         else if (rmtadv & LPA_PAUSE_ASYM)
1187                                 cap = TG3_FLOW_CTRL_RX;
1188                 } else {
1189                         if (rmtadv & LPA_PAUSE_CAP)
1190                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1191                 }
1192         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1193                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1194                         cap = TG3_FLOW_CTRL_TX;
1195         }
1196
1197         return cap;
1198 }
1199
1200 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1201 {
1202         u8 cap = 0;
1203
1204         if (lcladv & ADVERTISE_1000XPAUSE) {
1205                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1206                         if (rmtadv & LPA_1000XPAUSE)
1207                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1208                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1209                                 cap = TG3_FLOW_CTRL_RX;
1210                 } else {
1211                         if (rmtadv & LPA_1000XPAUSE)
1212                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1213                 }
1214         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1215                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1216                         cap = TG3_FLOW_CTRL_TX;
1217         }
1218
1219         return cap;
1220 }
1221
/* Resolve and apply flow control.  When autonegotiation with pause
 * autoneg is active, the TX/RX pause mode is derived from the local
 * and remote advertisements (serdes vs copper resolution); otherwise
 * the manually configured tp->link_config.flowctrl is used.  The
 * MAC_RX_MODE/MAC_TX_MODE registers are rewritten only when their
 * flow-control enable bits actually changed.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
        u8 autoneg;
        u8 flowctrl = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
                autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
        else
                autoneg = tp->link_config.autoneg;

        if (autoneg == AUTONEG_ENABLE &&
            (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
                if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
                        flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
                else
                        flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
        } else
                flowctrl = tp->link_config.flowctrl;

        tp->link_config.active_flowctrl = flowctrl;

        if (flowctrl & TG3_FLOW_CTRL_RX)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        if (old_rx_mode != tp->rx_mode)
                tw32_f(MAC_RX_MODE, tp->rx_mode);

        if (flowctrl & TG3_FLOW_CTRL_TX)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode)
                tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1261
/* phylib link-change callback.  Mirrors the PHY's negotiated state
 * into the MAC (port mode, duplex, TX slot timing, flow control) and
 * emits a link report when anything user-visible changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
        struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

        spin_lock(&tp->lock);

        mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
                                    MAC_MODE_HALF_DUPLEX);

        oldflowctrl = tp->link_config.active_flowctrl;

        if (phydev->link) {
                lcl_adv = 0;
                rmt_adv = 0;

                /* MII port mode for 10/100, GMII for gigabit. */
                if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;

                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
                else {
                        /* Full duplex: resolve pause from our config and
                         * the partner's pause/asym_pause bits.
                         */
                        lcl_adv = tg3_advert_flowctrl_1000T(
                                  tp->link_config.flowctrl);

                        if (phydev->pause)
                                rmt_adv = LPA_PAUSE_CAP;
                        if (phydev->asym_pause)
                                rmt_adv |= LPA_PAUSE_ASYM;
                }

                tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        } else
                mac_mode |= MAC_MODE_PORT_MODE_GMII;

        if (mac_mode != tp->mac_mode) {
                tp->mac_mode = mac_mode;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

        /* 1000/half needs a longer slot time (0xff vs 32). */
        if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
        else
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

        /* Report if link presence, speed, duplex or flow control changed. */
        if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
            (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
            phydev->speed != tp->link_config.active_speed ||
            phydev->duplex != tp->link_config.active_duplex ||
            oldflowctrl != tp->link_config.active_flowctrl)
            linkmesg = 1;

        tp->link_config.active_speed = phydev->speed;
        tp->link_config.active_duplex = phydev->duplex;

        spin_unlock(&tp->lock);

        /* Log outside the spinlock. */
        if (linkmesg)
                tg3_link_report(tp);
}
1333
/* Connect the MAC to its PHY through phylib.  Resets the PHY to reach
 * a known state, attaches tg3_adjust_link() as the link-change
 * callback, and masks the advertised features down to gigabit + pause.
 *
 * Returns 0 on success (or if already connected), else the
 * phy_connect() error code.
 */
static int tg3_phy_init(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
                return 0;

        /* Bring the PHY back to a known state. */
        tg3_bmcr_reset(tp);

        phydev = tp->mdio_bus->phy_map[PHY_ADDR];

        /* Attach the MAC to the PHY. */
        phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
                             phydev->dev_flags, phydev->interface);
        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
                return PTR_ERR(phydev);
        }

        tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

        /* Mask with MAC supported features. */
        phydev->supported &= (PHY_GBIT_FEATURES |
                              SUPPORTED_Pause |
                              SUPPORTED_Asym_Pause);

        phydev->advertising = phydev->supported;

        printk(KERN_INFO
               "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
               tp->dev->name, phydev->drv->name, phydev->dev.bus_id);

        return 0;
}
1369
/* (Re)start the phylib state machine for the attached PHY.  If the PHY
 * had been placed in low power mode, restore the saved speed/duplex/
 * autoneg/advertising settings first, then kick off autonegotiation.
 */
static void tg3_phy_start(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
                return;

        phydev = tp->mdio_bus->phy_map[PHY_ADDR];

        if (tp->link_config.phy_is_low_power) {
                tp->link_config.phy_is_low_power = 0;
                phydev->speed = tp->link_config.orig_speed;
                phydev->duplex = tp->link_config.orig_duplex;
                phydev->autoneg = tp->link_config.orig_autoneg;
                phydev->advertising = tp->link_config.orig_advertising;
        }

        phy_start(phydev);

        phy_start_aneg(phydev);
}
1391
1392 static void tg3_phy_stop(struct tg3 *tp)
1393 {
1394         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1395                 return;
1396
1397         phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
1398 }
1399
1400 static void tg3_phy_fini(struct tg3 *tp)
1401 {
1402         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1403                 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1404                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1405         }
1406 }
1407
/* Write @val to DSP register @reg.  The DSP is accessed indirectly:
 * load the address register, then write through the read/write port.
 */
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
1413
/* Enable or disable automatic MDI crossover (auto-MDIX) in the PHY.
 * Only applies to 5705+ non-serdes devices.  The 5906 uses the EPHY
 * test/shadow-register path; other chips use the auxiliary control
 * "misc" shadow register gated by the write-enable bit.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
        u32 phy;

        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
            (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 ephy;

                if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
                        /* Expose the shadow registers, flip the MDIX bit,
                         * then restore the original test register value.
                         */
                        tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                     ephy | MII_TG3_EPHY_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
                        }
                        tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
                }
        } else {
                /* Select the misc shadow register for read-back. */
                phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
                      MII_TG3_AUXCTL_SHDWSEL_MISC;
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        /* WREN must be set for the write to take effect. */
                        phy |= MII_TG3_AUXCTL_MISC_WREN;
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
                }
        }
}
1451
/* Enable the PHY ethernet wirespeed feature by setting bits 15 and 4
 * in the auxiliary control shadow register (selector 0x7007), unless
 * the board disables it via TG3_FLG2_NO_ETH_WIRE_SPEED.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
            !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             (val | (1 << 15) | (1 << 4)));
}
1464
/* Distribute the one-time-programmable (OTP) calibration word from
 * tp->phy_otp into the PHY DSP registers (TAP1, AADJ1CH0/CH3,
 * EXP75/96/97).  The SM_DSP clock is enabled for the duration of the
 * writes and turned off again at the end.  No-op if no OTP data.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
        u32 otp, phy;

        if (!tp->phy_otp)
                return;

        otp = tp->phy_otp;

        /* Enable SM_DSP clock and tx 6dB coding. */
        phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
              MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
              MII_TG3_AUXCTL_ACTL_TX_6DB;
        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

        phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
        phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
        tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

        phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
              ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

        phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
        phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

        phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

        phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

        phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
              ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

        /* Turn off SM_DSP clock. */
        phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
              MII_TG3_AUXCTL_ACTL_TX_6DB;
        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
1507
1508 static int tg3_wait_macro_done(struct tg3 *tp)
1509 {
1510         int limit = 100;
1511
1512         while (limit--) {
1513                 u32 tmp32;
1514
1515                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1516                         if ((tmp32 & 0x1000) == 0)
1517                                 break;
1518                 }
1519         }
1520         if (limit <= 0)
1521                 return -EBUSY;
1522
1523         return 0;
1524 }
1525
/* Write a known test pattern into each of the four DSP channel blocks
 * and read it back for verification.  On any macro-wait timeout,
 * *resetp is set so the caller retries after another PHY reset.  On a
 * data miscompare, the DSP fix-up writes (0x000b/0x4001/0x4005) are
 * issued and -EBUSY is returned without requesting a reset.
 * Returns 0 when every channel verifies.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        /* Per-channel test pattern: six words written through the DSP
         * read/write port; read back as (low, high) pairs below.
         */
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                /* Select this channel's DSP block and start a write. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Re-select the block and switch to read-back mode. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Read back and compare the six words as (low, high)
                 * pairs; only 15 bits of low and 4 bits of high are
                 * significant.
                 */
                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}
1591
/* Clear the test pattern in each of the four DSP channel blocks by
 * writing six zero words per channel and waiting for the macro to
 * finish.  Returns 0 on success, -EBUSY if a macro wait times out.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp))
                        return -EBUSY;
        }

        return 0;
}
1611
/* PHY reset workaround for 5703/5704/5705-era chips: repeatedly reset
 * the PHY, force 1000/full master mode, and write/verify the DSP
 * channel test pattern (up to 10 retries), then clear the pattern and
 * restore the transmitter/interrupt and master-mode settings.
 *
 * NOTE(review): if every retry takes one of the "continue" paths
 * before the MII_TG3_CTRL read succeeds, phy9_orig is used
 * uninitialized in the restore write below — presumably the retries
 * are expected to succeed in practice; worth confirming upstream.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
                        continue;

                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

                /* Set to master mode.  */
                if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
                        continue;

                tg3_writephy(tp, MII_TG3_CTRL,
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));

                /* Enable SM_DSP_CLOCK and 6dB.  */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

                /* Block the PHY control access.  */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        /* Clear the channel test pattern written above. */
        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        /* Unblock PHY control access. */
        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, 0x16, 0x0000);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                /* Set Extended packet length bit for jumbo frames */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
        }
        else {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }

        /* Restore the saved master-mode setting. */
        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

        /* Re-enable transmitter and interrupt. */
        if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
                reg32 &= ~0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
        } else if (!err)
                err = -EBUSY;

        return err;
}
1687
/* Fully reset the tigon3 PHY and re-apply all chip-specific DSP
 * workarounds.  (Unconditional: there is no link check or FORCE
 * argument.)
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Bring the 5906 ethernet PHY out of IDDQ low-power mode
		 * before attempting any MII access.
		 */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR latches link-down; read twice so phy_status reflects the
	 * current state.  Two failed reads mean the PHY is unreachable.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* Resetting the PHY drops any established link; report it now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* These revisions need the DSP test-pattern reset
		 * workaround instead of a plain BMCR reset.
		 */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		/* Temporarily clear GPHY 10Mb RX-only mode around the
		 * reset; restored below from the saved cpmuctrl.
		 */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		/* Restore the CPMU control word saved above. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		u32 val;

		/* If the 1000Mb MAC clock was parked at 12.5MHz (low-power
		 * state), release it back to the full-speed setting.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

	tg3_phy_apply_otp(tp);

out:
	/* Chip-bug DSP fixups, applied after every successful reset.
	 * The magic register/value pairs come from vendor errata.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* The double write is intentional per the errata. */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1833
/* Drive the GRC local-control GPIOs that select between main and
 * auxiliary (Vaux) power.  On dual-function devices (5704/5714) the
 * peer function's WOL/ASF configuration is consulted as well, since
 * the GPIOs are shared between the two ports.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* Only real NICs drive these GPIOs; skip otherwise. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		/* Either function needs power for WOL/ASF: assert the
		 * chip-specific GPIO sequence that keeps aux power up.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* A fully initialized peer already owns the GPIO
			 * sequencing; don't redo it from this function.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Three-step sequence: enable outputs, raise GPIO0,
			 * then (if usable) drop GPIO2 again.  The order of
			 * these writes matters.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Neither function needs aux power.  5700/5701 need no GPIO
		 * work here; other chips toggle GPIO1 in a fixed sequence.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1944
1945 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1946 {
1947         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1948                 return 1;
1949         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1950                 if (speed != SPEED_10)
1951                         return 1;
1952         } else if (speed == SPEED_10)
1953                 return 1;
1954
1955         return 0;
1956 }
1957
1958 static int tg3_setup_phy(struct tg3 *, int);
1959
1960 #define RESET_KIND_SHUTDOWN     0
1961 #define RESET_KIND_INIT         1
1962 #define RESET_KIND_SUSPEND      2
1963
1964 static void tg3_write_sig_post_reset(struct tg3 *, int);
1965 static int tg3_halt_cpu(struct tg3 *, u32);
1966 static int tg3_nvram_lock(struct tg3 *);
1967 static void tg3_nvram_unlock(struct tg3 *);
1968
/* Put the PHY (or serdes) into its lowest safe power state before the
 * chip itself is suspended.  Some chip revisions must not have their
 * PHY powered down because of hardware bugs; those return early and
 * skip the final BMCR_PDOWN write.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			/* Park the 5704 serdes in soft reset with HW
			 * autoneg selected; bit 15 of MAC_SERDES_CFG is
			 * also set (meaning per vendor errata).
			 */
			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then drop it into IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
		/* Force the link LEDs off while the PHY sleeps. */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		/* Park the 1000Mb MAC clock at 12.5MHz for the duration
		 * of the power-down (undone in tg3_phy_reset()).
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	/* Finally, power the PHY down via the standard BMCR bit. */
	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2016
/* Transition the device to the requested PCI power state.  For D0 this
 * just re-enables the chip and returns; for D1/D2/D3hot it masks PCI
 * interrupts, records and restricts the link configuration, arms WOL
 * if configured, manages core clocks, powers down the PHY when safe,
 * sequences aux power, and finally programs the PCI power state.
 * Returns 0 on success, -EINVAL for an unsupported state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	switch (state) {
	case PCI_D0:
		pci_enable_wake(tp->pdev, state, false);
		pci_set_power_state(tp->pdev, PCI_D0);

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
		break;

	default:
		printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
			tp->dev->name, state);
		return -EINVAL;
	}
	/* Mask PCI interrupts while the chip is being put to sleep. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		/* phylib-managed PHY: save the current link settings so
		 * they can be restored on resume, then restrict the
		 * advertisement to the low speeds usable during WOL.
		 */
		if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
		    !tp->link_config.phy_is_low_power) {
			struct phy_device *phydev;
			u32 advertising;

			phydev = tp->mdio_bus->phy_map[PHY_ADDR];

			tp->link_config.phy_is_low_power = 1;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
			    (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
				if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);
		}
	} else {
		/* Driver-managed PHY: save settings once, then force a
		 * 10/half autoneg link (copper only) for low-power mode.
		 */
		if (tp->link_config.phy_is_low_power == 0) {
			tp->link_config.phy_is_low_power = 1;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Tell the 5906 VCPU firmware that WOL handling is off. */
		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait (up to ~200ms) for the firmware mailbox to signal
		 * readiness before shutting down.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		/* Leave the MAC configured so it can receive (and match)
		 * the magic packet while the host sleeps.
		 */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
				tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
				udelay(40);
			}

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (pci_pme_capable(tp->pdev, state) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
			/* Keep the APE's TX/RX paths alive. */
			mac_mode |= tp->mac_mode &
				    (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
			if (mac_mode & MAC_MODE_APE_TX_EN)
				mac_mode |= MAC_MODE_TDE_ENABLE;
		}

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Core clock management: which clocks may be gated depends on
	 * the chip family and whether WOL needs 100Mb speeds.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step write, each with a 40us settle time. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY fully down when nothing (WOL/ASF/APE)
	 * still needs it.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU under the NVRAM lock; only
			 * unlock if the lock was actually acquired.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
		pci_enable_wake(tp->pdev, state, true);

	/* Finally, set the new power state. */
	pci_set_power_state(tp->pdev, state);

	return 0;
}
2266
2267 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2268 {
2269         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2270         case MII_TG3_AUX_STAT_10HALF:
2271                 *speed = SPEED_10;
2272                 *duplex = DUPLEX_HALF;
2273                 break;
2274
2275         case MII_TG3_AUX_STAT_10FULL:
2276                 *speed = SPEED_10;
2277                 *duplex = DUPLEX_FULL;
2278                 break;
2279
2280         case MII_TG3_AUX_STAT_100HALF:
2281                 *speed = SPEED_100;
2282                 *duplex = DUPLEX_HALF;
2283                 break;
2284
2285         case MII_TG3_AUX_STAT_100FULL:
2286                 *speed = SPEED_100;
2287                 *duplex = DUPLEX_FULL;
2288                 break;
2289
2290         case MII_TG3_AUX_STAT_1000HALF:
2291                 *speed = SPEED_1000;
2292                 *duplex = DUPLEX_HALF;
2293                 break;
2294
2295         case MII_TG3_AUX_STAT_1000FULL:
2296                 *speed = SPEED_1000;
2297                 *duplex = DUPLEX_FULL;
2298                 break;
2299
2300         default:
2301                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2302                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2303                                  SPEED_10;
2304                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2305                                   DUPLEX_HALF;
2306                         break;
2307                 }
2308                 *speed = SPEED_INVALID;
2309                 *duplex = DUPLEX_INVALID;
2310                 break;
2311         }
2312 }
2313
/* Program the copper PHY's advertisement (MII_ADVERTISE, MII_TG3_CTRL)
 * and control (MII_BMCR) registers from tp->link_config.  Handles three
 * cases: entering low-power mode, full autonegotiation, and a forced
 * speed/duplex.  In the forced case autoneg is left disabled and BMCR
 * is written directly; otherwise autoneg is (re)started at the end.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100Mb advertised if Wake-on-LAN needs a 100Mb link. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed forced: advertise everything enabled
		 * in link_config.advertising (minus gigabit on 10/100-only
		 * hardware).
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 need to negotiate as master (chip
			 * erratum workaround), unless gigabit is disabled.
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* Clear gigabit advertisement below. */
			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down (loopback) and wait for the
			 * PHY to report loss of link before writing the
			 * new forced mode, so the link comes up cleanly.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR link bit is latched; read twice. */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg path: kick off (re)negotiation. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
2451
2452 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2453 {
2454         int err;
2455
2456         /* Turn off tap power management. */
2457         /* Set Extended packet length bit */
2458         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2459
2460         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2461         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2462
2463         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2464         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2465
2466         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2467         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2468
2469         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2470         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2471
2472         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2473         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2474
2475         udelay(40);
2476
2477         return err;
2478 }
2479
2480 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2481 {
2482         u32 adv_reg, all_mask = 0;
2483
2484         if (mask & ADVERTISED_10baseT_Half)
2485                 all_mask |= ADVERTISE_10HALF;
2486         if (mask & ADVERTISED_10baseT_Full)
2487                 all_mask |= ADVERTISE_10FULL;
2488         if (mask & ADVERTISED_100baseT_Half)
2489                 all_mask |= ADVERTISE_100HALF;
2490         if (mask & ADVERTISED_100baseT_Full)
2491                 all_mask |= ADVERTISE_100FULL;
2492
2493         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2494                 return 0;
2495
2496         if ((adv_reg & all_mask) != all_mask)
2497                 return 0;
2498         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2499                 u32 tg3_ctrl;
2500
2501                 all_mask = 0;
2502                 if (mask & ADVERTISED_1000baseT_Half)
2503                         all_mask |= ADVERTISE_1000HALF;
2504                 if (mask & ADVERTISED_1000baseT_Full)
2505                         all_mask |= ADVERTISE_1000FULL;
2506
2507                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2508                         return 0;
2509
2510                 if ((tg3_ctrl & all_mask) != all_mask)
2511                         return 0;
2512         }
2513         return 1;
2514 }
2515
2516 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2517 {
2518         u32 curadv, reqadv;
2519
2520         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2521                 return 1;
2522
2523         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2524         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2525
2526         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2527                 if (curadv != reqadv)
2528                         return 0;
2529
2530                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2531                         tg3_readphy(tp, MII_LPA, rmtadv);
2532         } else {
2533                 /* Reprogram the advertisement register, even if it
2534                  * does not affect the current link.  If the link
2535                  * gets renegotiated in the future, we can save an
2536                  * additional renegotiation cycle by advertising
2537                  * it correctly in the first place.
2538                  */
2539                 if (curadv != reqadv) {
2540                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2541                                      ADVERTISE_PAUSE_ASYM);
2542                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2543                 }
2544         }
2545
2546         return 1;
2547 }
2548
/* Bring up or re-evaluate the link on a copper PHY.  Applies per-chip
 * PHY workarounds, reads the negotiated speed/duplex from the PHY,
 * programs the MAC mode registers to match, and reports carrier
 * changes to the network stack.  @force_reset forces a PHY reset
 * before (re)negotiation.  Returns 0 on success or a negative error
 * from the 5401 DSP init path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Clear any latched link attention bits in MAC_STATUS. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Turn off MI auto-polling while we access the PHY directly. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status is latched-low; read twice to get
		 * the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link is down: (re)load the 5401 DSP workaround
			 * and wait up to ~10ms for link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0 can get stuck at gigabit: reset the
			 * PHY and reload the DSP once more.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Make sure bit 10 of the aux control register is set;
		 * if it was clear, skip link evaluation this pass.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll for link up (BMSR is latched; the second read matters). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for the aux status register to report something. */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Read a stable BMCR value (0x7fff indicates a bogus read). */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Link counts as up only if the PHY is advertising
			 * everything we want, including flow control.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
								  &rmt_adv))
					current_link_up = 1;
			}
		} else {
			/* Forced mode: link is up only if the PHY matches
			 * the requested speed/duplex/flowctrl exactly.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Reprogram the PHY and restart negotiation; the link
		 * may already be back by the time we re-read BMSR.
		 */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program MAC port mode (MII for 10/100, GMII otherwise). */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / fast PCI: notify firmware via the
	 * mailbox after clearing the latched status bits.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Tell the stack about carrier transitions. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2807
/* Software state for the 1000BASE-X autonegotiation state machine
 * driven by tg3_fiber_aneg_smachine(), used on fiber links where the
 * negotiation is performed in software rather than by the PHY.
 */
struct tg3_fiber_aneginfo {
	/* Current state of the negotiation state machine. */
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	/* MR_* control/result flags; the MR_LP_ADV_* bits record the
	 * link partner's advertised abilities once decoded.
	 */
	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick counters (incremented per state-machine step). */
	unsigned long link_time, cur_time;

	/* Last received config word and how often it has repeated. */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match flags derived from the received config word stream. */
	char ability_match, idle_match, ack_match;

	/* Transmitted and received config words (ANEG_CFG_* bits). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks to wait for the link to settle between state transitions. */
#define ANEG_STATE_SETTLE_TIME	10000
2871
/* Run one step of the software 1000BASE-X autonegotiation state
 * machine.  Samples the received config word from the MAC, updates the
 * match tracking in @ap, then advances ap->state.  Returns ANEG_OK to
 * keep stepping, ANEG_TIMER_ENAB while waiting for a settle period,
 * ANEG_DONE when negotiation completed, or ANEG_FAILED.  Called in a
 * tight loop by fiber_autoneg().
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		/* First step: start from a clean slate. */
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			/* Config word changed: restart the repeat counter. */
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				/* Same word seen twice in a row: matched. */
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word received: the partner is sending idles. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit a zero config word to restart negotiation. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Wait out the settle time before detecting abilities. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus our pause capabilities. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner's config word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				/* Abilities changed mid-ack: start over. */
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the link partner's advertised abilities. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			/* Partner restarted negotiation: start over. */
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					/* Next-page exchange unsupported. */
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		/* Stop sending config words; send idles instead. */
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
3125
3126 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3127 {
3128         int res = 0;
3129         struct tg3_fiber_aneginfo aninfo;
3130         int status = ANEG_FAILED;
3131         unsigned int tick;
3132         u32 tmp;
3133
3134         tw32_f(MAC_TX_AUTO_NEG, 0);
3135
3136         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3137         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3138         udelay(40);
3139
3140         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3141         udelay(40);
3142
3143         memset(&aninfo, 0, sizeof(aninfo));
3144         aninfo.flags |= MR_AN_ENABLE;
3145         aninfo.state = ANEG_STATE_UNKNOWN;
3146         aninfo.cur_time = 0;
3147         tick = 0;
3148         while (++tick < 195000) {
3149                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3150                 if (status == ANEG_DONE || status == ANEG_FAILED)
3151                         break;
3152
3153                 udelay(1);
3154         }
3155
3156         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3157         tw32_f(MAC_MODE, tp->mac_mode);
3158         udelay(40);
3159
3160         *txflags = aninfo.txconfig;
3161         *rxflags = aninfo.flags;
3162
3163         if (status == ANEG_DONE &&
3164             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3165                              MR_LP_ADV_FULL_DUPLEX)))
3166                 res = 1;
3167
3168         return res;
3169 }
3170
/* Initialization/reset sequence for the external BCM8002 SerDes PHY.
 * The bare numeric PHY register addresses and values below appear to
 * come from Broadcom reference code; only the inline comments describe
 * their intent.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
3220
/* Drive the on-chip SG_DIG hardware autonegotiation block for fiber
 * ports.  @mac_status is a recent snapshot of the MAC_STATUS register.
 * Returns 1 when the link should be considered up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* Revisions other than 5704 A0/A1 need a MAC_SERDES_CFG
	 * workaround; the value written depends on which MAC of the
	 * dual-MAC device this is (port A or not).
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Autoneg disabled: tear down HW autoneg if it was
		 * running, then report link purely from PCS sync.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* While a parallel-detected link drains its timeout
		 * (synced, no config code words received), keep
		 * reporting link up rather than restarting autoneg.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse SOFT_RESET while programming the wanted value. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Autoneg completed: translate the pause bits
			 * into the MII-style advertisement words that
			 * tg3_setup_flow_control() expects.
			 */
			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: disable HW autoneg
				 * and try parallel detection instead.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* Neither synced nor signal detected: rearm the autoneg
		 * timeout and forget any parallel-detect state.
		 */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
3362
/* Bring up a fiber link without the SG_DIG hardware autoneg block:
 * run the software autoneg state machine (fiber_autoneg) when autoneg
 * is enabled, otherwise force a 1000FD link.
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no usable link at all. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate the negotiated pause bits into the
			 * MII-style advertisement words used by
			 * tg3_setup_flow_control().
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack the sync/config changed bits until they stop
		 * toggling (bounded number of attempts).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed, but we are synced and not receiving
		 * config code words: treat the link as up anyway.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
3424
/* Link setup for TBI (SerDes) fiber ports.  Runs hardware or software
 * autonegotiation as configured, updates active speed/duplex and the
 * link LED, and reports carrier/link-parameter changes.
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember current link parameters so we can tell later whether
	 * anything worth reporting actually changed.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up and the MAC
	 * reports a clean, stable link -- just ack the changed bits
	 * and keep the current state.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC into TBI (fiber) port mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Mark the status block updated while clearing its pending
	 * link-change indication.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack residual sync/config change indications until quiet
	 * (bounded number of attempts).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg fully timed out: briefly pulse SEND_CONFIGS
		 * to prod the link partner.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links are always 1000FD when up; drive the LED to match. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report a carrier change, or a flow-control/speed/duplex change
	 * even when the carrier state itself stayed the same.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
3532
/* Link setup for fiber ports driven through MII-style SerDes registers
 * (the 5714 ASIC gets special link-status handling below).  Handles
 * autoneg, forced mode and parallel detection, programs MAC duplex,
 * and reports carrier changes.
 * Returns the OR-accumulated tg3_readphy() error state.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Clear all pending MAC status change indications. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR is read twice (link status latches low); on 5714 the
	 * MAC_TX_STATUS link bit overrides what the PHY reports.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Build the desired 1000BASE-X advertisement word from
		 * the configured flow control and link modes.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* (Re)start autoneg only if the advertisement changed
		 * or the PHY did not have autoneg enabled yet; the
		 * outcome is then picked up on a later invocation.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: program duplex by hand. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-sample link status (latched-low, hence the
			 * double read), with the 5714 override again.
			 */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of local
			 * and partner advertisements; no common mode
			 * means the link is not usable.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			}
			else
				current_link_up = 0;
		}
	}

	/* local_adv/remote_adv were initialized on every path that can
	 * set current_link_up (the BMSR_LSTATUS branch above).
	 */
	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
3702
3703 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3704 {
3705         if (tp->serdes_counter) {
3706                 /* Give autoneg time to complete. */
3707                 tp->serdes_counter--;
3708                 return;
3709         }
3710         if (!netif_carrier_ok(tp->dev) &&
3711             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3712                 u32 bmcr;
3713
3714                 tg3_readphy(tp, MII_BMCR, &bmcr);
3715                 if (bmcr & BMCR_ANENABLE) {
3716                         u32 phy1, phy2;
3717
3718                         /* Select shadow register 0x1f */
3719                         tg3_writephy(tp, 0x1c, 0x7c00);
3720                         tg3_readphy(tp, 0x1c, &phy1);
3721
3722                         /* Select expansion interrupt status register */
3723                         tg3_writephy(tp, 0x17, 0x0f01);
3724                         tg3_readphy(tp, 0x15, &phy2);
3725                         tg3_readphy(tp, 0x15, &phy2);
3726
3727                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3728                                 /* We have signal detect and not receiving
3729                                  * config code words, link is up by parallel
3730                                  * detection.
3731                                  */
3732
3733                                 bmcr &= ~BMCR_ANENABLE;
3734                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3735                                 tg3_writephy(tp, MII_BMCR, bmcr);
3736                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3737                         }
3738                 }
3739         }
3740         else if (netif_carrier_ok(tp->dev) &&
3741                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3742                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3743                 u32 phy2;
3744
3745                 /* Select expansion interrupt status register */
3746                 tg3_writephy(tp, 0x17, 0x0f01);
3747                 tg3_readphy(tp, 0x15, &phy2);
3748                 if (phy2 & 0x20) {
3749                         u32 bmcr;
3750
3751                         /* Config code words received, turn on autoneg. */
3752                         tg3_readphy(tp, MII_BMCR, &bmcr);
3753                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3754
3755                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3756
3757                 }
3758         }
3759 }
3760
/* Top-level link setup: dispatch to the handler matching the PHY type,
 * then apply MAC-level fixups that depend on the resulting link state
 * (GRC clock prescaler on 5784 A0/A1, TX IPG/slot-time, statistics
 * coalescing, ASPM power-management threshold).
 * Returns the error code from the PHY-specific handler.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
		u32 val, scale;

		/* Re-scale the GRC prescaler to match the current MAC
		 * core clock reported by the CPMU.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* 1000/half uses a larger slot time (0xff vs 32) than all
	 * other speed/duplex combinations.
	 */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Statistics block coalescing runs only while the link is up. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		/* NOTE(review): appears to program the configured L1
		 * threshold only while the link is down and saturate it
		 * otherwise -- confirm intended ASPM semantics.
		 */
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3823
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Reaching here with the reorder flag already set, or while
	 * already using the indirect mailbox method, is a driver bug.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the pending recovery; the actual chip reset happens
	 * later, outside this context.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3844
3845 static inline u32 tg3_tx_avail(struct tg3 *tp)
3846 {
3847         smp_mb();
3848         return (tp->tx_pending -
3849                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3850 }
3851
3852 /* Tigon3 never reports partial packet sends.  So we do not
3853  * need special logic to handle SKBs that have not had all
3854  * of their frags sent yet, like SunGEM does.
3855  */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;	/* chip's consumer index */
	u32 sw_idx = tp->tx_cons;			/* our consumer index */

	/* Reap every descriptor the chip has consumed since last run. */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot without an skb means our ring view and
		 * the hardware's disagree -- enter the recovery path.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Walk this skb's fragment slots: they must be empty and
		 * must not run past the hardware index.  If they do, note
		 * it and recover after freeing the skb.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under netif_tx_lock to close the race against the
	 * xmit path stopping the queue concurrently.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3910
3911 /* Returns size of skb allocated or < 0 on error.
3912  *
3913  * We only need to fill in the address because the other members
3914  * of the RX descriptor are invariant, see tg3_init_rings.
3915  *
3916  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3917  * posting buffers we only dirty the first cache line of the RX
3918  * descriptor (containing the address).  Whereas for the RX status
3919  * buffers the cpu only reads the last cacheline of the RX descriptor
3920  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3921  */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Select the descriptor, the bookkeeping slot and the buffer size
	 * for the producer ring named by the opaque key.
	 */
	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	/* NOTE(review): the DMA address returned here is not checked for
	 * a mapping failure before being programmed into the descriptor
	 * -- confirm whether pci_dma_mapping_error() is needed here.
	 */
	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	/* Publish the 64-bit bus address into the ring descriptor. */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
3982
3983 /* We only need to move over in the address because the other
3984  * members of the RX descriptor are invariant.  See notes above
3985  * tg3_alloc_rx_skb for full details.
3986  */
static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
			   int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	int dest_idx;

	/* Locate the source/destination descriptor and bookkeeping slots
	 * in the producer ring named by the opaque key.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		dest_desc = &tp->rx_std[dest_idx];
		dest_map = &tp->rx_std_buffers[dest_idx];
		src_desc = &tp->rx_std[src_idx];
		src_map = &tp->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		dest_desc = &tp->rx_jumbo[dest_idx];
		dest_map = &tp->rx_jumbo_buffers[dest_idx];
		src_desc = &tp->rx_jumbo[src_idx];
		src_map = &tp->rx_jumbo_buffers[src_idx];
		break;

	default:
		return;		/* unknown ring: nothing to recycle */
	}

	/* Transfer skb ownership and the DMA mapping to the destination
	 * slot, then clear the source so it is not double-owned.
	 */
	dest_map->skb = src_map->skb;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	src_map->skb = NULL;
}
4023
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to the stack via the VLAN accel receive path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
4030
4031 /* The RX ring scheme is composed of multiple rings which post fresh
4032  * buffers to the chip, and one special ring the chip uses to report
4033  * status back to the host.
4034  *
4035  * The special ring reports the status of received packets to the
4036  * host.  The chip does not write into the original descriptor the
4037  * RX buffer was obtained from.  The chip simply takes the original
4038  * descriptor as provided by the host, updates the status and length
4039  * field, then writes this into the next status ring entry.
4040  *
4041  * Each ring the host uses to post buffers to the chip is described
4042  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
4043  * it is first placed into the on-chip ram.  When the packet's length
4044  * is known, it walks down the TG3_BDINFO entries to select the ring.
4045  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4046  * which is within the range of the new packet's length is chosen.
4047  *
4048  * The "separate ring for rx status" scheme may sound queer, but it makes
4049  * sense from a cache coherency perspective.  If only the host writes
4050  * to the buffer post rings, and only the chip writes to the rx status
4051  * rings, then cache lines never move beyond shared-modified state.
4052  * If both the host and chip were to write into the same ring, cache line
4053  * eviction could occur since both entities want it in an exclusive state.
4054  */
/* Service the RX return ring within the NAPI budget; returns the
 * number of packets delivered to the stack (<= budget).
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which producer ring (and
		 * which slot in it) this buffer originally came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		/* Drop errored frames (the odd-nibble MII indication alone
		 * is tolerated), recycling the buffer back to its ring.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		/* Large frame: hand the existing buffer up and post a fresh
		 * one.  Small frame: copy into a new skb and recycle the
		 * original buffer in place.
		 */
		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);	/* 2-byte headroom, matching tp->rx_offset */
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust hardware checksumming only when the chip reports a
		 * valid TCP/UDP checksum result.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Don't let too many std-ring buffers accumulate unposted;
		 * tell the chip about them in batches.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
4210
/* One pass of NAPI work: link-change handling, TX completion, then RX
 * processing bounded by (budget - work_done).  Returns updated work_done.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit while preserving the
			 * status-updated indication.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
				/* With phylib in use, only ack the MAC
				 * status bits here.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* Abort early if tg3_tx() flagged a TX recovery; the
		 * caller (tg3_poll) schedules the reset task.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
4252
/* NAPI poll callback: loop over tg3_poll_work() until the budget is
 * exhausted or no work remains, then complete NAPI and restart irqs.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		/* TX ring corruption detected: bail out to the reset path. */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
4293
/* Mark interrupts as being quiesced and wait for any handler already
 * running on another CPU to finish.  Must not be called re-entrantly.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible before waiting out the IRQ handler. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
4303
/* Nonzero while interrupts are quiesced (see tg3_irq_quiesce()); the
 * IRQ handlers check this to avoid scheduling NAPI during shutdown.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
4308
4309 /* Fully shutdown all tg3 driver activity elsewhere in the system.
4310  * If irq_sync is non-zero, then the IRQ handler must be synchronized
4311  * with as well.  Most of the time, this is not necessary except when
4312  * shutting down the device.
4313  */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	/* Optionally also quiesce the IRQ handler before proceeding. */
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
4320
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
4325
4326 /* One-shot MSI handler - Chip automatically disables interrupt
4327  * after sending MSI so driver doesn't have to do it.
4328  */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the status block and next RX return entry for the poll. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Don't schedule NAPI while interrupts are being quiesced. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
4342
4343 /* MSI ISR - No need to check for interrupt sharing and no need to
4344  * flush status block and interrupt mailbox. PCI ordering rules
4345  * guarantee that MSI will arrive after the status block.
4346  */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the status block and next RX return entry for the poll. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Don't schedule NAPI while interrupts are being quiesced. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
4367
/* INTx interrupt handler for chips that do not use tagged status. */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;	/* not ours (shared line or resetting) */
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
4416
/* INTx interrupt handler for chips using tagged status blocks: a
 * status_tag equal to the last one seen means no new status.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;	/* not ours (shared line or resetting) */
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
4464
4465 /* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/* Claim the interrupt if the status block was updated or the
	 * INTx line is asserted, and mask off further interrupts.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
4479
4480 static int tg3_init_hw(struct tg3 *, int);
4481 static int tg3_halt(struct tg3 *, int, int);
4482
4483 /* Restart hardware after configuration changes, self-test, etc.
4484  * Invoked with tp->lock held.
4485  */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Init failed: halt the chip and close the device.  The
		 * lock is dropped around the teardown and re-acquired
		 * afterwards (see the sparse annotations above).
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
4506
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the interrupt handler directly so the stack can
 * make progress when normal interrupt delivery is unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
4515
/* Process-context worker that halts and fully re-initializes the chip;
 * scheduled from tg3_tx_timeout() and the NAPI TX-recovery path.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* Nothing to do if the interface was brought down meanwhile. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	/* Re-take the lock with IRQ quiescing for the actual reset. */
	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	/* If TX recovery was requested, switch to the flushing mailbox
	 * writers and record the write-reorder workaround as active
	 * (see tg3_tx_recover()).
	 */
	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);
}
4563
/* Log a minimal MAC/DMA status register dump (used on TX timeout). */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
4571
/* net_device watchdog hook: optionally log state, then schedule a full
 * chip reset in process context (tg3_reset_task).
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
4584
4585 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4586 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4587 {
4588         u32 base = (u32) mapping & 0xffffffff;
4589
4590         return ((base > 0xffffdcc0) &&
4591                 (base + len + 8 < base));
4592 }
4593
4594 /* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only chips with the 40-bit DMA bug need this check; on other
	 * configurations the function compiles down to 0.
	 */
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	return 0;
#endif
}
4606
4607 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4608
4609 /* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	/* Copy the skb into a fresh linear buffer; on 5701 also add
	 * headroom so the data pointer lands on a 4-byte boundary.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
		new_addr = skb_shinfo(new_skb)->dma_maps[0];

		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
			if (!ret)
				skb_dma_unmap(&tp->pdev->dev, new_skb,
					      DMA_TO_DEVICE);
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			/* Single descriptor covers the whole linear copy. */
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		/* Slot 0 owns the replacement skb (NULL on failure); the
		 * remaining slots used by the old skb are cleared.
		 */
		if (i == 0) {
			tp->tx_buffers[entry].skb = new_skb;
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	/* The original skb's mappings are no longer needed either way. */
	skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	return ret;
}
4671
4672 static void tg3_set_txd(struct tg3 *tp, int entry,
4673                         dma_addr_t mapping, int len, u32 flags,
4674                         u32 mss_and_is_end)
4675 {
4676         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4677         int is_end = (mss_and_is_end & 0x1);
4678         u32 mss = (mss_and_is_end >> 1);
4679         u32 vlan_tag = 0;
4680
4681         if (is_end)
4682                 flags |= TXD_FLAG_END;
4683         if (flags & TXD_FLAG_VLAN) {
4684                 vlan_tag = flags >> 16;
4685                 flags &= 0xffff;
4686         }
4687         vlan_tag |= (mss << TXD_MSS_SHIFT);
4688
4689         txd->addr_hi = ((u64) mapping >> 32);
4690         txd->addr_lo = ((u64) mapping & 0xffffffff);
4691         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4692         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4693 }
4694
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * DMA-maps the skb (linear head plus page fragments), writes one tx
 * descriptor per segment via tg3_set_txd(), then advances the producer
 * index through the send-host mailbox.  Returns NETDEV_TX_OK even when
 * the packet is dropped (the skb is freed in that case); NETDEV_TX_BUSY
 * only when the tx ring is full.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss;
        struct skb_shared_info *sp;
        dma_addr_t mapping;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        mss = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len;

                /* TSO edits the headers below, so they must be private. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
                else {
                        struct iphdr *iph = ip_hdr(skb);

                        tcp_opt_len = tcp_optlen(skb);
                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Seed tot_len with the per-segment size; the
                         * chip recomputes the IP checksum per segment.
                         */
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        /* Header length is encoded in the upper bits of
                         * the mss field handed to the descriptor.
                         */
                        mss |= (ip_tcp_len + tcp_opt_len) << 9;
                }

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                /* Hardware fills in the TCP checksum for each segment. */
                tcp_hdr(skb)->check = 0;

        }
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Map head and all fragments in one call; drop on failure. */
        if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
                dev_kfree_skb(skb);
                goto out_unlock;
        }

        sp = skb_shinfo(skb);

        mapping = sp->dma_maps[0];

        /* Only the first slot of a packet records the skb pointer. */
        tp->tx_buffers[entry].skb = skb;

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = sp->dma_maps[i + 1];
                        tp->tx_buffers[entry].skb = NULL;

                        tg3_set_txd(tp, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));

                        entry = NEXT_TX(entry);
                }
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                /* Stop before the ring can overflow; re-wake right away
                 * if reclaim already freed enough slots.
                 */
                netif_stop_queue(dev);
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4814
4815 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4816
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Segments @skb in software (with TSO masked out of the feature set)
 * and feeds each resulting frame back through
 * tg3_start_xmit_dma_bug().  The original skb is always consumed
 * unless the ring is too full, in which case NETDEV_TX_BUSY is
 * returned and the skb is left untouched for requeueing.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;

        /* Estimate the number of fragments in the worst case */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
                netif_stop_queue(tp->dev);
                if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
                        return NETDEV_TX_BUSY;

                /* Reclaim made room between the two checks above. */
                netif_wake_queue(tp->dev);
        }

        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
        if (IS_ERR(segs))
                goto tg3_tso_bug_end;

        /* Transmit each software-built segment as a plain frame. */
        do {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                tg3_start_xmit_dma_bug(nskb, tp->dev);
        } while (segs);

tg3_tso_bug_end:
        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}
4849
4850 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4851  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4852  */
4853 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4854 {
4855         struct tg3 *tp = netdev_priv(dev);
4856         u32 len, entry, base_flags, mss;
4857         struct skb_shared_info *sp;
4858         int would_hit_hwbug;
4859         dma_addr_t mapping;
4860
4861         len = skb_headlen(skb);
4862
4863         /* We are running in BH disabled context with netif_tx_lock
4864          * and TX reclaim runs via tp->napi.poll inside of a software
4865          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4866          * no IRQ context deadlocks to worry about either.  Rejoice!
4867          */
4868         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4869                 if (!netif_queue_stopped(dev)) {
4870                         netif_stop_queue(dev);
4871
4872                         /* This is a hard error, log it. */
4873                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4874                                "queue awake!\n", dev->name);
4875                 }
4876                 return NETDEV_TX_BUSY;
4877         }
4878
4879         entry = tp->tx_prod;
4880         base_flags = 0;
4881         if (skb->ip_summed == CHECKSUM_PARTIAL)
4882                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4883         mss = 0;
4884         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4885                 struct iphdr *iph;
4886                 int tcp_opt_len, ip_tcp_len, hdr_len;
4887
4888                 if (skb_header_cloned(skb) &&
4889                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4890                         dev_kfree_skb(skb);
4891                         goto out_unlock;
4892                 }
4893
4894                 tcp_opt_len = tcp_optlen(skb);
4895                 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4896
4897                 hdr_len = ip_tcp_len + tcp_opt_len;
4898                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4899                              (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4900                         return (tg3_tso_bug(tp, skb));
4901
4902                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4903                                TXD_FLAG_CPU_POST_DMA);
4904
4905                 iph = ip_hdr(skb);
4906                 iph->check = 0;
4907                 iph->tot_len = htons(mss + hdr_len);
4908                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4909                         tcp_hdr(skb)->check = 0;
4910                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4911                 } else
4912                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4913                                                                  iph->daddr, 0,
4914                                                                  IPPROTO_TCP,
4915                                                                  0);
4916
4917                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4918                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4919                         if (tcp_opt_len || iph->ihl > 5) {
4920                                 int tsflags;
4921
4922                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4923                                 mss |= (tsflags << 11);
4924                         }
4925                 } else {
4926                         if (tcp_opt_len || iph->ihl > 5) {
4927                                 int tsflags;
4928
4929                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4930                                 base_flags |= tsflags << 12;
4931                         }
4932                 }
4933         }
4934 #if TG3_VLAN_TAG_USED
4935         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4936                 base_flags |= (TXD_FLAG_VLAN |
4937                                (vlan_tx_tag_get(skb) << 16));
4938 #endif
4939
4940         if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4941                 dev_kfree_skb(skb);
4942                 goto out_unlock;
4943         }
4944
4945         sp = skb_shinfo(skb);
4946
4947         mapping = sp->dma_maps[0];
4948
4949         tp->tx_buffers[entry].skb = skb;
4950
4951         would_hit_hwbug = 0;
4952
4953         if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4954                 would_hit_hwbug = 1;
4955         else if (tg3_4g_overflow_test(mapping, len))
4956                 would_hit_hwbug = 1;
4957
4958         tg3_set_txd(tp, entry, mapping, len, base_flags,
4959                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4960
4961         entry = NEXT_TX(entry);
4962
4963         /* Now loop through additional data fragments, and queue them. */
4964         if (skb_shinfo(skb)->nr_frags > 0) {
4965                 unsigned int i, last;
4966
4967                 last = skb_shinfo(skb)->nr_frags - 1;
4968                 for (i = 0; i <= last; i++) {
4969                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4970
4971                         len = frag->size;
4972                         mapping = sp->dma_maps[i + 1];
4973
4974                         tp->tx_buffers[entry].skb = NULL;
4975
4976                         if (tg3_4g_overflow_test(mapping, len))
4977                                 would_hit_hwbug = 1;
4978
4979                         if (tg3_40bit_overflow_test(tp, mapping, len))
4980                                 would_hit_hwbug = 1;
4981
4982                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4983                                 tg3_set_txd(tp, entry, mapping, len,
4984                                             base_flags, (i == last)|(mss << 1));
4985                         else
4986                                 tg3_set_txd(tp, entry, mapping, len,
4987                                             base_flags, (i == last));
4988
4989                         entry = NEXT_TX(entry);
4990                 }
4991         }
4992
4993         if (would_hit_hwbug) {
4994                 u32 last_plus_one = entry;
4995                 u32 start;
4996
4997                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4998                 start &= (TG3_TX_RING_SIZE - 1);
4999
5000                 /* If the workaround fails due to memory/mapping
5001                  * failure, silently drop this packet.
5002                  */
5003                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
5004                                                 &start, base_flags, mss))
5005                         goto out_unlock;
5006
5007                 entry = start;
5008         }
5009
5010         /* Packets are ready, update Tx producer idx local and on card. */
5011         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5012
5013         tp->tx_prod = entry;
5014         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
5015                 netif_stop_queue(dev);
5016                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
5017                         netif_wake_queue(tp->dev);
5018         }
5019
5020 out_unlock:
5021         mmiowb();
5022
5023         dev->trans_start = jiffies;
5024
5025         return NETDEV_TX_OK;
5026 }
5027
5028 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5029                                int new_mtu)
5030 {
5031         dev->mtu = new_mtu;
5032
5033         if (new_mtu > ETH_DATA_LEN) {
5034                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5035                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5036                         ethtool_op_set_tso(dev, 0);
5037                 }
5038                 else
5039                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5040         } else {
5041                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5042                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5043                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5044         }
5045 }
5046
/* ndo MTU-change handler.
 *
 * Validates @new_mtu against TG3_MIN_MTU/TG3_MAX_MTU.  If the interface
 * is down, only the software state is updated; otherwise the chip is
 * halted, reconfigured for the new MTU, and restarted.  Returns 0 or a
 * negative errno from tg3_restart_hw().
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        /* Full chip reset is required to resize the rx rings. */
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        tg3_set_mtu(dev, tp, new_mtu);

        err = tg3_restart_hw(tp, 0);

        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        /* Restart the PHY only after the locks are dropped. */
        if (!err)
                tg3_phy_start(tp);

        return err;
}
5085
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
        struct ring_info *rxp;
        int i;

        /* Standard rx ring: unmap and free every posted buffer. */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                rxp = &tp->rx_std_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 tp->rx_pkt_buf_sz - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* Jumbo rx ring: same, with the jumbo buffer size. */
        for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                rxp = &tp->rx_jumbo_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* Tx ring: a packet occupies 1 + nr_frags consecutive slots and
         * only the first slot records the skb pointer, so advance by
         * that many slots once a packet is found.
         */
        for (i = 0; i < TG3_TX_RING_SIZE; ) {
                struct tx_ring_info *txp;
                struct sk_buff *skb;

                txp = &tp->tx_buffers[i];
                skb = txp->skb;

                if (skb == NULL) {
                        i++;
                        continue;
                }

                skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);

                txp->skb = NULL;

                i += skb_shinfo(skb)->nr_frags + 1;

                dev_kfree_skb_any(skb);
        }
}
5145
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, or -ENOMEM when not even a single rx buffer
 * (std ring, or jumbo ring when enabled) could be allocated.
 */
static int tg3_init_rings(struct tg3 *tp)
{
        u32 i;

        /* Free up all the SKBs. */
        tg3_free_rings(tp);

        /* Zero out all descriptors. */
        memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
        memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
        memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
        memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

        /* 5780-class chips carry jumbo frames in the standard ring with
         * bigger buffers instead of using a separate jumbo ring.
         */
        tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
        if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
            (tp->dev->mtu > ETH_DATA_LEN))
                tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tp->rx_std[i];
                rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
                        << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                        struct tg3_rx_buffer_desc *rxd;

                        rxd = &tp->rx_jumbo[i];
                        rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
                                << RXD_LEN_SHIFT;
                        rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                RXD_FLAG_JUMBO;
                        rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
                }
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
                        /* Partial allocation is tolerated: run with a
                         * smaller ring unless nothing was allocated.
                         */
                        printk(KERN_WARNING PFX
                               "%s: Using a smaller RX standard ring, "
                               "only %d out of %d buffers were allocated "
                               "successfully.\n",
                               tp->dev->name, i, tp->rx_pending);
                        if (i == 0)
                                return -ENOMEM;
                        tp->rx_pending = i;
                        break;
                }
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < tp->rx_jumbo_pending; i++) {
                        if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
                                             -1, i) < 0) {
                                printk(KERN_WARNING PFX
                                       "%s: Using a smaller RX jumbo ring, "
                                       "only %d out of %d buffers were "
                                       "allocated successfully.\n",
                                       tp->dev->name, i, tp->rx_jumbo_pending);
                                if (i == 0) {
                                        /* Release the std-ring buffers
                                         * allocated above as well.
                                         */
                                        tg3_free_rings(tp);
                                        return -ENOMEM;
                                }
                                tp->rx_jumbo_pending = i;
                                break;
                        }
                }
        }
        return 0;
}
5235
5236 /*
5237  * Must not be invoked with interrupt sources disabled and
5238  * the hardware shutdown down.
5239  */
5240 static void tg3_free_consistent(struct tg3 *tp)
5241 {
5242         kfree(tp->rx_std_buffers);
5243         tp->rx_std_buffers = NULL;
5244         if (tp->rx_std) {
5245                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5246                                     tp->rx_std, tp->rx_std_mapping);
5247                 tp->rx_std = NULL;
5248         }
5249         if (tp->rx_jumbo) {
5250                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5251                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
5252                 tp->rx_jumbo = NULL;
5253         }
5254         if (tp->rx_rcb) {
5255                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5256                                     tp->rx_rcb, tp->rx_rcb_mapping);
5257                 tp->rx_rcb = NULL;
5258         }
5259         if (tp->tx_ring) {
5260                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5261                         tp->tx_ring, tp->tx_desc_mapping);
5262                 tp->tx_ring = NULL;
5263         }
5264         if (tp->hw_status) {
5265                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5266                                     tp->hw_status, tp->status_mapping);
5267                 tp->hw_status = NULL;
5268         }
5269         if (tp->hw_stats) {
5270                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5271                                     tp->hw_stats, tp->stats_mapping);
5272                 tp->hw_stats = NULL;
5273         }
5274 }
5275
5276 /*
5277  * Must not be invoked with interrupt sources disabled and
5278  * the hardware shutdown down.  Can sleep.
5279  */
static int tg3_alloc_consistent(struct tg3 *tp)
{
        /* One kzalloc covers the std rx, jumbo rx and tx bookkeeping
         * arrays; the three pointers below carve it into sections.
         */
        tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
                                      (TG3_RX_RING_SIZE +
                                       TG3_RX_JUMBO_RING_SIZE)) +
                                     (sizeof(struct tx_ring_info) *
                                      TG3_TX_RING_SIZE),
                                     GFP_KERNEL);
        if (!tp->rx_std_buffers)
                return -ENOMEM;

        tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
        tp->tx_buffers = (struct tx_ring_info *)
                &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

        /* Descriptor rings, status block and statistics block all live
         * in DMA-coherent memory; on any failure, fall through to
         * tg3_free_consistent() which releases whatever was obtained.
         */
        tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                          &tp->rx_std_mapping);
        if (!tp->rx_std)
                goto err_out;

        tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
                                            &tp->rx_jumbo_mapping);

        if (!tp->rx_jumbo)
                goto err_out;

        tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
                                          &tp->rx_rcb_mapping);
        if (!tp->rx_rcb)
                goto err_out;

        tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
                                           &tp->tx_desc_mapping);
        if (!tp->tx_ring)
                goto err_out;

        tp->hw_status = pci_alloc_consistent(tp->pdev,
                                             TG3_HW_STATUS_SIZE,
                                             &tp->status_mapping);
        if (!tp->hw_status)
                goto err_out;

        tp->hw_stats = pci_alloc_consistent(tp->pdev,
                                            sizeof(struct tg3_hw_stats),
                                            &tp->stats_mapping);
        if (!tp->hw_stats)
                goto err_out;

        /* Start with a clean status block and statistics block. */
        memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return 0;

err_out:
        tg3_free_consistent(tp);
        return -ENOMEM;
}
5337
5338 #define MAX_WAIT_CNT 1000
5339
5340 /* To stop a block, clear the enable bit and poll till it
5341  * clears.  tp->lock is held.
5342  */
5343 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5344 {
5345         unsigned int i;
5346         u32 val;
5347
5348         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5349                 switch (ofs) {
5350                 case RCVLSC_MODE:
5351                 case DMAC_MODE:
5352                 case MBFREE_MODE:
5353                 case BUFMGR_MODE:
5354                 case MEMARB_MODE:
5355                         /* We can't enable/disable these bits of the
5356                          * 5705/5750, just say success.
5357                          */
5358                         return 0;
5359
5360                 default:
5361                         break;
5362                 }
5363         }
5364
5365         val = tr32(ofs);
5366         val &= ~enable_bit;
5367         tw32_f(ofs, val);
5368
5369         for (i = 0; i < MAX_WAIT_CNT; i++) {
5370                 udelay(100);
5371                 val = tr32(ofs);
5372                 if ((val & enable_bit) == 0)
5373                         break;
5374         }
5375
5376         if (i == MAX_WAIT_CNT && !silent) {
5377                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5378                        "ofs=%lx enable_bit=%x\n",
5379                        ofs, enable_bit);
5380                 return -ENODEV;
5381         }
5382
5383         return 0;
5384 }
5385
5386 /* tp->lock is held. */
/* Stop all MAC rx/tx state machines and DMA engines in the order the
 * hardware requires, then clear the status and statistics blocks.
 * Individual block-stop failures are OR-ed into the return value
 * (0 or a negative errno); with @silent set, timeouts in
 * tg3_stop_block() are not treated as errors.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Stop receiving first so no new work arrives. */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        /* Receive-side blocks. */
        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        /* Send-side blocks and DMA engines. */
        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        /* MAC transmit is polled directly rather than via tg3_stop_block. */
        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
                       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
                       tp->dev->name, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse the FTQ reset. */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        if (tp->hw_status)
                memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        if (tp->hw_stats)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return err;
}
5448
5449 /* tp->lock is held. */
5450 static int tg3_nvram_lock(struct tg3 *tp)
5451 {
5452         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5453                 int i;
5454
5455                 if (tp->nvram_lock_cnt == 0) {
5456                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5457                         for (i = 0; i < 8000; i++) {
5458                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5459                                         break;
5460                                 udelay(20);
5461                         }
5462                         if (i == 8000) {
5463                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5464                                 return -ENODEV;
5465                         }
5466                 }
5467                 tp->nvram_lock_cnt++;
5468         }
5469         return 0;
5470 }
5471
5472 /* tp->lock is held. */
5473 static void tg3_nvram_unlock(struct tg3 *tp)
5474 {
5475         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5476                 if (tp->nvram_lock_cnt > 0)
5477                         tp->nvram_lock_cnt--;
5478                 if (tp->nvram_lock_cnt == 0)
5479                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5480         }
5481 }
5482
5483 /* tp->lock is held. */
5484 static void tg3_enable_nvram_access(struct tg3 *tp)
5485 {
5486         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5487             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5488                 u32 nvaccess = tr32(NVRAM_ACCESS);
5489
5490                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5491         }
5492 }
5493
5494 /* tp->lock is held. */
5495 static void tg3_disable_nvram_access(struct tg3 *tp)
5496 {
5497         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5498             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5499                 u32 nvaccess = tr32(NVRAM_ACCESS);
5500
5501                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5502         }
5503 }
5504
/* Post a driver event to the APE (Application Processing Engine)
 * firmware.  Silently does nothing unless the APE shared-memory
 * signature is present and the firmware reports itself ready.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* Bail if the APE shared memory segment signature is absent. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	/* Bail if the APE firmware has not reported itself ready. */
	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Previous event serviced; post ours with PENDING set. */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the APE doorbell only if our event was actually posted
	 * (i.e. the pending bit was observed clear above).
	 */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
5540
5541 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5542 {
5543         u32 event;
5544         u32 apedata;
5545
5546         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5547                 return;
5548
5549         switch (kind) {
5550                 case RESET_KIND_INIT:
5551                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5552                                         APE_HOST_SEG_SIG_MAGIC);
5553                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5554                                         APE_HOST_SEG_LEN_MAGIC);
5555                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5556                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5557                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5558                                         APE_HOST_DRIVER_ID_MAGIC);
5559                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5560                                         APE_HOST_BEHAV_NO_PHYLOCK);
5561
5562                         event = APE_EVENT_STATUS_STATE_START;
5563                         break;
5564                 case RESET_KIND_SHUTDOWN:
5565                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5566                         break;
5567                 case RESET_KIND_SUSPEND:
5568                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5569                         break;
5570                 default:
5571                         return;
5572         }
5573
5574         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5575
5576         tg3_ape_send_event(tp, event);
5577 }
5578
5579 /* tp->lock is held. */
5580 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5581 {
5582         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5583                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5584
5585         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5586                 switch (kind) {
5587                 case RESET_KIND_INIT:
5588                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5589                                       DRV_STATE_START);
5590                         break;
5591
5592                 case RESET_KIND_SHUTDOWN:
5593                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5594                                       DRV_STATE_UNLOAD);
5595                         break;
5596
5597                 case RESET_KIND_SUSPEND:
5598                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5599                                       DRV_STATE_SUSPEND);
5600                         break;
5601
5602                 default:
5603                         break;
5604                 }
5605         }
5606
5607         if (kind == RESET_KIND_INIT ||
5608             kind == RESET_KIND_SUSPEND)
5609                 tg3_ape_driver_state_change(tp, kind);
5610 }
5611
5612 /* tp->lock is held. */
5613 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5614 {
5615         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5616                 switch (kind) {
5617                 case RESET_KIND_INIT:
5618                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5619                                       DRV_STATE_START_DONE);
5620                         break;
5621
5622                 case RESET_KIND_SHUTDOWN:
5623                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5624                                       DRV_STATE_UNLOAD_DONE);
5625                         break;
5626
5627                 default:
5628                         break;
5629                 }
5630         }
5631
5632         if (kind == RESET_KIND_SHUTDOWN)
5633                 tg3_ape_driver_state_change(tp, kind);
5634 }
5635
5636 /* tp->lock is held. */
5637 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5638 {
5639         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5640                 switch (kind) {
5641                 case RESET_KIND_INIT:
5642                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5643                                       DRV_STATE_START);
5644                         break;
5645
5646                 case RESET_KIND_SHUTDOWN:
5647                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5648                                       DRV_STATE_UNLOAD);
5649                         break;
5650
5651                 case RESET_KIND_SUSPEND:
5652                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5653                                       DRV_STATE_SUSPEND);
5654                         break;
5655
5656                 default:
5657                         break;
5658                 }
5659         }
5660 }
5661
5662 static int tg3_poll_fw(struct tg3 *tp)
5663 {
5664         int i;
5665         u32 val;
5666
5667         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5668                 /* Wait up to 20ms for init done. */
5669                 for (i = 0; i < 200; i++) {
5670                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5671                                 return 0;
5672                         udelay(100);
5673                 }
5674                 return -ENODEV;
5675         }
5676
5677         /* Wait for firmware initialization to complete. */
5678         for (i = 0; i < 100000; i++) {
5679                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5680                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5681                         break;
5682                 udelay(10);
5683         }
5684
5685         /* Chip might not be fitted with firmware.  Some Sun onboard
5686          * parts are configured like that.  So don't signal the timeout
5687          * of the above loop as an error, but do report the lack of
5688          * running firmware once.
5689          */
5690         if (i >= 100000 &&
5691             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5692                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5693
5694                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5695                        tp->dev->name);
5696         }
5697
5698         return 0;
5699 }
5700
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* The GRC_MISC_CFG core clock reset clears the memory enable bit
	 * in PCI register 4 on some chips (see tg3_chip_reset()), so keep
	 * a copy for tg3_restore_pci_state() to write back afterwards.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5706
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the command word saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
		pcie_set_readrq(tp->pdev, 4096);
	else {
		/* Legacy PCI: write back the cached cache-line size and
		 * latency timer values.
		 */
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5768
5769 static void tg3_stop_fw(struct tg3 *);
5770
/* tp->lock is held.
 *
 * Perform a full GRC core-clock reset of the chip, then restore PCI
 * state, memory arbiter, GRC mode and MAC mode, wait for firmware to
 * come back up, and re-probe the ASF enable state.  Returns 0 on
 * success or a negative errno from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	tg3_mdio_stop(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	/* Zero the fastboot program counter on chips that have one. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c is an undocumented PCIe config
		 * offset; values 0x60/0x20 are magic from the vendor.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	/* This write kicks off the core-clock reset. */
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	tg3_restore_pci_state(tp);

	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	/* Re-enable the memory arbiter, preserving mode bits on
	 * 5780-class chips.
	 */
	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Reprogram the MAC port mode according to the PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		/* Keep only the APE TX/RX enable bits across the reset. */
		tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
		if (tp->mac_mode & MAC_MODE_APE_TX_EN)
			tp->mac_mode |= MAC_MODE_TDE_ENABLE;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	tg3_mdio_start(tp);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			tp->last_event_jiffies = jiffies;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
5974
5975 /* tp->lock is held. */
5976 static void tg3_stop_fw(struct tg3 *tp)
5977 {
5978         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5979            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5980                 /* Wait for RX cpu to ACK the previous event. */
5981                 tg3_wait_for_event_ack(tp);
5982
5983                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5984
5985                 tg3_generate_fw_event(tp);
5986
5987                 /* Wait for RX cpu to ACK this event. */
5988                 tg3_wait_for_event_ack(tp);
5989         }
5990 }
5991
/* tp->lock is held.
 *
 * Bring the chip down: pause ASF firmware, signal the pre-reset state,
 * abort the hardware, reset the chip, then post the legacy and
 * new-handshake reset signatures.  Returns the tg3_chip_reset() result
 * (0 on success, negative errno on failure); the post-reset signatures
 * are written even when the reset itself failed.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	/* The trailing "if (err) return err; return 0;" collapsed to a
	 * single return: err is already 0 on success.
	 */
	return err;
}
6012
6013 #define TG3_FW_RELEASE_MAJOR    0x0
6014 #define TG3_FW_RELASE_MINOR     0x0
6015 #define TG3_FW_RELEASE_FIX      0x0
6016 #define TG3_FW_START_ADDR       0x08000000
6017 #define TG3_FW_TEXT_ADDR        0x08000000
6018 #define TG3_FW_TEXT_LEN         0x9c0
6019 #define TG3_FW_RODATA_ADDR      0x080009c0
6020 #define TG3_FW_RODATA_LEN       0x60
6021 #define TG3_FW_DATA_ADDR        0x08000a40
6022 #define TG3_FW_DATA_LEN         0x20
6023 #define TG3_FW_SBSS_ADDR        0x08000a60
6024 #define TG3_FW_SBSS_LEN         0xc
6025 #define TG3_FW_BSS_ADDR         0x08000a70
6026 #define TG3_FW_BSS_LEN          0x10
6027
/* Firmware .text segment: raw 32-bit instruction words loaded into an
 * on-chip CPU by tg3_load_firmware_cpu() (layout per the TG3_FW_*
 * defines above).  Generated data -- do not edit by hand.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
6121
/* Firmware .rodata segment; the words are little-endian ASCII message
 * strings used by the firmware (e.g. 0x66617461,0x6c457272 = "fatalErr").
 * Generated data -- do not edit by hand.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
6129
6130 #if 0 /* All zeros, don't eat up space with it. */
6131 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6132         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6133         0x00000000, 0x00000000, 0x00000000, 0x00000000
6134 };
6135 #endif
6136
6137 #define RX_CPU_SCRATCH_BASE     0x30000
6138 #define RX_CPU_SCRATCH_SIZE     0x04000
6139 #define TX_CPU_SCRATCH_BASE     0x34000
6140 #define TX_CPU_SCRATCH_SIZE     0x04000
6141
6142 /* tp->lock is held. */
6143 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6144 {
6145         int i;
6146
6147         BUG_ON(offset == TX_CPU_BASE &&
6148             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6149
6150         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6151                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6152
6153                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6154                 return 0;
6155         }
6156         if (offset == RX_CPU_BASE) {
6157                 for (i = 0; i < 10000; i++) {
6158                         tw32(offset + CPU_STATE, 0xffffffff);
6159                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6160                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6161                                 break;
6162                 }
6163
6164                 tw32(offset + CPU_STATE, 0xffffffff);
6165                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
6166                 udelay(10);
6167         } else {
6168                 for (i = 0; i < 10000; i++) {
6169                         tw32(offset + CPU_STATE, 0xffffffff);
6170                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6171                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6172                                 break;
6173                 }
6174         }
6175
6176         if (i >= 10000) {
6177                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6178                        "and %s CPU\n",
6179                        tp->dev->name,
6180                        (offset == RX_CPU_BASE ? "RX" : "TX"));
6181                 return -ENODEV;
6182         }
6183
6184         /* Clear firmware's nvram arbitration. */
6185         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6186                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6187         return 0;
6188 }
6189
/* Describes one firmware image: the target load address, length in
 * bytes, and payload words of each of the .text, .rodata and .data
 * sections.  A NULL *_data pointer means the section is all zeros
 * (tg3_load_firmware_cpu writes zeros in that case).
 */
struct fw_info {
	unsigned int text_base;		/* target address of .text */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;		/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* target address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* target address of .data */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* .data words, or NULL for zeros */
};
6201
6202 /* tp->lock is held. */
6203 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6204                                  int cpu_scratch_size, struct fw_info *info)
6205 {
6206         int err, lock_err, i;
6207         void (*write_op)(struct tg3 *, u32, u32);
6208
6209         if (cpu_base == TX_CPU_BASE &&
6210             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6211                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6212                        "TX cpu firmware on %s which is 5705.\n",
6213                        tp->dev->name);
6214                 return -EINVAL;
6215         }
6216
6217         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6218                 write_op = tg3_write_mem;
6219         else
6220                 write_op = tg3_write_indirect_reg32;
6221
6222         /* It is possible that bootcode is still loading at this point.
6223          * Get the nvram lock first before halting the cpu.
6224          */
6225         lock_err = tg3_nvram_lock(tp);
6226         err = tg3_halt_cpu(tp, cpu_base);
6227         if (!lock_err)
6228                 tg3_nvram_unlock(tp);
6229         if (err)
6230                 goto out;
6231
6232         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6233                 write_op(tp, cpu_scratch_base + i, 0);
6234         tw32(cpu_base + CPU_STATE, 0xffffffff);
6235         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6236         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6237                 write_op(tp, (cpu_scratch_base +
6238                               (info->text_base & 0xffff) +
6239                               (i * sizeof(u32))),
6240                          (info->text_data ?
6241                           info->text_data[i] : 0));
6242         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6243                 write_op(tp, (cpu_scratch_base +
6244                               (info->rodata_base & 0xffff) +
6245                               (i * sizeof(u32))),
6246                          (info->rodata_data ?
6247                           info->rodata_data[i] : 0));
6248         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6249                 write_op(tp, (cpu_scratch_base +
6250                               (info->data_base & 0xffff) +
6251                               (i * sizeof(u32))),
6252                          (info->data_data ?
6253                           info->data_data[i] : 0));
6254
6255         err = 0;
6256
6257 out:
6258         return err;
6259 }
6260
6261 /* tp->lock is held. */
/* Load the 5701 A0 workaround firmware into both the RX and TX CPU
 * scratch areas (the same image is used for both), then start only
 * the RX CPU and verify its program counter took the entry address.
 * Returns 0 on success or a negative errno.  Caller holds tp->lock.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	int err, i;

	info.text_base = TG3_FW_TEXT_ADDR;
	info.text_len = TG3_FW_TEXT_LEN;
	info.text_data = &tg3FwText[0];
	info.rodata_base = TG3_FW_RODATA_ADDR;
	info.rodata_len = TG3_FW_RODATA_LEN;
	info.rodata_data = &tg3FwRodata[0];
	info.data_base = TG3_FW_DATA_ADDR;
	info.data_len = TG3_FW_DATA_LEN;
	/* .data section is all zeros; the loader writes zeros for NULL. */
	info.data_data = NULL;

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);

	/* Poll up to 5 times for the PC to land on the entry point,
	 * re-halting and re-writing the PC between attempts.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
		       "to set RX CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
		       TG3_FW_TEXT_ADDR);
		return -ENODEV;
	}
	/* Clear the halt bit to let the RX CPU run. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
6313
6314
/* Section layout of the TSO offload firmware image, release v1.6.0.
 * NOTE(review): "RELASE" in the minor-version macro is a historical
 * typo; the name is presumably referenced elsewhere in the driver, so
 * it is left unchanged.
 */
#define TG3_TSO_FW_RELEASE_MAJOR        0x1
#define TG3_TSO_FW_RELASE_MINOR         0x6
#define TG3_TSO_FW_RELEASE_FIX          0x0
#define TG3_TSO_FW_START_ADDR           0x08000000
#define TG3_TSO_FW_TEXT_ADDR            0x08000000
#define TG3_TSO_FW_TEXT_LEN             0x1aa0
#define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
#define TG3_TSO_FW_RODATA_LEN           0x60
#define TG3_TSO_FW_DATA_ADDR            0x08001b20
#define TG3_TSO_FW_DATA_LEN             0x30
#define TG3_TSO_FW_SBSS_ADDR            0x08001b50
#define TG3_TSO_FW_SBSS_LEN             0x2c
#define TG3_TSO_FW_BSS_ADDR             0x08001b80
#define TG3_TSO_FW_BSS_LEN              0x894
6329
/* .text section of the TSO offload firmware (instruction words for
 * the on-chip CPU).  Opaque generated data -- do not hand-edit; see
 * the copyright/permission notice at the top of this file.
 */
static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
	0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
	0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
	0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
	0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
	0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
	0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
	0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
	0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
	0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
	0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
	0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
	0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
	0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
	0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
	0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
	0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
	0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
	0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
	0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
	0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
	0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
	0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
	0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
	0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
	0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
	0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
	0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
	0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
	0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
	0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
	0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
	0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
	0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
	0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
	0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
	0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
	0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
	0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
	0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
	0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
	0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
	0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
	0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
	0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
	0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
	0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
	0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
	0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
	0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
	0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
	0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
	0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
	0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
	0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
	0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
	0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
	0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
	0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
	0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
	0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
	0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
	0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
	0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
	0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
	0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
	0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
	0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
	0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
	0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
	0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
	0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
	0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
	0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
	0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
	0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
	0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
	0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
	0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
	0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
	0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
	0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
	0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
	0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
	0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
	0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
	0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
	0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
	0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
	0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
	0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
	0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
	0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
	0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
	0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
	0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
	0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
	0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
	0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
	0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
	0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
	0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
	0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
	0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
	0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
	0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
	0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
	0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
	0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
	0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
	0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
	0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
	0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
	0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
	0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
	0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
	0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
	0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
	0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
	0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
	0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
	0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
	0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
	0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
	0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
	0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
	0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
	0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
	0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
	0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
	0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
	0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
	0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
	0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
	0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
	0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
	0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
	0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
	0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
	0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
	0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
	0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
	0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
	0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
	0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
	0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
	0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
	0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
	0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
	0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
	0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
	0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
	0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
	0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
	0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
	0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
	0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
	0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
	0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
	0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
	0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
	0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
	0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
	0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
	0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
	0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
	0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
	0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
	0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
	0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
	0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
	0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
	0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
	0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
	0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
	0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
	0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
	0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
	0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
	0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
	0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
	0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
	0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
	0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
	0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
	0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
	0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
	0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
	0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
	0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
	0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
	0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
	0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
	0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
	0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
	0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
	0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
	0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
	0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
	0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
	0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
	0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
	0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
	0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
	0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
	0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
	0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
	0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
	0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
	0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
	0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
	0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
	0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
	0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
	0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
	0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
	0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
	0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
	0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
	0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
	0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
	0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
	0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
	0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
	0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
	0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
	0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
	0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
	0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
	0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
	0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
	0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
	0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
	0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
	0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
	0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
	0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
	0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
	0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
	0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
	0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
	0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
	0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
	0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
	0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
	0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
	0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
	0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
	0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
	0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
	0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
	0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
};
6616
/* .rodata of the TSO firmware: ASCII diagnostic strings such as
 * "MainCpuB" (0x4d61696e 0x43707542), "stkoffld" and "fatalErr".
 * Generated data -- do not hand-edit.
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
6624
/* .data of the TSO firmware; contains the embedded version string
 * "stkoffld_v1.6.0" (0x73746b6f 0x66666c64 0x5f76312e 0x362e3000).
 * Generated data -- do not hand-edit.
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
6630
/* 5705 needs a special version of the TSO firmware.  */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
/* NOTE(review): "RELASE" is a long-standing typo for "RELEASE"; the macro
 * name is kept as-is since it may be referenced by this spelling elsewhere.
 */
#define TG3_TSO5_FW_RELASE_MINOR	0x2
#define TG3_TSO5_FW_RELEASE_FIX		0x0
/* Link-time layout of the 5705 TSO firmware image in NIC SRAM: base
 * address and byte length of each section (text/rodata/data/sbss/bss).
 * Consumed by tg3_load_tso_firmware() and by the mbuf pool sizing code
 * in tg3_reset_hw().
 */
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
6646
/* Instruction (text) section of the 5705-specific TSO firmware image
 * (opaque MIPS machine code); copied into NIC SRAM at
 * TG3_TSO5_FW_TEXT_ADDR by tg3_load_tso_firmware() and executed by the
 * on-chip RX CPU.  Do not edit the data by hand.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
6805
/* Read-only data section of the 5705 TSO firmware image; copied into
 * NIC SRAM at TG3_TSO5_FW_RODATA_ADDR by tg3_load_tso_firmware().
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
6812
/* Initialized data section of the 5705 TSO firmware image; copied into
 * NIC SRAM at TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware().
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
6817
6818 /* tp->lock is held. */
6819 static int tg3_load_tso_firmware(struct tg3 *tp)
6820 {
6821         struct fw_info info;
6822         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6823         int err, i;
6824
6825         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6826                 return 0;
6827
6828         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6829                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6830                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6831                 info.text_data = &tg3Tso5FwText[0];
6832                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6833                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6834                 info.rodata_data = &tg3Tso5FwRodata[0];
6835                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6836                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6837                 info.data_data = &tg3Tso5FwData[0];
6838                 cpu_base = RX_CPU_BASE;
6839                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6840                 cpu_scratch_size = (info.text_len +
6841                                     info.rodata_len +
6842                                     info.data_len +
6843                                     TG3_TSO5_FW_SBSS_LEN +
6844                                     TG3_TSO5_FW_BSS_LEN);
6845         } else {
6846                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6847                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6848                 info.text_data = &tg3TsoFwText[0];
6849                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6850                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6851                 info.rodata_data = &tg3TsoFwRodata[0];
6852                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6853                 info.data_len = TG3_TSO_FW_DATA_LEN;
6854                 info.data_data = &tg3TsoFwData[0];
6855                 cpu_base = TX_CPU_BASE;
6856                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6857                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6858         }
6859
6860         err = tg3_load_firmware_cpu(tp, cpu_base,
6861                                     cpu_scratch_base, cpu_scratch_size,
6862                                     &info);
6863         if (err)
6864                 return err;
6865
6866         /* Now startup the cpu. */
6867         tw32(cpu_base + CPU_STATE, 0xffffffff);
6868         tw32_f(cpu_base + CPU_PC,    info.text_base);
6869
6870         for (i = 0; i < 5; i++) {
6871                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6872                         break;
6873                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6874                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6875                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6876                 udelay(1000);
6877         }
6878         if (i >= 5) {
6879                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6880                        "to set CPU PC, is %08x should be %08x\n",
6881                        tp->dev->name, tr32(cpu_base + CPU_PC),
6882                        info.text_base);
6883                 return -ENODEV;
6884         }
6885         tw32(cpu_base + CPU_STATE, 0xffffffff);
6886         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6887         return 0;
6888 }
6889
6890
6891 /* tp->lock is held. */
6892 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6893 {
6894         u32 addr_high, addr_low;
6895         int i;
6896
6897         addr_high = ((tp->dev->dev_addr[0] << 8) |
6898                      tp->dev->dev_addr[1]);
6899         addr_low = ((tp->dev->dev_addr[2] << 24) |
6900                     (tp->dev->dev_addr[3] << 16) |
6901                     (tp->dev->dev_addr[4] <<  8) |
6902                     (tp->dev->dev_addr[5] <<  0));
6903         for (i = 0; i < 4; i++) {
6904                 if (i == 1 && skip_mac_1)
6905                         continue;
6906                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6907                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6908         }
6909
6910         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6911             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6912                 for (i = 0; i < 12; i++) {
6913                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6914                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6915                 }
6916         }
6917
6918         addr_high = (tp->dev->dev_addr[0] +
6919                      tp->dev->dev_addr[1] +
6920                      tp->dev->dev_addr[2] +
6921                      tp->dev->dev_addr[3] +
6922                      tp->dev->dev_addr[4] +
6923                      tp->dev->dev_addr[5]) &
6924                 TX_BACKOFF_SEED_MASK;
6925         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6926 }
6927
6928 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6929 {
6930         struct tg3 *tp = netdev_priv(dev);
6931         struct sockaddr *addr = p;
6932         int err = 0, skip_mac_1 = 0;
6933
6934         if (!is_valid_ether_addr(addr->sa_data))
6935                 return -EINVAL;
6936
6937         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6938
6939         if (!netif_running(dev))
6940                 return 0;
6941
6942         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6943                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6944
6945                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6946                 addr0_low = tr32(MAC_ADDR_0_LOW);
6947                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6948                 addr1_low = tr32(MAC_ADDR_1_LOW);
6949
6950                 /* Skip MAC addr 1 if ASF is using it. */
6951                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6952                     !(addr1_high == 0 && addr1_low == 0))
6953                         skip_mac_1 = 1;
6954         }
6955         spin_lock_bh(&tp->lock);
6956         __tg3_set_mac_addr(tp, skip_mac_1);
6957         spin_unlock_bh(&tp->lock);
6958
6959         return err;
6960 }
6961
6962 /* tp->lock is held. */
6963 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6964                            dma_addr_t mapping, u32 maxlen_flags,
6965                            u32 nic_addr)
6966 {
6967         tg3_write_mem(tp,
6968                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6969                       ((u64) mapping >> 32));
6970         tg3_write_mem(tp,
6971                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6972                       ((u64) mapping & 0xffffffff));
6973         tg3_write_mem(tp,
6974                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6975                        maxlen_flags);
6976
6977         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6978                 tg3_write_mem(tp,
6979                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6980                               nic_addr);
6981 }
6982
6983 static void __tg3_set_rx_mode(struct net_device *);
6984 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6985 {
6986         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6987         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6988         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6989         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6990         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6991                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6992                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6993         }
6994         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6995         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6996         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6997                 u32 val = ec->stats_block_coalesce_usecs;
6998
6999                 if (!netif_carrier_ok(tp->dev))
7000                         val = 0;
7001
7002                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7003         }
7004 }
7005
7006 /* tp->lock is held. */
7007 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7008 {
7009         u32 val, rdmac_mode;
7010         int i, err, limit;
7011
7012         tg3_disable_ints(tp);
7013
7014         tg3_stop_fw(tp);
7015
7016         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7017
7018         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7019                 tg3_abort_hw(tp, 1);
7020         }
7021
7022         if (reset_phy &&
7023             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7024                 tg3_phy_reset(tp);
7025
7026         err = tg3_chip_reset(tp);
7027         if (err)
7028                 return err;
7029
7030         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7031
7032         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
7033             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
7034                 val = tr32(TG3_CPMU_CTRL);
7035                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7036                 tw32(TG3_CPMU_CTRL, val);
7037
7038                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7039                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7040                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7041                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7042
7043                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7044                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7045                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7046                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7047
7048                 val = tr32(TG3_CPMU_HST_ACC);
7049                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7050                 val |= CPMU_HST_ACC_MACCLK_6_25;
7051                 tw32(TG3_CPMU_HST_ACC, val);
7052         }
7053
7054         /* This works around an issue with Athlon chipsets on
7055          * B3 tigon3 silicon.  This bit has no effect on any
7056          * other revision.  But do not set this on PCI Express
7057          * chips and don't even touch the clocks if the CPMU is present.
7058          */
7059         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7060                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7061                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7062                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7063         }
7064
7065         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7066             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7067                 val = tr32(TG3PCI_PCISTATE);
7068                 val |= PCISTATE_RETRY_SAME_DMA;
7069                 tw32(TG3PCI_PCISTATE, val);
7070         }
7071
7072         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7073                 /* Allow reads and writes to the
7074                  * APE register and memory space.
7075                  */
7076                 val = tr32(TG3PCI_PCISTATE);
7077                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7078                        PCISTATE_ALLOW_APE_SHMEM_WR;
7079                 tw32(TG3PCI_PCISTATE, val);
7080         }
7081
7082         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7083                 /* Enable some hw fixes.  */
7084                 val = tr32(TG3PCI_MSI_DATA);
7085                 val |= (1 << 26) | (1 << 28) | (1 << 29);
7086                 tw32(TG3PCI_MSI_DATA, val);
7087         }
7088
7089         /* Descriptor ring init may make accesses to the
7090          * NIC SRAM area to setup the TX descriptors, so we
7091          * can only do this after the hardware has been
7092          * successfully reset.
7093          */
7094         err = tg3_init_rings(tp);
7095         if (err)
7096                 return err;
7097
7098         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7099             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7100             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7101                 /* This value is determined during the probe time DMA
7102                  * engine test, tg3_test_dma.
7103                  */
7104                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7105         }
7106
7107         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7108                           GRC_MODE_4X_NIC_SEND_RINGS |
7109                           GRC_MODE_NO_TX_PHDR_CSUM |
7110                           GRC_MODE_NO_RX_PHDR_CSUM);
7111         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7112
7113         /* Pseudo-header checksum is done by hardware logic and not
7114          * the offload processers, so make the chip do the pseudo-
7115          * header checksums on receive.  For transmit it is more
7116          * convenient to do the pseudo-header checksum in software
7117          * as Linux does that on transmit for us in all cases.
7118          */
7119         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7120
7121         tw32(GRC_MODE,
7122              tp->grc_mode |
7123              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7124
7125         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
7126         val = tr32(GRC_MISC_CFG);
7127         val &= ~0xff;
7128         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7129         tw32(GRC_MISC_CFG, val);
7130
7131         /* Initialize MBUF/DESC pool. */
7132         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7133                 /* Do nothing.  */
7134         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7135                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7136                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7137                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7138                 else
7139                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7140                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7141                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7142         }
7143         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7144                 int fw_len;
7145
7146                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7147                           TG3_TSO5_FW_RODATA_LEN +
7148                           TG3_TSO5_FW_DATA_LEN +
7149                           TG3_TSO5_FW_SBSS_LEN +
7150                           TG3_TSO5_FW_BSS_LEN);
7151                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7152                 tw32(BUFMGR_MB_POOL_ADDR,
7153                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7154                 tw32(BUFMGR_MB_POOL_SIZE,
7155                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7156         }
7157
7158         if (tp->dev->mtu <= ETH_DATA_LEN) {
7159                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7160                      tp->bufmgr_config.mbuf_read_dma_low_water);
7161                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7162                      tp->bufmgr_config.mbuf_mac_rx_low_water);
7163                 tw32(BUFMGR_MB_HIGH_WATER,
7164                      tp->bufmgr_config.mbuf_high_water);
7165         } else {
7166                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7167                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7168                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7169                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7170                 tw32(BUFMGR_MB_HIGH_WATER,
7171                      tp->bufmgr_config.mbuf_high_water_jumbo);
7172         }
7173         tw32(BUFMGR_DMA_LOW_WATER,
7174              tp->bufmgr_config.dma_low_water);
7175         tw32(BUFMGR_DMA_HIGH_WATER,
7176              tp->bufmgr_config.dma_high_water);
7177
7178         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7179         for (i = 0; i < 2000; i++) {
7180                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7181                         break;
7182                 udelay(10);
7183         }
7184         if (i >= 2000) {
7185                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7186                        tp->dev->name);
7187                 return -ENODEV;
7188         }
7189
7190         /* Setup replenish threshold. */
7191         val = tp->rx_pending / 8;
7192         if (val == 0)
7193                 val = 1;
7194         else if (val > tp->rx_std_max_post)
7195                 val = tp->rx_std_max_post;
7196         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7197                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7198                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7199
7200                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7201                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7202         }
7203
7204         tw32(RCVBDI_STD_THRESH, val);
7205
7206         /* Initialize TG3_BDINFO's at:
7207          *  RCVDBDI_STD_BD:     standard eth size rx ring
7208          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
7209          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
7210          *
7211          * like so:
7212          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
7213          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
7214          *                              ring attribute flags
7215          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
7216          *
7217          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7218          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7219          *
7220          * The size of each ring is fixed in the firmware, but the location is
7221          * configurable.
7222          */
7223         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7224              ((u64) tp->rx_std_mapping >> 32));
7225         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7226              ((u64) tp->rx_std_mapping & 0xffffffff));
7227         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7228              NIC_SRAM_RX_BUFFER_DESC);
7229
7230         /* Don't even try to program the JUMBO/MINI buffer descriptor
7231          * configs on 5705.
7232          */
7233         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7234                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7235                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7236         } else {
7237                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7238                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7239
7240                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7241                      BDINFO_FLAGS_DISABLED);
7242
7243                 /* Setup replenish threshold. */
7244                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7245
7246                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7247                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7248                              ((u64) tp->rx_jumbo_mapping >> 32));
7249                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7250                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7251                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7252                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7253                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7254                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7255                 } else {
7256                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7257                              BDINFO_FLAGS_DISABLED);
7258                 }
7259
7260         }
7261
7262         /* There is only one send ring on 5705/5750, no need to explicitly
7263          * disable the others.
7264          */
7265         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7266                 /* Clear out send RCB ring in SRAM. */
7267                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7268                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7269                                       BDINFO_FLAGS_DISABLED);
7270         }
7271
7272         tp->tx_prod = 0;
7273         tp->tx_cons = 0;
7274         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7275         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7276
7277         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7278                        tp->tx_desc_mapping,
7279                        (TG3_TX_RING_SIZE <<
7280                         BDINFO_FLAGS_MAXLEN_SHIFT),
7281                        NIC_SRAM_TX_BUFFER_DESC);
7282
7283         /* There is only one receive return ring on 5705/5750, no need
7284          * to explicitly disable the others.
7285          */
7286         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7287                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7288                      i += TG3_BDINFO_SIZE) {
7289                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7290                                       BDINFO_FLAGS_DISABLED);
7291                 }
7292         }
7293
7294         tp->rx_rcb_ptr = 0;
7295         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7296
7297         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7298                        tp->rx_rcb_mapping,
7299                        (TG3_RX_RCB_RING_SIZE(tp) <<
7300                         BDINFO_FLAGS_MAXLEN_SHIFT),
7301                        0);
7302
7303         tp->rx_std_ptr = tp->rx_pending;
7304         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7305                      tp->rx_std_ptr);
7306
7307         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7308                                                 tp->rx_jumbo_pending : 0;
7309         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7310                      tp->rx_jumbo_ptr);
7311
7312         /* Initialize MAC address and backoff seed. */
7313         __tg3_set_mac_addr(tp, 0);
7314
7315         /* MTU + ethernet header + FCS + optional VLAN tag */
7316         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7317
7318         /* The slot time is changed by tg3_setup_phy if we
7319          * run at gigabit with half duplex.
7320          */
7321         tw32(MAC_TX_LENGTHS,
7322              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7323              (6 << TX_LENGTHS_IPG_SHIFT) |
7324              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7325
7326         /* Receive rules. */
7327         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7328         tw32(RCVLPC_CONFIG, 0x0181);
7329
7330         /* Calculate RDMAC_MODE setting early, we need it to determine
7331          * the RCVLPC_STATE_ENABLE mask.
7332          */
7333         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7334                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7335                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7336                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7337                       RDMAC_MODE_LNGREAD_ENAB);
7338
7339         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7340             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7341                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7342                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7343                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7344
7345         /* If statement applies to 5705 and 5750 PCI devices only */
7346         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7347              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7348             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7349                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7350                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7351                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7352                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7353                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7354                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7355                 }
7356         }
7357
7358         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7359                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7360
7361         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7362                 rdmac_mode |= (1 << 27);
7363
7364         /* Receive/send statistics. */
7365         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7366                 val = tr32(RCVLPC_STATS_ENABLE);
7367                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7368                 tw32(RCVLPC_STATS_ENABLE, val);
7369         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7370                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7371                 val = tr32(RCVLPC_STATS_ENABLE);
7372                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7373                 tw32(RCVLPC_STATS_ENABLE, val);
7374         } else {
7375                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7376         }
7377         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7378         tw32(SNDDATAI_STATSENAB, 0xffffff);
7379         tw32(SNDDATAI_STATSCTRL,
7380              (SNDDATAI_SCTRL_ENABLE |
7381               SNDDATAI_SCTRL_FASTUPD));
7382
7383         /* Setup host coalescing engine. */
7384         tw32(HOSTCC_MODE, 0);
7385         for (i = 0; i < 2000; i++) {
7386                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7387                         break;
7388                 udelay(10);
7389         }
7390
7391         __tg3_set_coalesce(tp, &tp->coal);
7392
7393         /* set status block DMA address */
7394         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7395              ((u64) tp->status_mapping >> 32));
7396         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7397              ((u64) tp->status_mapping & 0xffffffff));
7398
7399         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7400                 /* Status/statistics block address.  See tg3_timer,
7401                  * the tg3_periodic_fetch_stats call there, and
7402                  * tg3_get_stats to see how this works for 5705/5750 chips.
7403                  */
7404                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7405                      ((u64) tp->stats_mapping >> 32));
7406                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7407                      ((u64) tp->stats_mapping & 0xffffffff));
7408                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7409                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7410         }
7411
7412         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7413
7414         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7415         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7416         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7417                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7418
7419         /* Clear statistics/status block in chip, and status block in ram. */
7420         for (i = NIC_SRAM_STATS_BLK;
7421              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7422              i += sizeof(u32)) {
7423                 tg3_write_mem(tp, i, 0);
7424                 udelay(40);
7425         }
7426         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7427
7428         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7429                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7430                 /* reset to prevent losing 1st rx packet intermittently */
7431                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7432                 udelay(10);
7433         }
7434
7435         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7436                 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7437         else
7438                 tp->mac_mode = 0;
7439         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7440                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7441         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7442             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7443             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7444                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7445         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7446         udelay(40);
7447
7448         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7449          * If TG3_FLG2_IS_NIC is zero, we should read the
7450          * register to preserve the GPIO settings for LOMs. The GPIOs,
7451          * whether used as inputs or outputs, are set by boot code after
7452          * reset.
7453          */
7454         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7455                 u32 gpio_mask;
7456
7457                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7458                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7459                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7460
7461                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7462                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7463                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7464
7465                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7466                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7467
7468                 tp->grc_local_ctrl &= ~gpio_mask;
7469                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7470
7471                 /* GPIO1 must be driven high for eeprom write protect */
7472                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7473                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7474                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7475         }
7476         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7477         udelay(100);
7478
7479         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7480         tp->last_tag = 0;
7481
7482         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7483                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7484                 udelay(40);
7485         }
7486
7487         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7488                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7489                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7490                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7491                WDMAC_MODE_LNGREAD_ENAB);
7492
7493         /* If statement applies to 5705 and 5750 PCI devices only */
7494         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7495              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7496             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7497                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7498                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7499                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7500                         /* nothing */
7501                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7502                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7503                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7504                         val |= WDMAC_MODE_RX_ACCEL;
7505                 }
7506         }
7507
7508         /* Enable host coalescing bug fix */
7509         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7510             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7511             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7512             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7513             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7514                 val |= WDMAC_MODE_STATUS_TAG_FIX;
7515
7516         tw32_f(WDMAC_MODE, val);
7517         udelay(40);
7518
7519         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7520                 u16 pcix_cmd;
7521
7522                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7523                                      &pcix_cmd);
7524                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7525                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7526                         pcix_cmd |= PCI_X_CMD_READ_2K;
7527                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7528                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7529                         pcix_cmd |= PCI_X_CMD_READ_2K;
7530                 }
7531                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7532                                       pcix_cmd);
7533         }
7534
7535         tw32_f(RDMAC_MODE, rdmac_mode);
7536         udelay(40);
7537
7538         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7539         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7540                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7541
7542         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7543                 tw32(SNDDATAC_MODE,
7544                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7545         else
7546                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7547
7548         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7549         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7550         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7551         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7552         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7553                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7554         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7555         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7556
7557         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7558                 err = tg3_load_5701_a0_firmware_fix(tp);
7559                 if (err)
7560                         return err;
7561         }
7562
7563         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7564                 err = tg3_load_tso_firmware(tp);
7565                 if (err)
7566                         return err;
7567         }
7568
7569         tp->tx_mode = TX_MODE_ENABLE;
7570         tw32_f(MAC_TX_MODE, tp->tx_mode);
7571         udelay(100);
7572
7573         tp->rx_mode = RX_MODE_ENABLE;
7574         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7575             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7576             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7577             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7578                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7579
7580         tw32_f(MAC_RX_MODE, tp->rx_mode);
7581         udelay(10);
7582
7583         tw32(MAC_LED_CTRL, tp->led_ctrl);
7584
7585         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7586         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7587                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7588                 udelay(10);
7589         }
7590         tw32_f(MAC_RX_MODE, tp->rx_mode);
7591         udelay(10);
7592
7593         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7594                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7595                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7596                         /* Set drive transmission level to 1.2V  */
7597                         /* only if the signal pre-emphasis bit is not set  */
7598                         val = tr32(MAC_SERDES_CFG);
7599                         val &= 0xfffff000;
7600                         val |= 0x880;
7601                         tw32(MAC_SERDES_CFG, val);
7602                 }
7603                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7604                         tw32(MAC_SERDES_CFG, 0x616000);
7605         }
7606
7607         /* Prevent chip from dropping frames when flow control
7608          * is enabled.
7609          */
7610         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7611
7612         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7613             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7614                 /* Use hardware link auto-negotiation */
7615                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7616         }
7617
7618         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7619             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7620                 u32 tmp;
7621
7622                 tmp = tr32(SERDES_RX_CTRL);
7623                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7624                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7625                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7626                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7627         }
7628
7629         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7630                 if (tp->link_config.phy_is_low_power) {
7631                         tp->link_config.phy_is_low_power = 0;
7632                         tp->link_config.speed = tp->link_config.orig_speed;
7633                         tp->link_config.duplex = tp->link_config.orig_duplex;
7634                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
7635                 }
7636
7637                 err = tg3_setup_phy(tp, 0);
7638                 if (err)
7639                         return err;
7640
7641                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7642                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7643                         u32 tmp;
7644
7645                         /* Clear CRC stats. */
7646                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7647                                 tg3_writephy(tp, MII_TG3_TEST1,
7648                                              tmp | MII_TG3_TEST1_CRC_EN);
7649                                 tg3_readphy(tp, 0x14, &tmp);
7650                         }
7651                 }
7652         }
7653
7654         __tg3_set_rx_mode(tp->dev);
7655
7656         /* Initialize receive rules. */
7657         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7658         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7659         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7660         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7661
7662         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7663             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7664                 limit = 8;
7665         else
7666                 limit = 16;
7667         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7668                 limit -= 4;
7669         switch (limit) {
7670         case 16:
7671                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7672         case 15:
7673                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7674         case 14:
7675                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7676         case 13:
7677                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7678         case 12:
7679                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7680         case 11:
7681                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7682         case 10:
7683                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7684         case 9:
7685                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7686         case 8:
7687                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7688         case 7:
7689                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7690         case 6:
7691                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7692         case 5:
7693                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7694         case 4:
7695                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7696         case 3:
7697                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7698         case 2:
7699         case 1:
7700
7701         default:
7702                 break;
7703         }
7704
7705         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7706                 /* Write our heartbeat update interval to APE. */
7707                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7708                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7709
7710         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7711
7712         return 0;
7713 }
7714
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative errno from tg3_reset_hw().
 * The three steps below are order-dependent: clocks must be valid and
 * the PCI memory window base cleared before the hardware reset runs.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	/* Clear the PCI memory window base address register so that
	 * subsequent indirect accesses start from offset zero.
	 */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
7726
/* Accumulate the current value of 32-bit hardware counter register REG
 * into the 64-bit software counter PSTAT (kept as ->low / ->high words).
 * If the 32-bit addition wrapped, the new ->low is smaller than the
 * value just added, so carry one into ->high.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
7733
/* Fold the MAC's 32-bit hardware statistics counters into the 64-bit
 * accumulators in tp->hw_stats, using TG3_STAT_ADD32 for carry handling.
 * Called once per second from tg3_timer() on 5705-plus chips (which lack
 * an on-chip statistics block — see the comment in tg3_reset_hw near the
 * HOSTCC_STATS_BLK programming).
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* Nothing to fetch while the link is down. */
	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7774
/* Periodic maintenance timer, re-armed every tp->timer_offset jiffies.
 * Handles the non-tagged-status interrupt race workaround, once-per-second
 * link polling and statistics fetching, and the every-2-seconds ASF
 * firmware heartbeat.  All chip access is done under tp->lock.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Interrupt processing is being quiesced (tp->irq_sync set);
	 * don't touch the chip, just re-arm the timer.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block is pending: force an interrupt so
			 * the handler processes it.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Otherwise kick the coalescing engine to refresh
			 * the status block now.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* If the write DMA engine is no longer enabled, the chip
		 * is wedged; schedule a full reset from process context.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			/* Link state changed; renegotiate. */
			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Momentarily clear the port mode
					 * bits to reset the MAC's link
					 * machinery before reconfiguring.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
		    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7894
7895 static int tg3_request_irq(struct tg3 *tp)
7896 {
7897         irq_handler_t fn;
7898         unsigned long flags;
7899         struct net_device *dev = tp->dev;
7900
7901         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7902                 fn = tg3_msi;
7903                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7904                         fn = tg3_msi_1shot;
7905                 flags = IRQF_SAMPLE_RANDOM;
7906         } else {
7907                 fn = tg3_interrupt;
7908                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7909                         fn = tg3_interrupt_tagged;
7910                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7911         }
7912         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7913 }
7914
/* Verify that the chip can actually deliver an interrupt to the host.
 *
 * Temporarily installs tg3_test_isr, forces the host-coalescing engine
 * to post an interrupt, and polls up to ~50ms for evidence that it
 * fired (non-zero interrupt mailbox, or the PCI interrupt masked by
 * the ISR).  The normal handler is reinstalled before returning.
 *
 * Returns 0 if an interrupt was observed, -EIO if none was seen,
 * -ENODEV if the device is down, or a negative errno from the IRQ
 * setup calls.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap the normal handler for the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick the coalescing engine so it raises an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* Either condition indicates the test ISR ran. */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tp->pdev->irq, dev);

	/* Reinstall the normal handler for the current interrupt mode. */
	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (intr_ok)
		return 0;

	return -EIO;
}
7968
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  Any other error from the interrupt test or
 * chip re-initialization is returned to the caller.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test unless MSI is actually in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Reinstall a (now INTx) handler. */
	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
8029
/* net_device open hook: bring the chip to D0, allocate DMA-consistent
 * rings and status/stat blocks, optionally enable MSI, install the IRQ
 * handler, initialize the hardware, start the periodic timer, and
 * enable interrupts.  Every failure path unwinds everything acquired
 * up to that point.  Returns 0 on success or a negative errno.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		/* Unwind MSI and DMA memory on IRQ setup failure. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* With tagged status the timer can tick once per second;
		 * otherwise it must poll ten times per second.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* tg3_test_msi() may fall back to INTx internally. */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
8165
#if 0
/* Debug-only dump of the chip's register, SRAM, and descriptor state.
 * Compiled out in normal builds (see the matching "#if 0" call site in
 * tg3_close()); enable both together when diagnosing hardware hangs.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
8393
8394 static struct net_device_stats *tg3_get_stats(struct net_device *);
8395 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8396
/* net_device close hook: stop NAPI, pending reset work and the timer,
 * quiesce and halt the hardware, release the IRQ (and MSI), snapshot
 * the running statistics, free DMA memory, and drop the chip to D3hot.
 * Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	napi_disable(&tp->napi);
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot the accumulated counters before tg3_free_consistent()
	 * releases the hw_stats block they are derived from.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
8440
8441 static inline unsigned long get_stat64(tg3_stat64_t *val)
8442 {
8443         unsigned long ret;
8444
8445 #if (BITS_PER_LONG == 32)
8446         ret = val->low;
8447 #else
8448         ret = ((u64)val->high << 32) | ((u64)val->low);
8449 #endif
8450         return ret;
8451 }
8452
8453 static inline u64 get_estat64(tg3_stat64_t *val)
8454 {
8455        return ((u64)val->high << 32) | ((u64)val->low);
8456 }
8457
/* Return the cumulative RX CRC (FCS) error count.
 *
 * On 5700/5701 chips with a copper PHY the count is obtained from the
 * PHY itself: the CRC counter is enabled via MII_TG3_TEST1 and read
 * from register 0x14, then accumulated into tp->phy_crc_errors.  All
 * other configurations use the hardware rx_fcs_errors statistic.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* Serialize PHY access against the rest of the driver. */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
8483
/* Fold one hardware counter into the running ethtool total:
 * new total = snapshot taken at last close + current hardware value.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_estat64(&hw_stats->member)

/* Refresh and return the ethtool statistics block.  Each counter is
 * the value preserved across the last close (estats_prev) plus the
 * live hardware counter.  If the hardware stats block is not mapped
 * (device closed), the preserved snapshot is returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
8575
/* net_device get_stats hook: map the chip's hardware counters onto the
 * generic net_device_stats fields.  Each field is the snapshot saved
 * at the last close (net_stats_prev) plus the current hardware value.
 * If the hardware stats block is not mapped, the snapshot is returned
 * unchanged.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
8635
8636 static inline u32 calc_crc(unsigned char *buf, int len)
8637 {
8638         u32 reg;
8639         u32 tmp;
8640         int j, k;
8641
8642         reg = 0xffffffff;
8643
8644         for (j = 0; j < len; j++) {
8645                 reg ^= buf[j];
8646
8647                 for (k = 0; k < 8; k++) {
8648                         tmp = reg & 0x01;
8649
8650                         reg >>= 1;
8651
8652                         if (tmp) {
8653                                 reg ^= 0xedb88320;
8654                         }
8655                 }
8656         }
8657
8658         return ~reg;
8659 }
8660
8661 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8662 {
8663         /* accept or reject all multicast frames */
8664         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8665         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8666         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8667         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8668 }
8669
/* Apply the device's RX filtering policy: promiscuous / all-multicast /
 * multicast hash filtering, and VLAN tag stripping.  Caller must hold
 * the appropriate tp lock (callers use tg3_full_lock()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Top 7 bits of the inverted CRC select one of
			 * 128 hash filter bits: bits 6:5 pick the
			 * register, bits 4:0 pick the bit within it.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the hardware RX mode register if it changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8733
/* ndo_set_rx_mode hook: refresh the RX filtering state under the full
 * device lock.  No-op while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8745
8746 #define TG3_REGDUMP_LEN         (32 * 1024)
8747
/* ethtool get_regs_len hook: the dump produced by tg3_get_regs()
 * below is always a fixed TG3_REGDUMP_LEN (32KB) snapshot.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8752
/* ethtool get_regs hook: fill @_p (TG3_REGDUMP_LEN bytes) with a dump
 * of selected chip register blocks, each stored at its native register
 * offset within the buffer; gaps stay zeroed from the memset.  While
 * the PHY is in low-power state the buffer is left all-zero —
 * presumably register access is unsafe then (NOTE(review): confirm).
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register and advance the output cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Dump `len' bytes of registers starting at `base', placing them at
 * offset `base' inside the output buffer.
 */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Dump a single register at its own offset. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only dumped on chips that have NVRAM. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8825
/* ethtool get_eeprom_len hook: report the NVRAM size recorded in the
 * driver's per-device state.
 */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
8832
8833 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8834 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8835 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8836
8837 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8838 {
8839         struct tg3 *tp = netdev_priv(dev);
8840         int ret;
8841         u8  *pd;
8842         u32 i, offset, len, b_offset, b_count;
8843         __le32 val;
8844
8845         if (tp->link_config.phy_is_low_power)
8846                 return -EAGAIN;
8847
8848         offset = eeprom->offset;
8849         len = eeprom->len;
8850         eeprom->len = 0;
8851
8852         eeprom->magic = TG3_EEPROM_MAGIC;
8853
8854         if (offset & 3) {
8855                 /* adjustments to start on required 4 byte boundary */
8856                 b_offset = offset & 3;
8857                 b_count = 4 - b_offset;
8858                 if (b_count > len) {
8859                         /* i.e. offset=1 len=2 */
8860                         b_count = len;
8861                 }
8862                 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
8863                 if (ret)
8864                         return ret;
8865                 memcpy(data, ((char*)&val) + b_offset, b_count);
8866                 len -= b_count;
8867                 offset += b_count;
8868                 eeprom->len += b_count;
8869         }
8870
8871         /* read bytes upto the last 4 byte boundary */
8872         pd = &data[eeprom->len];
8873         for (i = 0; i < (len - (len & 3)); i += 4) {
8874                 ret = tg3_nvram_read_le(tp, offset + i, &val);
8875                 if (ret) {
8876                         eeprom->len += i;
8877                         return ret;
8878                 }
8879                 memcpy(pd + i, &val, 4);
8880         }
8881         eeprom->len += i;
8882
8883         if (len & 3) {
8884                 /* read last bytes not ending on 4 byte boundary */
8885                 pd = &data[eeprom->len];
8886                 b_count = len & 3;
8887                 b_offset = offset + len - b_count;
8888                 ret = tg3_nvram_read_le(tp, b_offset, &val);
8889                 if (ret)
8890                         return ret;
8891                 memcpy(pd, &val, b_count);
8892                 eeprom->len += b_count;
8893         }
8894         return 0;
8895 }
8896
8897 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8898
8899 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8900 {
8901         struct tg3 *tp = netdev_priv(dev);
8902         int ret;
8903         u32 offset, len, b_offset, odd_len;
8904         u8 *buf;
8905         __le32 start, end;
8906
8907         if (tp->link_config.phy_is_low_power)
8908                 return -EAGAIN;
8909
8910         if (eeprom->magic != TG3_EEPROM_MAGIC)
8911                 return -EINVAL;
8912
8913         offset = eeprom->offset;
8914         len = eeprom->len;
8915
8916         if ((b_offset = (offset & 3))) {
8917                 /* adjustments to start on required 4 byte boundary */
8918                 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
8919                 if (ret)
8920                         return ret;
8921                 len += b_offset;
8922                 offset &= ~3;
8923                 if (len < 4)
8924                         len = 4;
8925         }
8926
8927         odd_len = 0;
8928         if (len & 3) {
8929                 /* adjustments to end on required 4 byte boundary */
8930                 odd_len = 1;
8931                 len = (len + 3) & ~3;
8932                 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
8933                 if (ret)
8934                         return ret;
8935         }
8936
8937         buf = data;
8938         if (b_offset || odd_len) {
8939                 buf = kmalloc(len, GFP_KERNEL);
8940                 if (!buf)
8941                         return -ENOMEM;
8942                 if (b_offset)
8943                         memcpy(buf, &start, 4);
8944                 if (odd_len)
8945                         memcpy(buf+len-4, &end, 4);
8946                 memcpy(buf + b_offset, data, eeprom->len);
8947         }
8948
8949         ret = tg3_nvram_write_block(tp, offset, len, buf);
8950
8951         if (buf != data)
8952                 kfree(buf);
8953
8954         return ret;
8955 }
8956
8957 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8958 {
8959         struct tg3 *tp = netdev_priv(dev);
8960
8961         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8962                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8963                         return -EAGAIN;
8964                 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
8965         }
8966
8967         cmd->supported = (SUPPORTED_Autoneg);
8968
8969         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8970                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8971                                    SUPPORTED_1000baseT_Full);
8972
8973         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8974                 cmd->supported |= (SUPPORTED_100baseT_Half |
8975                                   SUPPORTED_100baseT_Full |
8976                                   SUPPORTED_10baseT_Half |
8977                                   SUPPORTED_10baseT_Full |
8978                                   SUPPORTED_TP);
8979                 cmd->port = PORT_TP;
8980         } else {
8981                 cmd->supported |= SUPPORTED_FIBRE;
8982                 cmd->port = PORT_FIBRE;
8983         }
8984
8985         cmd->advertising = tp->link_config.advertising;
8986         if (netif_running(dev)) {
8987                 cmd->speed = tp->link_config.active_speed;
8988                 cmd->duplex = tp->link_config.active_duplex;
8989         }
8990         cmd->phy_address = PHY_ADDR;
8991         cmd->transceiver = 0;
8992         cmd->autoneg = tp->link_config.autoneg;
8993         cmd->maxtxpkt = 0;
8994         cmd->maxrxpkt = 0;
8995         return 0;
8996 }
8997
/* ethtool set_settings hook: validate and apply link settings.  With
 * phylib managing the PHY, defer to it.  Otherwise the request is
 * sanity-checked against the hardware (fiber only runs at 1000Mb/s,
 * copper cannot be forced to 1000Mb/s, 10/100-only chips reject
 * gigabit), stored as the new link_config, and the PHY reprogrammed
 * if the device is up.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
		/* These are the only valid advertisement bits allowed.  */
		if (cmd->autoneg == AUTONEG_ENABLE &&
		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
					  ADVERTISED_1000baseT_Full |
					  ADVERTISED_Autoneg |
					  ADVERTISED_FIBRE)))
			return -EINVAL;
		/* Fiber can only do SPEED_1000.  */
		else if ((cmd->autoneg != AUTONEG_ENABLE) &&
			 (cmd->speed != SPEED_1000))
			return -EINVAL;
	/* Copper cannot force SPEED_1000.  */
	} else if ((cmd->autoneg != AUTONEG_ENABLE) &&
		   (cmd->speed == SPEED_1000))
		return -EINVAL;
	else if ((cmd->speed == SPEED_1000) &&
		 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		return -EINVAL;

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		/* Speed/duplex will be determined by autonegotiation. */
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = cmd->speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* NOTE(review): orig_* appear to preserve the requested settings
	 * across power-state transitions — confirm against the suspend path.
	 */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
9053
9054 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9055 {
9056         struct tg3 *tp = netdev_priv(dev);
9057
9058         strcpy(info->driver, DRV_MODULE_NAME);
9059         strcpy(info->version, DRV_MODULE_VERSION);
9060         strcpy(info->fw_version, tp->fw_ver);
9061         strcpy(info->bus_info, pci_name(tp->pdev));
9062 }
9063
9064 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9065 {
9066         struct tg3 *tp = netdev_priv(dev);
9067
9068         if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9069             device_can_wakeup(&tp->pdev->dev))
9070                 wol->supported = WAKE_MAGIC;
9071         else
9072                 wol->supported = 0;
9073         wol->wolopts = 0;
9074         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
9075                 wol->wolopts = WAKE_MAGIC;
9076         memset(&wol->sopass, 0, sizeof(wol->sopass));
9077 }
9078
9079 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9080 {
9081         struct tg3 *tp = netdev_priv(dev);
9082         struct device *dp = &tp->pdev->dev;
9083
9084         if (wol->wolopts & ~WAKE_MAGIC)
9085                 return -EINVAL;
9086         if ((wol->wolopts & WAKE_MAGIC) &&
9087             !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9088                 return -EINVAL;
9089
9090         spin_lock_bh(&tp->lock);
9091         if (wol->wolopts & WAKE_MAGIC) {
9092                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9093                 device_set_wakeup_enable(dp, true);
9094         } else {
9095                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9096                 device_set_wakeup_enable(dp, false);
9097         }
9098         spin_unlock_bh(&tp->lock);
9099
9100         return 0;
9101 }
9102
/* ethtool get_msglevel hook: return the driver's message-enable mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
9108
/* ethtool set_msglevel hook: store the new message-enable mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
9114
9115 static int tg3_set_tso(struct net_device *dev, u32 value)
9116 {
9117         struct tg3 *tp = netdev_priv(dev);
9118
9119         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9120                 if (value)
9121                         return -EINVAL;
9122                 return 0;
9123         }
9124         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9125             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
9126                 if (value) {
9127                         dev->features |= NETIF_F_TSO6;
9128                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9129                             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9130                              GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9131                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9132                                 dev->features |= NETIF_F_TSO_ECN;
9133                 } else
9134                         dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9135         }
9136         return ethtool_op_set_tso(dev, value);
9137 }
9138
/* ethtool nway_reset hook: restart autonegotiation.  Only valid while
 * the device is up, and never on a SERDES link.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		/* phylib owns the PHY; let it restart autoneg. */
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is deliberately read twice; the first
		 * read's result and status are discarded — presumably a
		 * dummy read to refresh latched PHY state.  Confirm before
		 * "simplifying" this.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
			/* Restart autoneg, enabling it in case we were in
			 * parallel-detect mode with ANENABLE clear.
			 */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
9172
9173 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9174 {
9175         struct tg3 *tp = netdev_priv(dev);
9176
9177         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9178         ering->rx_mini_max_pending = 0;
9179         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9180                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9181         else
9182                 ering->rx_jumbo_max_pending = 0;
9183
9184         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9185
9186         ering->rx_pending = tp->rx_pending;
9187         ering->rx_mini_pending = 0;
9188         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9189                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9190         else
9191                 ering->rx_jumbo_pending = 0;
9192
9193         ering->tx_pending = tp->tx_pending;
9194 }
9195
/* ethtool set_ringparam hook: validate and apply new RX/TX ring sizes.
 * If the device is running, the PHY and netif are stopped, the chip is
 * halted and restarted so the new sizes take effect, and the PHY is
 * resumed on success.  Returns 0, -EINVAL on out-of-range sizes, or
 * the restart error.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	/* The TX ring must leave room for a maximally-fragmented skb;
	 * on TSO_BUG chips the required margin is tripled.
	 */
	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		/* Reset and reinitialize so the new sizes take effect. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
9239
9240 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9241 {
9242         struct tg3 *tp = netdev_priv(dev);
9243
9244         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9245
9246         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9247                 epause->rx_pause = 1;
9248         else
9249                 epause->rx_pause = 0;
9250
9251         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9252                 epause->tx_pause = 1;
9253         else
9254                 epause->tx_pause = 0;
9255 }
9256
/* ethtool set_pauseparam hook: apply new pause settings.
 *
 * On phylib-managed PHYs: with autoneg, translate the rx/tx request
 * into Pause/Asym_Pause advertisement bits and, if the PHY is
 * connected, restart autoneg only when the advertisement actually
 * changed; without autoneg, set the flow-control flags directly and
 * reprogram flow control if the device is running.
 *
 * On legacy (non-phylib) PHYs: update the flags under the full lock
 * and, if the device is running, do a halt/restart cycle so the new
 * settings take effect.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;

		if (epause->autoneg) {
			u32 newadv;
			struct phy_device *phydev;

			phydev = tp->mdio_bus->phy_map[PHY_ADDR];

			/* Map rx/tx pause requests onto the 802.3
			 * Pause/Asym_Pause advertisement encoding.
			 */
			if (epause->rx_pause) {
				if (epause->tx_pause)
					newadv = ADVERTISED_Pause;
				else
					newadv = ADVERTISED_Pause |
						 ADVERTISED_Asym_Pause;
			} else if (epause->tx_pause) {
				newadv = ADVERTISED_Asym_Pause;
			} else
				newadv = 0;

			if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
				u32 oldadv = phydev->advertising &
					     (ADVERTISED_Pause |
					      ADVERTISED_Asym_Pause);
				/* Only renegotiate when something changed. */
				if (oldadv != newadv) {
					phydev->advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
					phydev->advertising |= newadv;
					err = phy_start_aneg(phydev);
				}
			} else {
				tp->link_config.advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
				tp->link_config.advertising |= newadv;
			}
		} else {
			if (epause->rx_pause)
				tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
			else
				tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;

			if (epause->tx_pause)
				tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
			else
				tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

			if (netif_running(dev))
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
		else
			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
		if (epause->rx_pause)
			tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

		if (netif_running(dev)) {
			/* Reset so the new flow-control flags take effect. */
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
9349
9350 static u32 tg3_get_rx_csum(struct net_device *dev)
9351 {
9352         struct tg3 *tp = netdev_priv(dev);
9353         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9354 }
9355
9356 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9357 {
9358         struct tg3 *tp = netdev_priv(dev);
9359
9360         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9361                 if (data != 0)
9362                         return -EINVAL;
9363                 return 0;
9364         }
9365
9366         spin_lock_bh(&tp->lock);
9367         if (data)
9368                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9369         else
9370                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9371         spin_unlock_bh(&tp->lock);
9372
9373         return 0;
9374 }
9375
9376 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9377 {
9378         struct tg3 *tp = netdev_priv(dev);
9379
9380         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9381                 if (data != 0)
9382                         return -EINVAL;
9383                 return 0;
9384         }
9385
9386         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9387             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9388             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9389             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9390             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9391                 ethtool_op_set_tx_ipv6_csum(dev, data);
9392         else
9393                 ethtool_op_set_tx_csum(dev, data);
9394
9395         return 0;
9396 }
9397
9398 static int tg3_get_sset_count (struct net_device *dev, int sset)
9399 {
9400         switch (sset) {
9401         case ETH_SS_TEST:
9402                 return TG3_NUM_TEST;
9403         case ETH_SS_STATS:
9404                 return TG3_NUM_STATS;
9405         default:
9406                 return -EOPNOTSUPP;
9407         }
9408 }
9409
9410 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9411 {
9412         switch (stringset) {
9413         case ETH_SS_STATS:
9414                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9415                 break;
9416         case ETH_SS_TEST:
9417                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9418                 break;
9419         default:
9420                 WARN_ON(1);     /* we need a WARN() */
9421                 break;
9422         }
9423 }
9424
9425 static int tg3_phys_id(struct net_device *dev, u32 data)
9426 {
9427         struct tg3 *tp = netdev_priv(dev);
9428         int i;
9429
9430         if (!netif_running(tp->dev))
9431                 return -EAGAIN;
9432
9433         if (data == 0)
9434                 data = UINT_MAX / 2;
9435
9436         for (i = 0; i < (data * 2); i++) {
9437                 if ((i % 2) == 0)
9438                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9439                                            LED_CTRL_1000MBPS_ON |
9440                                            LED_CTRL_100MBPS_ON |
9441                                            LED_CTRL_10MBPS_ON |
9442                                            LED_CTRL_TRAFFIC_OVERRIDE |
9443                                            LED_CTRL_TRAFFIC_BLINK |
9444                                            LED_CTRL_TRAFFIC_LED);
9445
9446                 else
9447                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9448                                            LED_CTRL_TRAFFIC_OVERRIDE);
9449
9450                 if (msleep_interruptible(500))
9451                         break;
9452         }
9453         tw32(MAC_LED_CTRL, tp->led_ctrl);
9454         return 0;
9455 }
9456
/* ethtool get_ethtool_stats hook: copy the statistics produced by
 * tg3_get_estats() into the caller's buffer (sizeof(tp->estats) bytes).
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
9463
9464 #define NVRAM_TEST_SIZE 0x100
9465 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
9466 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
9467 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
9468 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9469 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9470
/* Verify the NVRAM contents.  The magic word at offset 0 selects one of
 * three image formats (plain EEPROM, firmware selfboot format 1, or
 * hardware selfboot) and how many bytes to read and check.
 *
 * Returns 0 on success (or for unrecognized selfboot revisions, which
 * are skipped), -EIO on read failure or checksum/parity mismatch, and
 * -ENOMEM if the scratch buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__le32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Decide how many bytes to read based on the image format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				/* Unknown revision: skip the test rather
				 * than report a false failure.
				 */
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the whole image into buf; bail out on the first failure. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = swab32(le32_to_cpu(buf[0]));
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		/* Format 1 selfboot: simple byte-wise mod-256 checksum;
		 * a valid image sums to zero.
		 */
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each carry 7 parity bits
				 * (MSB first) for the bytes that follow.
				 */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* Byte 16 carries 6 parity bits (from bit
				 * 5 down) and byte 17 a further 8.
				 */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		/* Each data byte together with its parity bit must have
		 * odd total weight; either mismatch direction fails.
		 */
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Plain EEPROM image: two CRC-protected regions. */

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
9604
9605 #define TG3_SERDES_TIMEOUT_SEC  2
9606 #define TG3_COPPER_TIMEOUT_SEC  6
9607
9608 static int tg3_test_link(struct tg3 *tp)
9609 {
9610         int i, max;
9611
9612         if (!netif_running(tp->dev))
9613                 return -ENODEV;
9614
9615         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9616                 max = TG3_SERDES_TIMEOUT_SEC;
9617         else
9618                 max = TG3_COPPER_TIMEOUT_SEC;
9619
9620         for (i = 0; i < max; i++) {
9621                 if (netif_carrier_ok(tp->dev))
9622                         return 0;
9623
9624                 if (msleep_interruptible(1000))
9625                         break;
9626         }
9627
9628         return -EIO;
9629 }
9630
9631 /* Only test the commonly used registers */
9632 static int tg3_test_registers(struct tg3 *tp)
9633 {
9634         int i, is_5705, is_5750;
9635         u32 offset, read_mask, write_mask, val, save_val, read_val;
9636         static struct {
9637                 u16 offset;
9638                 u16 flags;
9639 #define TG3_FL_5705     0x1
9640 #define TG3_FL_NOT_5705 0x2
9641 #define TG3_FL_NOT_5788 0x4
9642 #define TG3_FL_NOT_5750 0x8
9643                 u32 read_mask;
9644                 u32 write_mask;
9645         } reg_tbl[] = {
9646                 /* MAC Control Registers */
9647                 { MAC_MODE, TG3_FL_NOT_5705,
9648                         0x00000000, 0x00ef6f8c },
9649                 { MAC_MODE, TG3_FL_5705,
9650                         0x00000000, 0x01ef6b8c },
9651                 { MAC_STATUS, TG3_FL_NOT_5705,
9652                         0x03800107, 0x00000000 },
9653                 { MAC_STATUS, TG3_FL_5705,
9654                         0x03800100, 0x00000000 },
9655                 { MAC_ADDR_0_HIGH, 0x0000,
9656                         0x00000000, 0x0000ffff },
9657                 { MAC_ADDR_0_LOW, 0x0000,
9658                         0x00000000, 0xffffffff },
9659                 { MAC_RX_MTU_SIZE, 0x0000,
9660                         0x00000000, 0x0000ffff },
9661                 { MAC_TX_MODE, 0x0000,
9662                         0x00000000, 0x00000070 },
9663                 { MAC_TX_LENGTHS, 0x0000,
9664                         0x00000000, 0x00003fff },
9665                 { MAC_RX_MODE, TG3_FL_NOT_5705,
9666                         0x00000000, 0x000007fc },
9667                 { MAC_RX_MODE, TG3_FL_5705,
9668                         0x00000000, 0x000007dc },
9669                 { MAC_HASH_REG_0, 0x0000,
9670                         0x00000000, 0xffffffff },
9671                 { MAC_HASH_REG_1, 0x0000,
9672                         0x00000000, 0xffffffff },
9673                 { MAC_HASH_REG_2, 0x0000,
9674                         0x00000000, 0xffffffff },
9675                 { MAC_HASH_REG_3, 0x0000,
9676                         0x00000000, 0xffffffff },
9677
9678                 /* Receive Data and Receive BD Initiator Control Registers. */
9679                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9680                         0x00000000, 0xffffffff },
9681                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9682                         0x00000000, 0xffffffff },
9683                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9684                         0x00000000, 0x00000003 },
9685                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9686                         0x00000000, 0xffffffff },
9687                 { RCVDBDI_STD_BD+0, 0x0000,
9688                         0x00000000, 0xffffffff },
9689                 { RCVDBDI_STD_BD+4, 0x0000,
9690                         0x00000000, 0xffffffff },
9691                 { RCVDBDI_STD_BD+8, 0x0000,
9692                         0x00000000, 0xffff0002 },
9693                 { RCVDBDI_STD_BD+0xc, 0x0000,
9694                         0x00000000, 0xffffffff },
9695
9696                 /* Receive BD Initiator Control Registers. */
9697                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9698                         0x00000000, 0xffffffff },
9699                 { RCVBDI_STD_THRESH, TG3_FL_5705,
9700                         0x00000000, 0x000003ff },
9701                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9702                         0x00000000, 0xffffffff },
9703
9704                 /* Host Coalescing Control Registers. */
9705                 { HOSTCC_MODE, TG3_FL_NOT_5705,
9706                         0x00000000, 0x00000004 },
9707                 { HOSTCC_MODE, TG3_FL_5705,
9708                         0x00000000, 0x000000f6 },
9709                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9710                         0x00000000, 0xffffffff },
9711                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9712                         0x00000000, 0x000003ff },
9713                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9714                         0x00000000, 0xffffffff },
9715                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9716                         0x00000000, 0x000003ff },
9717                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9718                         0x00000000, 0xffffffff },
9719                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9720                         0x00000000, 0x000000ff },
9721                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9722                         0x00000000, 0xffffffff },
9723                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9724                         0x00000000, 0x000000ff },
9725                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9726                         0x00000000, 0xffffffff },
9727                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9728                         0x00000000, 0xffffffff },
9729                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9730                         0x00000000, 0xffffffff },
9731                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9732                         0x00000000, 0x000000ff },
9733                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9734                         0x00000000, 0xffffffff },
9735                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9736                         0x00000000, 0x000000ff },
9737                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9738                         0x00000000, 0xffffffff },
9739                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9740                         0x00000000, 0xffffffff },
9741                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9742                         0x00000000, 0xffffffff },
9743                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9744                         0x00000000, 0xffffffff },
9745                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9746                         0x00000000, 0xffffffff },
9747                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9748                         0xffffffff, 0x00000000 },
9749                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9750                         0xffffffff, 0x00000000 },
9751
9752                 /* Buffer Manager Control Registers. */
9753                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9754                         0x00000000, 0x007fff80 },
9755                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9756                         0x00000000, 0x007fffff },
9757                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9758                         0x00000000, 0x0000003f },
9759                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9760                         0x00000000, 0x000001ff },
9761                 { BUFMGR_MB_HIGH_WATER, 0x0000,
9762                         0x00000000, 0x000001ff },
9763                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9764                         0xffffffff, 0x00000000 },
9765                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9766                         0xffffffff, 0x00000000 },
9767
9768                 /* Mailbox Registers */
9769                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9770                         0x00000000, 0x000001ff },
9771                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9772                         0x00000000, 0x000001ff },
9773                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9774                         0x00000000, 0x000007ff },
9775                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9776                         0x00000000, 0x000001ff },
9777
9778                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9779         };
9780
9781         is_5705 = is_5750 = 0;
9782         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9783                 is_5705 = 1;
9784                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9785                         is_5750 = 1;
9786         }
9787
9788         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9789                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9790                         continue;
9791
9792                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9793                         continue;
9794
9795                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9796                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
9797                         continue;
9798
9799                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9800                         continue;
9801
9802                 offset = (u32) reg_tbl[i].offset;
9803                 read_mask = reg_tbl[i].read_mask;
9804                 write_mask = reg_tbl[i].write_mask;
9805
9806                 /* Save the original register content */
9807                 save_val = tr32(offset);
9808
9809                 /* Determine the read-only value. */
9810                 read_val = save_val & read_mask;
9811
9812                 /* Write zero to the register, then make sure the read-only bits
9813                  * are not changed and the read/write bits are all zeros.
9814                  */
9815                 tw32(offset, 0);
9816
9817                 val = tr32(offset);
9818
9819                 /* Test the read-only and read/write bits. */
9820                 if (((val & read_mask) != read_val) || (val & write_mask))
9821                         goto out;
9822
9823                 /* Write ones to all the bits defined by RdMask and WrMask, then
9824                  * make sure the read-only bits are not changed and the
9825                  * read/write bits are all ones.
9826                  */
9827                 tw32(offset, read_mask | write_mask);
9828
9829                 val = tr32(offset);
9830
9831                 /* Test the read-only bits. */
9832                 if ((val & read_mask) != read_val)
9833                         goto out;
9834
9835                 /* Test the read/write bits. */
9836                 if ((val & write_mask) != write_mask)
9837                         goto out;
9838
9839                 tw32(offset, save_val);
9840         }
9841
9842         return 0;
9843
9844 out:
9845         if (netif_msg_hw(tp))
9846                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9847                        offset);
9848         tw32(offset, save_val);
9849         return -EIO;
9850 }
9851
9852 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9853 {
9854         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9855         int i;
9856         u32 j;
9857
9858         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9859                 for (j = 0; j < len; j += 4) {
9860                         u32 val;
9861
9862                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9863                         tg3_read_mem(tp, offset + j, &val);
9864                         if (val != test_pattern[i])
9865                                 return -EIO;
9866                 }
9867         }
9868         return 0;
9869 }
9870
9871 static int tg3_test_memory(struct tg3 *tp)
9872 {
9873         static struct mem_entry {
9874                 u32 offset;
9875                 u32 len;
9876         } mem_tbl_570x[] = {
9877                 { 0x00000000, 0x00b50},
9878                 { 0x00002000, 0x1c000},
9879                 { 0xffffffff, 0x00000}
9880         }, mem_tbl_5705[] = {
9881                 { 0x00000100, 0x0000c},
9882                 { 0x00000200, 0x00008},
9883                 { 0x00004000, 0x00800},
9884                 { 0x00006000, 0x01000},
9885                 { 0x00008000, 0x02000},
9886                 { 0x00010000, 0x0e000},
9887                 { 0xffffffff, 0x00000}
9888         }, mem_tbl_5755[] = {
9889                 { 0x00000200, 0x00008},
9890                 { 0x00004000, 0x00800},
9891                 { 0x00006000, 0x00800},
9892                 { 0x00008000, 0x02000},
9893                 { 0x00010000, 0x0c000},
9894                 { 0xffffffff, 0x00000}
9895         }, mem_tbl_5906[] = {
9896                 { 0x00000200, 0x00008},
9897                 { 0x00004000, 0x00400},
9898                 { 0x00006000, 0x00400},
9899                 { 0x00008000, 0x01000},
9900                 { 0x00010000, 0x01000},
9901                 { 0xffffffff, 0x00000}
9902         };
9903         struct mem_entry *mem_tbl;
9904         int err = 0;
9905         int i;
9906
9907         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9908                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9909                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9910                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9911                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9912                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9913                         mem_tbl = mem_tbl_5755;
9914                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9915                         mem_tbl = mem_tbl_5906;
9916                 else
9917                         mem_tbl = mem_tbl_5705;
9918         } else
9919                 mem_tbl = mem_tbl_570x;
9920
9921         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9922                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9923                     mem_tbl[i].len)) != 0)
9924                         break;
9925         }
9926
9927         return err;
9928 }
9929
9930 #define TG3_MAC_LOOPBACK        0
9931 #define TG3_PHY_LOOPBACK        1
9932
/* Run one loopback test: configure MAC-internal or PHY loopback, send a
 * single 1514-byte frame, and verify it comes back intact on the
 * standard receive ring.
 *
 * Returns 0 on success, -EIO if the frame is not transmitted/received
 * or its payload is corrupted, -ENOMEM on skb allocation failure,
 * -EINVAL for an unknown loopback_mode.  Caller must hold the device
 * quiesced (invoked from tg3_test_loopback() after tg3_reset_hw()).
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* 5906 EPHY: tweak shadow register 0x1b before
			 * enabling loopback; restore the test register
			 * afterwards.
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build a max-size (non-jumbo) test frame: dest MAC, zeroed
	 * src/type, then a known byte pattern as payload.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	/* Snapshot the rx producer so we can detect our own frame. */
	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Ring the tx doorbell and flush it with a readback. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Locate our frame's descriptor and sanity-check it. */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	/* Received length includes the 4-byte FCS; strip it. */
	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the payload pattern survived the round trip. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
10100
10101 #define TG3_MAC_LOOPBACK_FAILED         1
10102 #define TG3_PHY_LOOPBACK_FAILED         2
10103 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
10104                                          TG3_PHY_LOOPBACK_FAILED)
10105
/* Run the MAC and (for copper, non-phylib devices) PHY loopback tests.
 * Returns a bitmask of TG3_MAC_LOOPBACK_FAILED / TG3_PHY_LOOPBACK_FAILED,
 * or 0 if all applicable tests pass.  Requires the device to be running;
 * resets the hardware before testing.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	/* On CPMU-equipped chips, grab the hardware mutex and disable
	 * link-based power management so it cannot interfere with the
	 * loopback frames.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	/* Restore CPMU state and release the hardware mutex. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	/* PHY loopback is skipped for SerDes and phylib-managed PHYs. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
10164
/* ethtool self-test entry point.  Fills data[0..5] with per-test pass
 * (0) / fail (nonzero) results: nvram, link, registers, memory,
 * loopback, interrupt.  Sets ETH_TEST_FL_FAILED in etest->flags if any
 * test fails.  Offline tests halt and later restart the hardware.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Wake the chip for the duration of the tests if it was in a
	 * low-power state; restored at the end.
	 */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	/* Online tests: safe to run while the interface is up. */
	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		/* Quiesce the device before destructive tests. */
		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and its on-board CPUs so register and
		 * memory tests see quiescent hardware.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4] holds the loopback failure bitmask directly. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Interrupt test needs the lock dropped. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the device back up if it was running before. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
10242
/* Network-device ioctl handler implementing the MII register access
 * ioctls (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 *
 * When phylib manages the PHY (TG3_FLG3_USE_PHYLIB), the request is
 * forwarded to phy_mii_ioctl(); -EAGAIN is returned if the PHY has not
 * been connected yet.  Otherwise the register is accessed directly via
 * tg3_readphy()/tg3_writephy() under tp->lock.
 *
 * Returns 0 on success, -EAGAIN while the PHY is in low-power state,
 * -EPERM for an unprivileged register write, and -EOPNOTSUPP for
 * unsupported commands (including MII access on serdes boards, which
 * have no copper PHY).
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
	}

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		/* Serialize MDIO access against the rest of the driver. */
		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
10300
#if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group and reprogram the
 * chip's RX mode so VLAN tags are kept or stripped accordingly.
 * The interface is quiesced around the update and all state changes
 * happen under the full lock.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
#endif
10322
10323 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10324 {
10325         struct tg3 *tp = netdev_priv(dev);
10326
10327         memcpy(ec, &tp->coal, sizeof(*ec));
10328         return 0;
10329 }
10330
10331 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10332 {
10333         struct tg3 *tp = netdev_priv(dev);
10334         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10335         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10336
10337         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10338                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10339                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10340                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10341                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10342         }
10343
10344         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10345             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10346             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10347             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10348             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10349             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10350             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10351             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10352             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10353             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10354                 return -EINVAL;
10355
10356         /* No rx interrupts will be generated if both are zero */
10357         if ((ec->rx_coalesce_usecs == 0) &&
10358             (ec->rx_max_coalesced_frames == 0))
10359                 return -EINVAL;
10360
10361         /* No tx interrupts will be generated if both are zero */
10362         if ((ec->tx_coalesce_usecs == 0) &&
10363             (ec->tx_max_coalesced_frames == 0))
10364                 return -EINVAL;
10365
10366         /* Only copy relevant parameters, ignore all others. */
10367         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10368         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10369         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10370         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10371         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10372         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10373         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10374         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10375         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10376
10377         if (netif_running(dev)) {
10378                 tg3_full_lock(tp, 0);
10379                 __tg3_set_coalesce(tp, &tp->coal);
10380                 tg3_full_unlock(tp);
10381         }
10382         return 0;
10383 }
10384
/* ethtool operations table for tg3 devices; wired up at netdev
 * registration time.  Generic helpers (ethtool_op_*) are used where the
 * default behavior suffices.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
10417
10418 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10419 {
10420         u32 cursize, val, magic;
10421
10422         tp->nvram_size = EEPROM_CHIP_SIZE;
10423
10424         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
10425                 return;
10426
10427         if ((magic != TG3_EEPROM_MAGIC) &&
10428             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10429             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10430                 return;
10431
10432         /*
10433          * Size the chip by reading offsets at increasing powers of two.
10434          * When we encounter our validation signature, we know the addressing
10435          * has wrapped around, and thus have our chip size.
10436          */
10437         cursize = 0x10;
10438
10439         while (cursize < tp->nvram_size) {
10440                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
10441                         return;
10442
10443                 if (val == magic)
10444                         break;
10445
10446                 cursize <<= 1;
10447         }
10448
10449         tp->nvram_size = cursize;
10450 }
10451
10452 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10453 {
10454         u32 val;
10455
10456         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10457                 return;
10458
10459         /* Selfboot format */
10460         if (val != TG3_EEPROM_MAGIC) {
10461                 tg3_get_eeprom_size(tp);
10462                 return;
10463         }
10464
10465         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10466                 if (val != 0) {
10467                         tp->nvram_size = (val >> 16) * 1024;
10468                         return;
10469                 }
10470         }
10471         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10472 }
10473
/* Identify the NVRAM part on pre-5752 devices from NVRAM_CFG1.
 *
 * Sets tp->nvram_jedecnum, tp->nvram_pagesize and the NVRAM
 * BUFFERED/FLASH flags.  Only 5750/5780-class chips carry a vendor code
 * in NVRAM_CFG1; everything else is assumed to be buffered Atmel flash.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* EEPROM-style part: clear compat bypass for proper access. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
				break;
			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
				break;
			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
				break;
		}
	}
	else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
10526
/* Identify the NVRAM part on 5752 devices from NVRAM_CFG1.
 *
 * Decodes the vendor code into tp->nvram_jedecnum and the
 * BUFFERED/FLASH flags, then the page-size code into tp->nvram_pagesize
 * for flash parts.  For EEPROM parts the page size is set to the whole
 * chip and compatibility bypass is cleared.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
10587
10588 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10589 {
10590         u32 nvcfg1, protect = 0;
10591
10592         nvcfg1 = tr32(NVRAM_CFG1);
10593
10594         /* NVRAM protection for TPM */
10595         if (nvcfg1 & (1 << 27)) {
10596                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10597                 protect = 1;
10598         }
10599
10600         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10601         switch (nvcfg1) {
10602                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10603                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10604                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10605                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10606                         tp->nvram_jedecnum = JEDEC_ATMEL;
10607                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10608                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10609                         tp->nvram_pagesize = 264;
10610                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10611                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10612                                 tp->nvram_size = (protect ? 0x3e200 :
10613                                                   TG3_NVRAM_SIZE_512KB);
10614                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10615                                 tp->nvram_size = (protect ? 0x1f200 :
10616                                                   TG3_NVRAM_SIZE_256KB);
10617                         else
10618                                 tp->nvram_size = (protect ? 0x1f200 :
10619                                                   TG3_NVRAM_SIZE_128KB);
10620                         break;
10621                 case FLASH_5752VENDOR_ST_M45PE10:
10622                 case FLASH_5752VENDOR_ST_M45PE20:
10623                 case FLASH_5752VENDOR_ST_M45PE40:
10624                         tp->nvram_jedecnum = JEDEC_ST;
10625                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10626                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10627                         tp->nvram_pagesize = 256;
10628                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10629                                 tp->nvram_size = (protect ?
10630                                                   TG3_NVRAM_SIZE_64KB :
10631                                                   TG3_NVRAM_SIZE_128KB);
10632                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10633                                 tp->nvram_size = (protect ?
10634                                                   TG3_NVRAM_SIZE_64KB :
10635                                                   TG3_NVRAM_SIZE_256KB);
10636                         else
10637                                 tp->nvram_size = (protect ?
10638                                                   TG3_NVRAM_SIZE_128KB :
10639                                                   TG3_NVRAM_SIZE_512KB);
10640                         break;
10641         }
10642 }
10643
/* Identify the NVRAM part on 5787/5784/5785 devices from NVRAM_CFG1.
 *
 * Decodes the vendor code into tp->nvram_jedecnum, tp->nvram_pagesize
 * and the BUFFERED/FLASH flags.  EEPROM parts additionally get the
 * compatibility bypass bit cleared.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			/* EEPROM-style access requires compat bypass off. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
10681
/* Identify the NVRAM part on 5761 devices from NVRAM_CFG1.
 *
 * Decodes the vendor code into JEDEC id, flags and page size, then
 * derives tp->nvram_size from the same code.  When the TPM protection
 * bit (bit 27) is set, the accessible size is taken from the
 * NVRAM_ADDR_LOCKOUT register instead.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			/* These Atmel parts take linear addresses directly. */
			tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
			tp->nvram_pagesize = 256;
			break;
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}

	if (protect) {
		/* TPM-protected: usable size comes from the lockout register. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
			case FLASH_5761VENDOR_ATMEL_ADB161D:
			case FLASH_5761VENDOR_ATMEL_MDB161D:
			case FLASH_5761VENDOR_ST_A_M45PE16:
			case FLASH_5761VENDOR_ST_M_M45PE16:
				tp->nvram_size = TG3_NVRAM_SIZE_2MB;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB081D:
			case FLASH_5761VENDOR_ATMEL_MDB081D:
			case FLASH_5761VENDOR_ST_A_M45PE80:
			case FLASH_5761VENDOR_ST_M_M45PE80:
				tp->nvram_size = TG3_NVRAM_SIZE_1MB;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB041D:
			case FLASH_5761VENDOR_ATMEL_MDB041D:
			case FLASH_5761VENDOR_ST_A_M45PE40:
			case FLASH_5761VENDOR_ST_M_M45PE40:
				tp->nvram_size = TG3_NVRAM_SIZE_512KB;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB021D:
			case FLASH_5761VENDOR_ATMEL_MDB021D:
			case FLASH_5761VENDOR_ST_A_M45PE20:
			case FLASH_5761VENDOR_ST_M_M45PE20:
				tp->nvram_size = TG3_NVRAM_SIZE_256KB;
				break;
		}
	}
}
10756
10757 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10758 {
10759         tp->nvram_jedecnum = JEDEC_ATMEL;
10760         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10761         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10762 }
10763
10764 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10765 static void __devinit tg3_nvram_init(struct tg3 *tp)
10766 {
10767         tw32_f(GRC_EEPROM_ADDR,
10768              (EEPROM_ADDR_FSM_RESET |
10769               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10770                EEPROM_ADDR_CLKPERD_SHIFT)));
10771
10772         msleep(1);
10773
10774         /* Enable seeprom accesses. */
10775         tw32_f(GRC_LOCAL_CTRL,
10776              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10777         udelay(100);
10778
10779         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10780             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10781                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10782
10783                 if (tg3_nvram_lock(tp)) {
10784                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10785                                "tg3_nvram_init failed.\n", tp->dev->name);
10786                         return;
10787                 }
10788                 tg3_enable_nvram_access(tp);
10789
10790                 tp->nvram_size = 0;
10791
10792                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10793                         tg3_get_5752_nvram_info(tp);
10794                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10795                         tg3_get_5755_nvram_info(tp);
10796                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10797                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10798                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10799                         tg3_get_5787_nvram_info(tp);
10800                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10801                         tg3_get_5761_nvram_info(tp);
10802                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10803                         tg3_get_5906_nvram_info(tp);
10804                 else
10805                         tg3_get_nvram_info(tp);
10806
10807                 if (tp->nvram_size == 0)
10808                         tg3_get_nvram_size(tp);
10809
10810                 tg3_disable_nvram_access(tp);
10811                 tg3_nvram_unlock(tp);
10812
10813         } else {
10814                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10815
10816                 tg3_get_eeprom_size(tp);
10817         }
10818 }
10819
/* Read one 32-bit word via the serial-EEPROM interface (used when the
 * chip has no NVRAM interface).
 *
 * @offset: byte offset, must be 32-bit aligned and within
 *          EEPROM_ADDR_ADDR_MASK.
 * @val:    written with the word from GRC_EEPROM_DATA on success.
 *
 * Returns 0 on success, -EINVAL for a bad offset, -EBUSY if the
 * transaction does not complete within ~1s of polling.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Start a read transaction at the requested address. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to 1000 x 1ms. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
10853
10854 #define NVRAM_CMD_TIMEOUT 10000
10855
10856 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10857 {
10858         int i;
10859
10860         tw32(NVRAM_CMD, nvram_cmd);
10861         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10862                 udelay(10);
10863                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10864                         udelay(10);
10865                         break;
10866                 }
10867         }
10868         if (i == NVRAM_CMD_TIMEOUT) {
10869                 return -EBUSY;
10870         }
10871         return 0;
10872 }
10873
10874 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10875 {
10876         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10877             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10878             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10879            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10880             (tp->nvram_jedecnum == JEDEC_ATMEL))
10881
10882                 addr = ((addr / tp->nvram_pagesize) <<
10883                         ATMEL_AT45DB0X1B_PAGE_POS) +
10884                        (addr % tp->nvram_pagesize);
10885
10886         return addr;
10887 }
10888
10889 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10890 {
10891         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10892             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10893             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10894            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10895             (tp->nvram_jedecnum == JEDEC_ATMEL))
10896
10897                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10898                         tp->nvram_pagesize) +
10899                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10900
10901         return addr;
10902 }
10903
/* Read one 32-bit word from NVRAM at byte @offset.
 *
 * Falls back to the serial-EEPROM interface on chips without NVRAM.
 * Otherwise translates the offset for the flash part, takes the NVRAM
 * lock, enables access, issues a single-word read command, and stores
 * the byte-swapped result in *val on success.
 *
 * Returns 0 on success, -EINVAL for an out-of-range offset, or the
 * error from locking/command execution.  *val is untouched on failure.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
10935
10936 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10937 {
10938         u32 v;
10939         int res = tg3_nvram_read(tp, offset, &v);
10940         if (!res)
10941                 *val = cpu_to_le32(v);
10942         return res;
10943 }
10944
10945 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10946 {
10947         int err;
10948         u32 tmp;
10949
10950         err = tg3_nvram_read(tp, offset, &tmp);
10951         *val = swab32(tmp);
10952         return err;
10953 }
10954
/* Write @len bytes from @buf to the serial EEPROM starting at @offset,
 * one 32-bit word per transaction (offset and length are dword
 * aligned).  Each word is loaded into GRC_EEPROM_DATA, the COMPLETE
 * bit is acknowledged, and a write transaction is started and polled
 * for up to 1000 x 1ms.
 *
 * Returns 0 on success or -EBUSY if any word fails to complete;
 * earlier words remain written in that case.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__le32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, le32_to_cpu(data));

		/* Ack any previous COMPLETE status before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, up to 1000 x 1ms. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
10997
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one whole flash page for the
	 * read-modify-erase-write cycle below.
	 */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	/* NOTE(review): buf is never advanced inside this loop, so a
	 * write spanning more than one flash page would rewrite the
	 * start of buf into each page — confirm callers never span
	 * pages, or that this needs fixing.
	 */
	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start address of the flash page containing "offset". */
		phy_addr = offset & ~pagemask;

		/* Read back the whole page so bytes outside the caller's
		 * range survive the erase.
		 */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
						(__le32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge the caller's data into the page image. */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the merged page back one dword at a time, framing
		 * the burst with the FIRST/LAST command bits.
		 */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));
			/* swab32(le32_to_cpu(data)), actually */
			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always leave the part write-disabled, even on error. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
11094
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		/* Load the next dword into the write-data register;
		 * the data is staged big-endian.
		 */
		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST marks the start of a page burst (or of the whole
		 * transfer), LAST its end.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST-JEDEC flash parts, on ASICs other than those listed,
		 * need an explicit write-enable before each burst.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
11149
11150 /* offset and length are dword aligned */
11151 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11152 {
11153         int ret;
11154
11155         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11156                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11157                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
11158                 udelay(40);
11159         }
11160
11161         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11162                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11163         }
11164         else {
11165                 u32 grc_mode;
11166
11167                 ret = tg3_nvram_lock(tp);
11168                 if (ret)
11169                         return ret;
11170
11171                 tg3_enable_nvram_access(tp);
11172                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11173                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11174                         tw32(NVRAM_WRITE1, 0x406);
11175
11176                 grc_mode = tr32(GRC_MODE);
11177                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11178
11179                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11180                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11181
11182                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
11183                                 buf);
11184                 }
11185                 else {
11186                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11187                                 buf);
11188                 }
11189
11190                 grc_mode = tr32(GRC_MODE);
11191                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11192
11193                 tg3_disable_nvram_access(tp);
11194                 tg3_nvram_unlock(tp);
11195         }
11196
11197         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11198                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11199                 udelay(40);
11200         }
11201
11202         return ret;
11203 }
11204
/* One entry of the hardcoded PCI-subsystem-ID -> PHY ID fallback table
 * consulted when the eeprom carries no usable PHY information.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;	/* PCI subsystem vendor/device IDs */
	u32 phy_id;	/* 0 selects serdes handling in tg3_phy_probe() */
};
11209
/* Known boards, keyed by PCI subsystem vendor/device ID.  Used by
 * lookup_by_subsys() as a last-resort source for the PHY ID.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
11247
11248 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11249 {
11250         int i;
11251
11252         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11253                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11254                      tp->pdev->subsystem_vendor) &&
11255                     (subsys_id_to_phy_id[i].subsys_devid ==
11256                      tp->pdev->subsystem_device))
11257                         return &subsys_id_to_phy_id[i];
11258         }
11259         return NULL;
11260 }
11261
/* Pull the hardware configuration (PHY ID, LED mode, WOL/ASF/APE flags)
 * out of NIC SRAM, which the bootcode populates from the eeprom, and
 * cache it in *tp.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	/* 5906 keeps its config in the VCPU shadow register instead of
	 * SRAM; handle it and return early.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
		    device_may_wakeup(&tp->pdev->dev))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		return;
	}

	/* The rest of the config is only trusted if the SRAM signature
	 * matches; otherwise the defaults set above stand.
	 */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists on newer ASICs with a sane version. */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Re-pack the SRAM PHY id words into the driver's PHY id
		 * layout (same packing as tg3_phy_probe()).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED quirks override the SRAM setting. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			/* Arima boards 0x205a/0x2063 are an exception to
			 * the write-protect bit.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) &&
		    device_may_wakeup(&tp->pdev->dev))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}

		if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
	}
}
11464
11465 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11466 {
11467         int i;
11468         u32 val;
11469
11470         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11471         tw32(OTP_CTRL, cmd);
11472
11473         /* Wait for up to 1 ms for command to execute. */
11474         for (i = 0; i < 100; i++) {
11475                 val = tr32(OTP_STATUS);
11476                 if (val & OTP_STATUS_CMD_DONE)
11477                         break;
11478                 udelay(10);
11479         }
11480
11481         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11482 }
11483
11484 /* Read the gphy configuration from the OTP region of the chip.  The gphy
11485  * configuration is a 32-bit value that straddles the alignment boundary.
11486  * We do two 32-bit reads and then shift and merge the results.
11487  */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register interface. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	/* A return of 0 on any failure below means "no OTP config". */
	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* The 32-bit gphy config straddles the two words: the low half
	 * of the first read supplies the top 16 bits, the high half of
	 * the second read the bottom 16 bits.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
11513
/* Identify the PHY attached to this MAC.  Tries, in order: phylib (if
 * enabled), the PHY's own ID registers, the value cached from eeprom by
 * tg3_get_eeprom_hw_cfg(), and finally the hardcoded subsystem-ID table.
 * Also restarts autonegotiation with full advertisement when safe.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firwmare access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Same PHY id packing as tg3_get_eeprom_hw_cfg(). */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Only touch the copper PHY when no firmware agent owns it. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR is read twice: link status is latched-low, so the
		 * first read clears a stale latch.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 steppings must advertise as master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): this re-runs the 5401 DSP init a second time when
	 * the first call succeeded — presumably a deliberate double
	 * initialization; confirm before removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
11644
/* Extract the board part number from the VPD area into
 * tp->board_part_number, falling back to a fixed string when the VPD
 * cannot be read or parsed.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Valid tg3 eeprom image: the VPD block sits at nvram
		 * offset 0x100.
		 */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		int vpd_cap;

		/* No tg3 magic: read the VPD through the PCI VPD
		 * capability registers instead.
		 */
		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			__le32 v;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			/* Poll the completion flag (bit 15) for ~100 ms. */
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			v = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &v, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* Tags 0x82/0x91 carry a 16-bit little-endian length in
		 * the following two bytes: skip over the whole resource.
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk the keyword entries in the 0x90 block for "PN". */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
11745
11746 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11747 {
11748         u32 val;
11749
11750         if (tg3_nvram_read_swab(tp, offset, &val) ||
11751             (val & 0xfc000000) != 0x0c000000 ||
11752             tg3_nvram_read_swab(tp, offset + 4, &val) ||
11753             val != 0)
11754                 return 0;
11755
11756         return 1;
11757 }
11758
/* Read the bootcode firmware version string out of NVRAM into
 * tp->fw_ver and, when ASF is enabled (and the APE is not managing
 * the NIC), append the ASF initialization firmware version.
 * Returns silently, leaving tp->fw_ver unchanged or partially
 * filled, if the NVRAM magic is wrong or any NVRAM read fails.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	/* Bail out unless NVRAM carries the expected signature word. */
	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc holds the bootcode image's NVRAM address, word 0x4
	 * the address the image is linked to run at.
	 */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* ver_offset is relative to the image's run address; convert
	 * it to an absolute NVRAM offset.
	 */
	offset = offset + ver_offset - start;
	/* Copy the 16-byte bootcode version string. */
	for (i = 0; i < 16; i += 4) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset + i, &v))
			return;

		memcpy(tp->fw_ver + i, &v, 4);
	}

	/* Only append the ASF firmware version when ASF is enabled
	 * and the APE is not in charge of the device.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Scan the NVRAM directory for the ASF INI entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	/* Loop ran off the end: no ASF INI directory entry present. */
	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 chips use a fixed run address; later chips store it
	 * in the word preceding the directory entry.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	/* Append ", " followed by up to 16 bytes of ASF version text,
	 * taking care never to overrun the TG3_VER_SIZE buffer.
	 */
	bcnt = strlen(tp->fw_ver);

	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	for (i = 0; i < 4; i++) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Truncate the final copy when a full word won't fit. */
		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
		bcnt += sizeof(v);
	}

	/* Guarantee NUL termination regardless of how much was copied. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
11842
11843 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11844
11845 static int __devinit tg3_get_invariants(struct tg3 *tp)
11846 {
11847         static struct pci_device_id write_reorder_chipsets[] = {
11848                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11849                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11850                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11851                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11852                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11853                              PCI_DEVICE_ID_VIA_8385_0) },
11854                 { },
11855         };
11856         u32 misc_ctrl_reg;
11857         u32 cacheline_sz_reg;
11858         u32 pci_state_reg, grc_misc_cfg;
11859         u32 val;
11860         u16 pci_cmd;
11861         int err, pcie_cap;
11862
11863         /* Force memory write invalidate off.  If we leave it on,
11864          * then on 5700_BX chips we have to enable a workaround.
11865          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11866          * to match the cacheline size.  The Broadcom driver have this
11867          * workaround but turns MWI off all the times so never uses
11868          * it.  This seems to suggest that the workaround is insufficient.
11869          */
11870         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11871         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11872         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11873
11874         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11875          * has the register indirect write enable bit set before
11876          * we try to access any of the MMIO registers.  It is also
11877          * critical that the PCI-X hw workaround situation is decided
11878          * before that as well.
11879          */
11880         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11881                               &misc_ctrl_reg);
11882
11883         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11884                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11885         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11886                 u32 prod_id_asic_rev;
11887
11888                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11889                                       &prod_id_asic_rev);
11890                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11891         }
11892
11893         /* Wrong chip ID in 5752 A0. This code can be removed later
11894          * as A0 is not in production.
11895          */
11896         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11897                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11898
11899         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11900          * we need to disable memory and use config. cycles
11901          * only to access all registers. The 5702/03 chips
11902          * can mistakenly decode the special cycles from the
11903          * ICH chipsets as memory write cycles, causing corruption
11904          * of register and memory space. Only certain ICH bridges
11905          * will drive special cycles with non-zero data during the
11906          * address phase which can fall within the 5703's address
11907          * range. This is not an ICH bug as the PCI spec allows
11908          * non-zero address during special cycles. However, only
11909          * these ICH bridges are known to drive non-zero addresses
11910          * during special cycles.
11911          *
11912          * Since special cycles do not cross PCI bridges, we only
11913          * enable this workaround if the 5703 is on the secondary
11914          * bus of these ICH bridges.
11915          */
11916         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11917             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11918                 static struct tg3_dev_id {
11919                         u32     vendor;
11920                         u32     device;
11921                         u32     rev;
11922                 } ich_chipsets[] = {
11923                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11924                           PCI_ANY_ID },
11925                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11926                           PCI_ANY_ID },
11927                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11928                           0xa },
11929                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11930                           PCI_ANY_ID },
11931                         { },
11932                 };
11933                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11934                 struct pci_dev *bridge = NULL;
11935
11936                 while (pci_id->vendor != 0) {
11937                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11938                                                 bridge);
11939                         if (!bridge) {
11940                                 pci_id++;
11941                                 continue;
11942                         }
11943                         if (pci_id->rev != PCI_ANY_ID) {
11944                                 if (bridge->revision > pci_id->rev)
11945                                         continue;
11946                         }
11947                         if (bridge->subordinate &&
11948                             (bridge->subordinate->number ==
11949                              tp->pdev->bus->number)) {
11950
11951                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11952                                 pci_dev_put(bridge);
11953                                 break;
11954                         }
11955                 }
11956         }
11957
11958         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11959                 static struct tg3_dev_id {
11960                         u32     vendor;
11961                         u32     device;
11962                 } bridge_chipsets[] = {
11963                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11964                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11965                         { },
11966                 };
11967                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11968                 struct pci_dev *bridge = NULL;
11969
11970                 while (pci_id->vendor != 0) {
11971                         bridge = pci_get_device(pci_id->vendor,
11972                                                 pci_id->device,
11973                                                 bridge);
11974                         if (!bridge) {
11975                                 pci_id++;
11976                                 continue;
11977                         }
11978                         if (bridge->subordinate &&
11979                             (bridge->subordinate->number <=
11980                              tp->pdev->bus->number) &&
11981                             (bridge->subordinate->subordinate >=
11982                              tp->pdev->bus->number)) {
11983                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11984                                 pci_dev_put(bridge);
11985                                 break;
11986                         }
11987                 }
11988         }
11989
11990         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11991          * DMA addresses > 40-bit. This bridge may have other additional
11992          * 57xx devices behind it in some 4-port NIC designs for example.
11993          * Any tg3 device found behind the bridge will also need the 40-bit
11994          * DMA workaround.
11995          */
11996         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11997             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11998                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11999                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12000                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12001         }
12002         else {
12003                 struct pci_dev *bridge = NULL;
12004
12005                 do {
12006                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12007                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
12008                                                 bridge);
12009                         if (bridge && bridge->subordinate &&
12010                             (bridge->subordinate->number <=
12011                              tp->pdev->bus->number) &&
12012                             (bridge->subordinate->subordinate >=
12013                              tp->pdev->bus->number)) {
12014                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12015                                 pci_dev_put(bridge);
12016                                 break;
12017                         }
12018                 } while (bridge);
12019         }
12020
12021         /* Initialize misc host control in PCI block. */
12022         tp->misc_host_ctrl |= (misc_ctrl_reg &
12023                                MISC_HOST_CTRL_CHIPREV);
12024         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12025                                tp->misc_host_ctrl);
12026
12027         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12028                               &cacheline_sz_reg);
12029
12030         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
12031         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
12032         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
12033         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
12034
12035         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12036             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12037                 tp->pdev_peer = tg3_find_peer(tp);
12038
12039         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12040             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12041             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12042             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12043             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12044             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12045             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12046             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12047             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12048                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12049
12050         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12051             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12052                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12053
12054         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12055                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12056                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12057                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12058                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12059                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12060                      tp->pdev_peer == tp->pdev))
12061                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12062
12063                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12064                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12065                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12066                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12067                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12068                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12069                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12070                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12071                 } else {
12072                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12073                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12074                                 ASIC_REV_5750 &&
12075                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12076                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12077                 }
12078         }
12079
12080         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12081              (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12082                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12083
12084         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12085         if (pcie_cap != 0) {
12086                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12087
12088                 pcie_set_readrq(tp->pdev, 4096);
12089
12090                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12091                         u16 lnkctl;
12092
12093                         pci_read_config_word(tp->pdev,
12094                                              pcie_cap + PCI_EXP_LNKCTL,
12095                                              &lnkctl);
12096                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12097                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12098                 }
12099         }
12100
12101         /* If we have an AMD 762 or VIA K8T800 chipset, write
12102          * reordering to the mailbox registers done by the host
12103          * controller can cause major troubles.  We read back from
12104          * every mailbox register write to force the writes to be
12105          * posted to the chip in order.
12106          */
12107         if (pci_dev_present(write_reorder_chipsets) &&
12108             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12109                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12110
12111         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12112             tp->pci_lat_timer < 64) {
12113                 tp->pci_lat_timer = 64;
12114
12115                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
12116                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
12117                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
12118                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
12119
12120                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12121                                        cacheline_sz_reg);
12122         }
12123
12124         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12125             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12126                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12127                 if (!tp->pcix_cap) {
12128                         printk(KERN_ERR PFX "Cannot find PCI-X "
12129                                             "capability, aborting.\n");
12130                         return -EIO;
12131                 }
12132         }
12133
12134         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12135                               &pci_state_reg);
12136
12137         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
12138                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12139
12140                 /* If this is a 5700 BX chipset, and we are in PCI-X
12141                  * mode, enable register write workaround.
12142                  *
12143                  * The workaround is to use indirect register accesses
12144                  * for all chip writes not to mailbox registers.
12145                  */
12146                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12147                         u32 pm_reg;
12148
12149                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12150
12151                         /* The chip can have it's power management PCI config
12152                          * space registers clobbered due to this bug.
12153                          * So explicitly force the chip into D0 here.
12154                          */
12155                         pci_read_config_dword(tp->pdev,
12156                                               tp->pm_cap + PCI_PM_CTRL,
12157                                               &pm_reg);
12158                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12159                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12160                         pci_write_config_dword(tp->pdev,
12161                                                tp->pm_cap + PCI_PM_CTRL,
12162                                                pm_reg);
12163
12164                         /* Also, force SERR#/PERR# in PCI command. */
12165                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12166                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12167                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12168                 }
12169         }
12170
12171         /* 5700 BX chips need to have their TX producer index mailboxes
12172          * written twice to workaround a bug.
12173          */
12174         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12175                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12176
12177         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12178                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12179         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12180                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12181
12182         /* Chip-specific fixup from Broadcom driver */
12183         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12184             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12185                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12186                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12187         }
12188
12189         /* Default fast path register access methods */
12190         tp->read32 = tg3_read32;
12191         tp->write32 = tg3_write32;
12192         tp->read32_mbox = tg3_read32;
12193         tp->write32_mbox = tg3_write32;
12194         tp->write32_tx_mbox = tg3_write32;
12195         tp->write32_rx_mbox = tg3_write32;
12196
12197         /* Various workaround register access methods */
12198         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12199                 tp->write32 = tg3_write_indirect_reg32;
12200         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12201                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12202                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12203                 /*
12204                  * Back to back register writes can cause problems on these
12205                  * chips, the workaround is to read back all reg writes
12206                  * except those to mailbox regs.
12207                  *
12208                  * See tg3_write_indirect_reg32().
12209                  */
12210                 tp->write32 = tg3_write_flush_reg32;
12211         }
12212
12213
12214         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12215             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12216                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12217                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12218                         tp->write32_rx_mbox = tg3_write_flush_reg32;
12219         }
12220
12221         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12222                 tp->read32 = tg3_read_indirect_reg32;
12223                 tp->write32 = tg3_write_indirect_reg32;
12224                 tp->read32_mbox = tg3_read_indirect_mbox;
12225                 tp->write32_mbox = tg3_write_indirect_mbox;
12226                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12227                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12228
12229                 iounmap(tp->regs);
12230                 tp->regs = NULL;
12231
12232                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12233                 pci_cmd &= ~PCI_COMMAND_MEMORY;
12234                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12235         }
12236         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12237                 tp->read32_mbox = tg3_read32_mbox_5906;
12238                 tp->write32_mbox = tg3_write32_mbox_5906;
12239                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12240                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12241         }
12242
12243         if (tp->write32 == tg3_write_indirect_reg32 ||
12244             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12245              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12246               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12247                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12248
12249         /* Get eeprom hw config before calling tg3_set_power_state().
12250          * In particular, the TG3_FLG2_IS_NIC flag must be
12251          * determined before calling tg3_set_power_state() so that
12252          * we know whether or not to switch out of Vaux power.
12253          * When the flag is set, it means that GPIO1 is used for eeprom
12254          * write protect and also implies that it is a LOM where GPIOs
12255          * are not used to switch power.
12256          */
12257         tg3_get_eeprom_hw_cfg(tp);
12258
12259         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12260                 /* Allow reads and writes to the
12261                  * APE register and memory space.
12262                  */
12263                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12264                                  PCISTATE_ALLOW_APE_SHMEM_WR;
12265                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12266                                        pci_state_reg);
12267         }
12268
12269         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12270             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12271             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12272                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12273
12274                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
12275                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
12276                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
12277                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
12278                         tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
12279         }
12280
12281         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12282          * GPIO1 driven high will bring 5700's external PHY out of reset.
12283          * It is also used as eeprom write protect on LOMs.
12284          */
12285         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12286         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12287             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12288                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12289                                        GRC_LCLCTRL_GPIO_OUTPUT1);
12290         /* Unused GPIO3 must be driven as output on 5752 because there
12291          * are no pull-up resistors on unused GPIO pins.
12292          */
12293         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12294                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12295
12296         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12297                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12298
12299         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12300                 /* Turn off the debug UART. */
12301                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12302                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12303                         /* Keep VMain power. */
12304                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12305                                               GRC_LCLCTRL_GPIO_OUTPUT0;
12306         }
12307
12308         /* Force the chip into D0. */
12309         err = tg3_set_power_state(tp, PCI_D0);
12310         if (err) {
12311                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12312                        pci_name(tp->pdev));
12313                 return err;
12314         }
12315
12316         /* 5700 B0 chips do not support checksumming correctly due
12317          * to hardware bugs.
12318          */
12319         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12320                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12321
12322         /* Derive initial jumbo mode from MTU assigned in
12323          * ether_setup() via the alloc_etherdev() call
12324          */
12325         if (tp->dev->mtu > ETH_DATA_LEN &&
12326             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12327                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12328
12329         /* Determine WakeOnLan speed to use. */
12330         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12331             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12332             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12333             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12334                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12335         } else {
12336                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12337         }
12338
12339         /* A few boards don't want Ethernet@WireSpeed phy feature */
12340         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12341             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12342              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12343              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12344             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12345             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12346                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12347
12348         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12349             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12350                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12351         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12352                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12353
12354         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12355                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12356                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12357                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12358                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12359                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12360                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12361                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12362                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12363                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12364                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12365                            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
12366                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12367         }
12368
12369         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12370             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12371                 tp->phy_otp = tg3_read_otp_phycfg(tp);
12372                 if (tp->phy_otp == 0)
12373                         tp->phy_otp = TG3_OTP_DEFAULT;
12374         }
12375
12376         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12377                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12378         else
12379                 tp->mi_mode = MAC_MI_MODE_BASE;
12380
12381         tp->coalesce_mode = 0;
12382         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12383             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12384                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12385
12386         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12387                 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12388
12389         err = tg3_mdio_init(tp);
12390         if (err)
12391                 return err;
12392
12393         /* Initialize data/descriptor byte/word swapping. */
12394         val = tr32(GRC_MODE);
12395         val &= GRC_MODE_HOST_STACKUP;
12396         tw32(GRC_MODE, val | tp->grc_mode);
12397
12398         tg3_switch_clocks(tp);
12399
12400         /* Clear this out for sanity. */
12401         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12402
12403         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12404                               &pci_state_reg);
12405         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12406             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12407                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12408
12409                 if (chiprevid == CHIPREV_ID_5701_A0 ||
12410                     chiprevid == CHIPREV_ID_5701_B0 ||
12411                     chiprevid == CHIPREV_ID_5701_B2 ||
12412                     chiprevid == CHIPREV_ID_5701_B5) {
12413                         void __iomem *sram_base;
12414
12415                         /* Write some dummy words into the SRAM status block
12416                          * area, see if it reads back correctly.  If the return
12417                          * value is bad, force enable the PCIX workaround.
12418                          */
12419                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12420
12421                         writel(0x00000000, sram_base);
12422                         writel(0x00000000, sram_base + 4);
12423                         writel(0xffffffff, sram_base + 4);
12424                         if (readl(sram_base) != 0x00000000)
12425                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12426                 }
12427         }
12428
12429         udelay(50);
12430         tg3_nvram_init(tp);
12431
12432         grc_misc_cfg = tr32(GRC_MISC_CFG);
12433         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12434
12435         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12436             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12437              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12438                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12439
12440         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12441             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12442                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12443         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12444                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12445                                       HOSTCC_MODE_CLRTICK_TXBD);
12446
12447                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12448                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12449                                        tp->misc_host_ctrl);
12450         }
12451
12452         /* Preserve the APE MAC_MODE bits */
12453         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12454                 tp->mac_mode = tr32(MAC_MODE) |
12455                                MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12456         else
12457                 tp->mac_mode = TG3_DEF_MAC_MODE;
12458
12459         /* these are limited to 10/100 only */
12460         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12461              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12462             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12463              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12464              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12465               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12466               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12467             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12468              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12469               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12470               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12471             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12472                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12473
12474         err = tg3_phy_probe(tp);
12475         if (err) {
12476                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12477                        pci_name(tp->pdev), err);
12478                 /* ... but do not return immediately ... */
12479                 tg3_mdio_fini(tp);
12480         }
12481
12482         tg3_read_partno(tp);
12483         tg3_read_fw_ver(tp);
12484
12485         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12486                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12487         } else {
12488                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12489                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12490                 else
12491                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12492         }
12493
12494         /* 5700 {AX,BX} chips have a broken status block link
12495          * change bit implementation, so we must use the
12496          * status register in those cases.
12497          */
12498         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12499                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12500         else
12501                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12502
12503         /* The led_ctrl is set during tg3_phy_probe, here we might
12504          * have to force the link status polling mechanism based
12505          * upon subsystem IDs.
12506          */
12507         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12508             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12509             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12510                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12511                                   TG3_FLAG_USE_LINKCHG_REG);
12512         }
12513
12514         /* For all SERDES we poll the MAC status register. */
12515         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12516                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12517         else
12518                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12519
12520         /* All chips before 5787 can get confused if TX buffers
12521          * straddle the 4GB address boundary in some cases.
12522          */
12523         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12524             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12525             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12526             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12527             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12528             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12529                 tp->dev->hard_start_xmit = tg3_start_xmit;
12530         else
12531                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
12532
12533         tp->rx_offset = 2;
12534         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12535             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12536                 tp->rx_offset = 0;
12537
12538         tp->rx_std_max_post = TG3_RX_RING_SIZE;
12539
12540         /* Increment the rx prod index on the rx std ring by at most
12541          * 8 for these chips to workaround hw errata.
12542          */
12543         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12544             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12545             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12546                 tp->rx_std_max_post = 8;
12547
12548         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12549                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12550                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12551
12552         return err;
12553 }
12554
12555 #ifdef CONFIG_SPARC
12556 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12557 {
12558         struct net_device *dev = tp->dev;
12559         struct pci_dev *pdev = tp->pdev;
12560         struct device_node *dp = pci_device_to_OF_node(pdev);
12561         const unsigned char *addr;
12562         int len;
12563
12564         addr = of_get_property(dp, "local-mac-address", &len);
12565         if (addr && len == 6) {
12566                 memcpy(dev->dev_addr, addr, 6);
12567                 memcpy(dev->perm_addr, dev->dev_addr, 6);
12568                 return 0;
12569         }
12570         return -ENODEV;
12571 }
12572
12573 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12574 {
12575         struct net_device *dev = tp->dev;
12576
12577         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12578         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12579         return 0;
12580 }
12581 #endif
12582
/* Determine the device's MAC address.
 *
 * Probe order: OpenFirmware property (SPARC only), then the
 * bootcode's MAC address mailbox in NIC SRAM, then NVRAM, and
 * finally the live MAC address registers as a last resort.
 *
 * Returns 0 on success with dev->dev_addr and dev->perm_addr set,
 * or -EINVAL if no valid address could be found anywhere.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Dual-MAC parts keep the second function's address at a
		 * different NVRAM offset.
		 */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* If the NVRAM arbitration lock cannot be taken, reset the
		 * NVRAM command engine; otherwise just release the lock.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {	/* 0x484b == ASCII "HK" signature */
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM.  Note the byte extraction order differs
		 * from the SRAM mailbox path above.
		 */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
12657
12658 #define BOUNDARY_SINGLE_CACHELINE       1
12659 #define BOUNDARY_MULTI_CACHELINE        2
12660
/* Fold the DMA read/write boundary bits into a DMA_RW_CTRL value.
 *
 * @tp:  device state (chip revision and bus-mode flags)
 * @val: DMA_RW_CTRL value accumulated so far
 *
 * Returns @val with boundary bits OR-ed in.  The boundary setting
 * limits how DMA bursts may cross cache-line boundaries; the desired
 * policy is selected at compile time from the host architecture.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* The PCI cache line size register counts 4-byte dwords. */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;	/* register unset: assume the max */
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

	/* Choose a boundary policy based on the host architecture. */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		/* PCI-X: only the 128 and 384 boundary encodings exist. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCI Express: only write-side boundary control exists. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: for a single-cacheline policy match
		 * the boundary to the cache line size; the fallthroughs
		 * let a multi-cacheline policy pick the next size up.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
12797
/* Run one transaction through the chip's internal DMA engine and
 * poll for completion.
 *
 * @tp:        device state
 * @buf:       kernel virtual address of the host test buffer (unused
 *             here; the chip works off @buf_dma)
 * @buf_dma:   bus address of the host test buffer
 * @size:      transfer length in bytes
 * @to_device: non-zero for a read DMA (host memory -> chip),
 *             zero for a write DMA (chip -> host memory)
 *
 * Returns 0 if the descriptor shows up in the completion FIFO within
 * the poll window, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the completion FIFOs and DMA status before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build an internal buffer descriptor: host side is @buf_dma,
	 * NIC side is mbuf 0x2100 in on-chip memory.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM word-by-word through the
	 * PCI memory window, then park the window back at 0.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Enqueue the descriptor address to start the DMA. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO, up to 40 * 100us = 4ms total. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
12878
12879 #define TEST_BUFFER_SIZE        0x2000
12880
12881 static int __devinit tg3_test_dma(struct tg3 *tp)
12882 {
12883         dma_addr_t buf_dma;
12884         u32 *buf, saved_dma_rwctrl;
12885         int ret;
12886
12887         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12888         if (!buf) {
12889                 ret = -ENOMEM;
12890                 goto out_nofree;
12891         }
12892
12893         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12894                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12895
12896         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12897
12898         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12899                 /* DMA read watermark not used on PCIE */
12900                 tp->dma_rwctrl |= 0x00180000;
12901         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12902                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12903                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12904                         tp->dma_rwctrl |= 0x003f0000;
12905                 else
12906                         tp->dma_rwctrl |= 0x003f000f;
12907         } else {
12908                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12909                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12910                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12911                         u32 read_water = 0x7;
12912
12913                         /* If the 5704 is behind the EPB bridge, we can
12914                          * do the less restrictive ONE_DMA workaround for
12915                          * better performance.
12916                          */
12917                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12918                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12919                                 tp->dma_rwctrl |= 0x8000;
12920                         else if (ccval == 0x6 || ccval == 0x7)
12921                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12922
12923                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12924                                 read_water = 4;
12925                         /* Set bit 23 to enable PCIX hw bug fix */
12926                         tp->dma_rwctrl |=
12927                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12928                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12929                                 (1 << 23);
12930                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12931                         /* 5780 always in PCIX mode */
12932                         tp->dma_rwctrl |= 0x00144000;
12933                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12934                         /* 5714 always in PCIX mode */
12935                         tp->dma_rwctrl |= 0x00148000;
12936                 } else {
12937                         tp->dma_rwctrl |= 0x001b000f;
12938                 }
12939         }
12940
12941         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12942             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12943                 tp->dma_rwctrl &= 0xfffffff0;
12944
12945         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12946             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12947                 /* Remove this if it causes problems for some boards. */
12948                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12949
12950                 /* On 5700/5701 chips, we need to set this bit.
12951                  * Otherwise the chip will issue cacheline transactions
12952                  * to streamable DMA memory with not all the byte
12953                  * enables turned on.  This is an error on several
12954                  * RISC PCI controllers, in particular sparc64.
12955                  *
12956                  * On 5703/5704 chips, this bit has been reassigned
12957                  * a different meaning.  In particular, it is used
12958                  * on those chips to enable a PCI-X workaround.
12959                  */
12960                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12961         }
12962
12963         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12964
12965 #if 0
12966         /* Unneeded, already done by tg3_get_invariants.  */
12967         tg3_switch_clocks(tp);
12968 #endif
12969
12970         ret = 0;
12971         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12972             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12973                 goto out;
12974
12975         /* It is best to perform DMA test with maximum write burst size
12976          * to expose the 5700/5701 write DMA bug.
12977          */
12978         saved_dma_rwctrl = tp->dma_rwctrl;
12979         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12980         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12981
12982         while (1) {
12983                 u32 *p = buf, i;
12984
12985                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12986                         p[i] = i;
12987
12988                 /* Send the buffer to the chip. */
12989                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12990                 if (ret) {
12991                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12992                         break;
12993                 }
12994
12995 #if 0
12996                 /* validate data reached card RAM correctly. */
12997                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12998                         u32 val;
12999                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
13000                         if (le32_to_cpu(val) != p[i]) {
13001                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
13002                                 /* ret = -ENODEV here? */
13003                         }
13004                         p[i] = 0;
13005                 }
13006 #endif
13007                 /* Now read it back. */
13008                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13009                 if (ret) {
13010                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
13011
13012                         break;
13013                 }
13014
13015                 /* Verify it. */
13016                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13017                         if (p[i] == i)
13018                                 continue;
13019
13020                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13021                             DMA_RWCTRL_WRITE_BNDRY_16) {
13022                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13023                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13024                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13025                                 break;
13026                         } else {
13027                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13028                                 ret = -ENODEV;
13029                                 goto out;
13030                         }
13031                 }
13032
13033                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13034                         /* Success. */
13035                         ret = 0;
13036                         break;
13037                 }
13038         }
13039         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13040             DMA_RWCTRL_WRITE_BNDRY_16) {
13041                 static struct pci_device_id dma_wait_state_chipsets[] = {
13042                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13043                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13044                         { },
13045                 };
13046
13047                 /* DMA test passed without adjusting DMA boundary,
13048                  * now look for chipsets that are known to expose the
13049                  * DMA bug without failing the test.
13050                  */
13051                 if (pci_dev_present(dma_wait_state_chipsets)) {
13052                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13053                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13054                 }
13055                 else
13056                         /* Safe to use the calculated DMA boundary. */
13057                         tp->dma_rwctrl = saved_dma_rwctrl;
13058
13059                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13060         }
13061
13062 out:
13063         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13064 out_nofree:
13065         return ret;
13066 }
13067
13068 static void __devinit tg3_init_link_config(struct tg3 *tp)
13069 {
13070         tp->link_config.advertising =
13071                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13072                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13073                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13074                  ADVERTISED_Autoneg | ADVERTISED_MII);
13075         tp->link_config.speed = SPEED_INVALID;
13076         tp->link_config.duplex = DUPLEX_INVALID;
13077         tp->link_config.autoneg = AUTONEG_ENABLE;
13078         tp->link_config.active_speed = SPEED_INVALID;
13079         tp->link_config.active_duplex = DUPLEX_INVALID;
13080         tp->link_config.phy_is_low_power = 0;
13081         tp->link_config.orig_speed = SPEED_INVALID;
13082         tp->link_config.orig_duplex = DUPLEX_INVALID;
13083         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13084 }
13085
13086 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13087 {
13088         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13089                 tp->bufmgr_config.mbuf_read_dma_low_water =
13090                         DEFAULT_MB_RDMA_LOW_WATER_5705;
13091                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13092                         DEFAULT_MB_MACRX_LOW_WATER_5705;
13093                 tp->bufmgr_config.mbuf_high_water =
13094                         DEFAULT_MB_HIGH_WATER_5705;
13095                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13096                         tp->bufmgr_config.mbuf_mac_rx_low_water =
13097                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
13098                         tp->bufmgr_config.mbuf_high_water =
13099                                 DEFAULT_MB_HIGH_WATER_5906;
13100                 }
13101
13102                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13103                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13104                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13105                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13106                 tp->bufmgr_config.mbuf_high_water_jumbo =
13107                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13108         } else {
13109                 tp->bufmgr_config.mbuf_read_dma_low_water =
13110                         DEFAULT_MB_RDMA_LOW_WATER;
13111                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13112                         DEFAULT_MB_MACRX_LOW_WATER;
13113                 tp->bufmgr_config.mbuf_high_water =
13114                         DEFAULT_MB_HIGH_WATER;
13115
13116                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13117                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13118                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13119                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13120                 tp->bufmgr_config.mbuf_high_water_jumbo =
13121                         DEFAULT_MB_HIGH_WATER_JUMBO;
13122         }
13123
13124         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13125         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13126 }
13127
13128 static char * __devinit tg3_phy_string(struct tg3 *tp)
13129 {
13130         switch (tp->phy_id & PHY_ID_MASK) {
13131         case PHY_ID_BCM5400:    return "5400";
13132         case PHY_ID_BCM5401:    return "5401";
13133         case PHY_ID_BCM5411:    return "5411";
13134         case PHY_ID_BCM5701:    return "5701";
13135         case PHY_ID_BCM5703:    return "5703";
13136         case PHY_ID_BCM5704:    return "5704";
13137         case PHY_ID_BCM5705:    return "5705";
13138         case PHY_ID_BCM5750:    return "5750";
13139         case PHY_ID_BCM5752:    return "5752";
13140         case PHY_ID_BCM5714:    return "5714";
13141         case PHY_ID_BCM5780:    return "5780";
13142         case PHY_ID_BCM5755:    return "5755";
13143         case PHY_ID_BCM5787:    return "5787";
13144         case PHY_ID_BCM5784:    return "5784";
13145         case PHY_ID_BCM5756:    return "5722/5756";
13146         case PHY_ID_BCM5906:    return "5906";
13147         case PHY_ID_BCM5761:    return "5761";
13148         case PHY_ID_BCM8002:    return "8002/serdes";
13149         case 0:                 return "serdes";
13150         default:                return "unknown";
13151         }
13152 }
13153
13154 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13155 {
13156         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13157                 strcpy(str, "PCI Express");
13158                 return str;
13159         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13160                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13161
13162                 strcpy(str, "PCIX:");
13163
13164                 if ((clock_ctrl == 7) ||
13165                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13166                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13167                         strcat(str, "133MHz");
13168                 else if (clock_ctrl == 0)
13169                         strcat(str, "33MHz");
13170                 else if (clock_ctrl == 2)
13171                         strcat(str, "50MHz");
13172                 else if (clock_ctrl == 4)
13173                         strcat(str, "66MHz");
13174                 else if (clock_ctrl == 6)
13175                         strcat(str, "100MHz");
13176         } else {
13177                 strcpy(str, "PCI:");
13178                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13179                         strcat(str, "66MHz");
13180                 else
13181                         strcat(str, "33MHz");
13182         }
13183         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13184                 strcat(str, ":32-bit");
13185         else
13186                 strcat(str, ":64-bit");
13187         return str;
13188 }
13189
/* Find the sibling PCI function of a dual-port chip (e.g. the 5704 is
 * two functions in one slot).  Returns the peer's pci_dev, or tp->pdev
 * itself when the chip is configured single-port.  The returned pointer
 * is NOT reference-counted -- see the comment below.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	/* Walk all eight functions of our own slot looking for a device
	 * that isn't us.  pci_get_slot() takes a reference; drop it on
	 * every non-match (pci_dev_put(NULL) is a no-op).
	 */
	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
13217
13218 static void __devinit tg3_init_coal(struct tg3 *tp)
13219 {
13220         struct ethtool_coalesce *ec = &tp->coal;
13221
13222         memset(ec, 0, sizeof(*ec));
13223         ec->cmd = ETHTOOL_GCOALESCE;
13224         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13225         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13226         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13227         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13228         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13229         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13230         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13231         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13232         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13233
13234         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13235                                  HOSTCC_MODE_CLRTICK_TXBD)) {
13236                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13237                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13238                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13239                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13240         }
13241
13242         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13243                 ec->rx_coalesce_usecs_irq = 0;
13244                 ec->tx_coalesce_usecs_irq = 0;
13245                 ec->stats_block_coalesce_usecs = 0;
13246         }
13247 }
13248
/* PCI probe routine: bring up one tg3 NIC.
 *
 * Enables and maps the device, allocates the net_device, reads chip
 * invariants, configures DMA masks, TSO/checksum features and the MAC
 * address, resets/tests the DMA engine, and finally registers the
 * netdev.  On any failure the goto chain below unwinds exactly the
 * resources acquired so far.  Returns 0 or a negative errno.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	resource_size_t tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	/* Print the driver banner once, on the first probed device. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
#endif

	/* Initialize the private state embedded in the net_device. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	/* Map the register window (BAR 0). */
	dev->mem_start = pci_resource_start(pdev, BAR_0);
	tg3reg_len = pci_resource_len(pdev, BAR_0);
	dev->mem_end = dev->mem_start + tg3reg_len;

	tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up the net_device entry points. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Read chip revision, flags and quirks from the hardware. */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to a 32-bit mask if the wide mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Decide TSO capability: hardware TSO chips are always capable;
	 * a list of chips (and ASF firmware use) cannot do firmware TSO;
	 * everything else does firmware TSO but carries the TSO bug flag.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			dev->features |= NETIF_F_TSO_ECN;
	}


	/* 5705 A1 on a slow bus without TSO gets a shrunken RX ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* Map the APE (Application Processing Engine) registers, BAR 2,
	 * on chips that have one.
	 */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		if (!(pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM)) {
			printk(KERN_ERR PFX "Cannot find proper PCI device "
			       "base address for APE, aborting.\n");
			err = -ENODEV;
			goto err_out_iounmap;
		}

		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			dev->features |= NETIF_F_IPV6_CSUM;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
	       "(%s) %s Ethernet %pM\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
		 "10/100/1000Base-T")),
	       dev->dev_addr);

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

	/* Error unwind: each label releases resources acquired before
	 * the corresponding failure point, in reverse order.
	 */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
13590
13591 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13592 {
13593         struct net_device *dev = pci_get_drvdata(pdev);
13594
13595         if (dev) {
13596                 struct tg3 *tp = netdev_priv(dev);
13597
13598                 flush_scheduled_work();
13599
13600                 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13601                         tg3_phy_fini(tp);
13602                         tg3_mdio_fini(tp);
13603                 }
13604
13605                 unregister_netdev(dev);
13606                 if (tp->aperegs) {
13607                         iounmap(tp->aperegs);
13608                         tp->aperegs = NULL;
13609                 }
13610                 if (tp->regs) {
13611                         iounmap(tp->regs);
13612                         tp->regs = NULL;
13613                 }
13614                 free_netdev(dev);
13615                 pci_release_regions(pdev);
13616                 pci_disable_device(pdev);
13617                 pci_set_drvdata(pdev, NULL);
13618         }
13619 }
13620
/* Power-management suspend hook.
 *
 * Saves PCI config state, and if the interface is up: quiesces deferred
 * work, the PHY, the data path and the timer, halts the chip, then
 * requests the target low-power state.  If the power transition fails,
 * the hardware is restarted so the device remains usable.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	pci_power_t target_state;
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Stop deferred work, the PHY and the data path before touching
	 * the hardware.
	 */
	flush_scheduled_work();
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip and mark init as needing to be redone. */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
	if (err) {
		int err2;

		/* Entering low power failed: bring the device back up so
		 * the interface keeps working.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
13682
/* Power-management resume hook.
 *
 * Restores PCI config state and, if the interface was up, returns the
 * chip to D0, restarts the hardware, the timer and the data path.
 * Returns 0 on success or a negative errno.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	/* Back to full power before touching any registers. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
13720
/* PCI driver glue: probe/remove plus legacy suspend/resume hooks. */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
13729
/* Module entry point: register the PCI driver. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
13734
/* Module exit point: unregister the PCI driver, detaching all devices. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);