Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[cascardo/linux.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2013 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50
51 #include <net/checksum.h>
52 #include <net/ip.h>
53
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65
66 #define BAR_0   0
67 #define BAR_2   2
68
69 #include "tg3.h"
70
71 /* Functions & macros to verify TG3_FLAGS types */
72
/* Return nonzero if @flag is set in the @bits bitmap (tp->tg3_flags).
 * The enum TG3_FLAGS parameter type gives compile-time checking that
 * callers pass a TG3_FLAG_* value rather than an arbitrary integer.
 */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}
77
/* Set @flag in the @bits bitmap; type-checked counterpart of set_bit(). */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}
82
/* Clear @flag in the @bits bitmap; type-checked counterpart of clear_bit(). */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}
87
/* Convenience wrappers: paste TG3_FLAG_ onto the short flag name and
 * operate on the device's tg3_flags bitmap via the type-checked helpers
 * above, e.g. tg3_flag(tp, ENABLE_APE).
 */
#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
94
#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     132
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 21, 2013"

/* Reset reason codes passed to firmware notification helpers. */
#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
/* Default netif_msg bitmap when the tg3_debug module parameter is -1. */
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

/* Byte sizes of the descriptor rings, derived from the entry counts above. */
#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
/* Advance a TX ring index with power-of-two wraparound. */
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

/* Firmware blobs requested via request_firmware() at probe/reset time. */
#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
218
219 static char version[] =
220         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
221
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229
230 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
233
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
236
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258                         TG3_DRV_DATA_FLAG_5705_10_100},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261                         TG3_DRV_DATA_FLAG_5705_10_100},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265                         TG3_DRV_DATA_FLAG_5705_10_100},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287                         PCI_VENDOR_ID_LENOVO,
288                         TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
348         {}
349 };
350
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
352
/* Names reported for ETH_SS_STATS via ethtool -S.
 * NOTE(review): the ordering here presumably must match the order in
 * which the driver fills the ethtool stats buffer elsewhere in this
 * file -- do not reorder entries without checking that code.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};
435
#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
/* Indices into ethtool_test_keys[] (and the ethtool self-test result
 * array); keep these in sync with the designated initializers below.
 */
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


/* Names reported for ETH_SS_TEST via ethtool self-test. */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
461
462
/* Posted MMIO write of @val to register offset @off in BAR 0. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}
467
/* MMIO read of register offset @off in BAR 0. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}
472
/* MMIO write to the APE (management processor) register space. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}
477
/* MMIO read from the APE (management processor) register space. */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}
482
/* Write a chip register indirectly through PCI config space: select the
 * target register via TG3PCI_REG_BASE_ADDR, then write the data word.
 * indirect_lock serializes the two-step select/write sequence against
 * other indirect accessors.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
492
/* MMIO write followed by a read-back of the same register to flush the
 * posted write to the device.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
498
/* Read a chip register indirectly through PCI config space; see
 * tg3_write_indirect_reg32() for the select/access protocol.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
510
/* Write a mailbox register indirectly through PCI config space.
 * Two mailboxes (the rx return ring consumer index and the std ring
 * producer index) have dedicated config-space aliases and are written
 * directly; all others go through the windowed select/write sequence
 * under indirect_lock.  The 0x5600 offset maps the mailbox region into
 * the indirect register window.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
540
/* Read a mailbox register indirectly through the PCI config window;
 * counterpart of tg3_write_indirect_mbox() (same 0x5600 mapping).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
552
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554  * where it is unsafe to read back the register without some delay.
555  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
557  */
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
559 {
560         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561                 /* Non-posted methods */
562                 tp->write32(tp, off, val);
563         else {
564                 /* Posted method */
565                 tg3_write32(tp, off, val);
566                 if (usec_wait)
567                         udelay(usec_wait);
568                 tp->read32(tp, off);
569         }
570         /* Wait again after the read for the posted method to guarantee that
571          * the wait time is met.
572          */
573         if (usec_wait)
574                 udelay(usec_wait);
575 }
576
/* Mailbox write with a conditional read-back flush: flush when posted
 * writes must be flushed, or when neither the mailbox write-reorder nor
 * the ICH workaround applies (the read would be unsafe/unnecessary in
 * those modes).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}
585
/* Write a TX mailbox.  Chips with the TXD mailbox hardware bug need the
 * value written twice; chips with write-reordering or posted-write flush
 * requirements need a read-back to force the write out.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}
596
/* 5906 mailbox read: mailboxes live at GRCMBOX_BASE on this chip. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}
601
/* 5906 mailbox write; see tg3_read32_mbox_5906() for the base offset. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}
606
/* Shorthand accessors; all dispatch through the per-chip function
 * pointers in struct tg3 (set up at probe time), so the same call sites
 * work for direct MMIO and indirect config-space access.  The _f
 * variants flush the write; tw32_wait_f also delays by @us microseconds.
 */
#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
617
/* Write @val into NIC SRAM at offset @off through the memory window.
 * On the 5906 the stats block region is not writable, so such writes
 * are silently dropped.  Depending on SRAM_USE_CONFIG the window is
 * programmed via PCI config space or via MMIO; either way the window
 * base is reset to zero afterwards and the sequence is serialized with
 * indirect_lock.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
642
/* Read a NIC SRAM word at offset @off into *@val; counterpart of
 * tg3_write_mem().  On the 5906 the stats block region cannot be read
 * this way, so *@val is forced to 0 for that range.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
669
670 static void tg3_ape_lock_init(struct tg3 *tp)
671 {
672         int i;
673         u32 regbase, bit;
674
675         if (tg3_asic_rev(tp) == ASIC_REV_5761)
676                 regbase = TG3_APE_LOCK_GRANT;
677         else
678                 regbase = TG3_APE_PER_LOCK_GRANT;
679
680         /* Make sure the driver hasn't any stale locks. */
681         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
682                 switch (i) {
683                 case TG3_APE_LOCK_PHY0:
684                 case TG3_APE_LOCK_PHY1:
685                 case TG3_APE_LOCK_PHY2:
686                 case TG3_APE_LOCK_PHY3:
687                         bit = APE_LOCK_GRANT_DRIVER;
688                         break;
689                 default:
690                         if (!tp->pci_fn)
691                                 bit = APE_LOCK_GRANT_DRIVER;
692                         else
693                                 bit = 1 << tp->pci_fn;
694                 }
695                 tg3_ape_write32(tp, regbase + 4 * i, bit);
696         }
697
698 }
699
/* Acquire APE hardware lock @locknum.
 * Returns 0 on success (or when APE is disabled / the lock does not
 * apply), -EINVAL for an unknown lock, -EBUSY if the grant is not seen
 * within ~1 ms.  The request is revoked on timeout so a later attempt
 * can succeed.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                /* 5761 has no GPIO lock; everyone else falls through and
                 * uses the same request bit as GRC/MEM.
                 */
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        /* 5761 uses the legacy request/grant register block. */
        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}
758
/* Release an APE hardware semaphore previously taken by tg3_ape_lock()
 * by writing our ownership bit back to the grant register.  Unknown
 * lock numbers and the 5761 GPIO case are silently ignored.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock to release. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Release with the same bit used to request the lock. */
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	/* 5761 uses the legacy grant register block. */
	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
794
/* Wait (up to timeout_us microseconds, polling in ~10 us steps) until
 * the APE has no event pending.  On success (return 0) the caller is
 * left HOLDING TG3_APE_LOCK_MEM so it can post its own event; on
 * -EBUSY the lock is not held.
 */
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		/* Previous event still pending: drop the lock, wait,
		 * and retry with the remaining budget.
		 */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
815
816 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
817 {
818         u32 i, apedata;
819
820         for (i = 0; i < timeout_us / 10; i++) {
821                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
822
823                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
824                         break;
825
826                 udelay(10);
827         }
828
829         return i == timeout_us / 10;
830 }
831
/* Read 'len' bytes from APE NCSI scratchpad memory starting at
 * 'base_off' into 'data', transferring in chunks no larger than the
 * shared message buffer.  Each chunk is requested via a
 * driver-to-APE scratchpad-read event and copied out of the shared
 * message area once the APE signals completion.  Returns 0 on
 * success or a negative errno.
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	/* Verify the APE shared memory segment is valid... */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	/* ...and that the APE firmware is up. */
	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Locate the shared message buffer and its capacity. */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		/* Describe the requested region in the message buffer. */
		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		/* Drop the MEM lock (held since tg3_ape_event_lock)
		 * and ring the APE doorbell.
		 */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		/* Copy the chunk out of the shared message area.
		 * NOTE(review): the countdown assumes 'length' is a
		 * multiple of 4; otherwise the u32 would wrap --
		 * confirm against callers.
		 */
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
895
/* Post one event to the APE firmware.  Validates the APE shared
 * memory signature and firmware readiness, waits for any previous
 * event to drain, then writes the event with the PENDING bit set and
 * rings the APE doorbell.  Returns 0 or a negative errno.
 */
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	/* Release the MEM lock taken by tg3_ape_event_lock() and ring
	 * the doorbell.
	 */
	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
922
/* Tell the APE firmware about a driver state transition (init,
 * shutdown, suspend) by updating the host segment in APE shared
 * memory and sending the matching state-change event.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment, bump the init counter and
		 * record the driver's identity and behavior flags.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* Record WoL state in APE shared memory when
		 * wake-on-LAN is armed for this device.
		 */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
979
980 static void tg3_disable_ints(struct tg3 *tp)
981 {
982         int i;
983
984         tw32(TG3PCI_MISC_HOST_CTRL,
985              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
986         for (i = 0; i < tp->irq_max; i++)
987                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
988 }
989
/* Re-enable chip interrupts: clear the PCI interrupt mask, ack each
 * vector's last processed tag, and kick the coalescing engine (or
 * force an interrupt if status was already updated).
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	/* Publish irq_sync = 0 before unmasking so other CPUs see it. */
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* Ack up to last_tag; with 1SHOT_MSI the same value is
		 * written a second time.
		 */
		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	/* Drop the first two vectors' bits from the cached value. */
	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1020
1021 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1022 {
1023         struct tg3 *tp = tnapi->tp;
1024         struct tg3_hw_status *sblk = tnapi->hw_status;
1025         unsigned int work_exists = 0;
1026
1027         /* check for phy events */
1028         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1029                 if (sblk->status & SD_STATUS_LINK_CHG)
1030                         work_exists = 1;
1031         }
1032
1033         /* check for TX work to do */
1034         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1035                 work_exists = 1;
1036
1037         /* check for RX work to do */
1038         if (tnapi->rx_rcb_prod_idx &&
1039             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1040                 work_exists = 1;
1041
1042         return work_exists;
1043 }
1044
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Ack work up to last_tag; mmiowb() orders the mailbox write
	 * before any later MMIO from another CPU.
	 */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1065
/* Switch the core clock configuration back to its base state.
 * Skipped on CPMU-equipped and 5780-class chips.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	/* Keep only the CLKRUN bits and the low 5 bits; cache the
	 * result for later restores.
	 */
	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step transition through ALTCLK before the final
		 * write below.  NOTE(review): presumably required
		 * hardware sequencing -- confirm against chip docs.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1098
1099 #define PHY_BUSY_LOOPS  5000
1100
/* Read PHY register 'reg' at MDIO address 'phy_addr' through the
 * MAC's MI (MDIO) interface, storing the 16-bit result in *val.
 * Returns 0 on success or -EBUSY if the interface stays busy for the
 * whole polling budget.
 */
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Manual MI access would race with auto-polling; pause it. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Serialize PHY access against the APE firmware. */
	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Build the MI command frame: PHY address, register, READ. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion, up to PHY_BUSY_LOOPS * 10 us. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle delay. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if we disabled it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1154
/* Read a PHY register at the device's default PHY address. */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
1159
/* Write 'val' to PHY register 'reg' at MDIO address 'phy_addr'
 * through the MAC's MI (MDIO) interface.  Returns 0 on success or
 * -EBUSY if the interface stays busy for the whole polling budget.
 */
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* On FET PHYs writes to these registers are skipped and
	 * treated as success.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Manual MI access would race with auto-polling; pause it. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Serialize PHY access against the APE firmware. */
	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Build the MI command frame: address, register, data, WRITE. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion, up to PHY_BUSY_LOOPS * 10 us. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if we disabled it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1213
/* Write a PHY register at the device's default PHY address. */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
1218
1219 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1220 {
1221         int err;
1222
1223         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1224         if (err)
1225                 goto done;
1226
1227         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1228         if (err)
1229                 goto done;
1230
1231         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1233         if (err)
1234                 goto done;
1235
1236         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1237
1238 done:
1239         return err;
1240 }
1241
1242 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1243 {
1244         int err;
1245
1246         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1247         if (err)
1248                 goto done;
1249
1250         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1251         if (err)
1252                 goto done;
1253
1254         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1256         if (err)
1257                 goto done;
1258
1259         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1260
1261 done:
1262         return err;
1263 }
1264
1265 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1266 {
1267         int err;
1268
1269         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1270         if (!err)
1271                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1272
1273         return err;
1274 }
1275
1276 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1277 {
1278         int err;
1279
1280         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1281         if (!err)
1282                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1283
1284         return err;
1285 }
1286
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1288 {
1289         int err;
1290
1291         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1294         if (!err)
1295                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1296
1297         return err;
1298 }
1299
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1301 {
1302         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303                 set |= MII_TG3_AUXCTL_MISC_WREN;
1304
1305         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1306 }
1307
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1309 {
1310         u32 val;
1311         int err;
1312
1313         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1314
1315         if (err)
1316                 return err;
1317         if (enable)
1318
1319                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1320         else
1321                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1322
1323         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1325
1326         return err;
1327 }
1328
/* Reset the PHY by setting BMCR_RESET and polling (up to 5000 * 10 us)
 * until the PHY clears the bit.  Returns 0 on success, -EBUSY on a
 * register access failure or timeout.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* limit ends at -1 only if the loop ran to exhaustion. */
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1359
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1361 {
1362         struct tg3 *tp = bp->priv;
1363         u32 val;
1364
1365         spin_lock_bh(&tp->lock);
1366
1367         if (tg3_readphy(tp, reg, &val))
1368                 val = -EIO;
1369
1370         spin_unlock_bh(&tp->lock);
1371
1372         return val;
1373 }
1374
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1376 {
1377         struct tg3 *tp = bp->priv;
1378         u32 ret = 0;
1379
1380         spin_lock_bh(&tp->lock);
1381
1382         if (tg3_writephy(tp, reg, val))
1383                 ret = -EIO;
1384
1385         spin_unlock_bh(&tp->lock);
1386
1387         return ret;
1388 }
1389
/* mii_bus .reset callback; intentionally a no-op for this driver. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1394
/* Program the 5785 MAC's PHY configuration and RGMII mode registers
 * (LED modes, in-band status bits, RX/TX clock timeouts) to match
 * the attached PHY model and interface mode.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		/* Unknown PHY: leave the MAC configuration untouched. */
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		/* Non-RGMII: set LED modes and clock timeouts only. */
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status enabled needs the mask/enable bits. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the external in-band settings in the RGMII mode reg. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1475
/* Disable MI auto-polling and, if the mdio bus is already registered
 * on a 5785, reapply its PHY-specific MAC configuration.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1486
/* Determine the PHY's MDIO address for this device and, when phylib
 * is in use, allocate and register the mdio bus and apply per-PHY
 * interface/quirk flags.  Returns 0 on success or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ devices map the PHY at pci_fn + 1, with serdes
		 * PHYs offset by a further 7 addresses.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	/* Only build the mdio bus once, and only for phylib users. */
	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply interface mode and quirk flags per PHY model. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1591
1592 static void tg3_mdio_fini(struct tg3 *tp)
1593 {
1594         if (tg3_flag(tp, MDIOBUS_INITED)) {
1595                 tg3_flag_clear(tp, MDIOBUS_INITED);
1596                 mdiobus_unregister(tp->mdio_bus);
1597                 mdiobus_free(tp->mdio_bus);
1598         }
1599 }
1600
/* tp->lock is held. */
/* Raise the driver-event bit toward the RX CPU firmware and record
 * when it was raised so tg3_wait_for_event_ack() can bound its wait.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1612
1613 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1614
/* tp->lock is held. */
/* Wait for the firmware to acknowledge (clear) the previous driver
 * event, bounded by TG3_FW_EVENT_TIMEOUT_USEC measured from when the
 * event was generated.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Convert the remaining budget into 8 us poll iterations. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1641
/* tp->lock is held. */
/* Snapshot PHY link state into data[0..3] (exactly four u32 writes):
 * BMCR/BMSR, advertisement/link partner, 1000BASE-T control/status
 * (skipped for MII serdes), and the PHY address register.  A failed
 * read leaves the corresponding half (or word) zero.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1676
/* tp->lock is held.
 *
 * Report the current PHY/link state to the management (ASF) firmware
 * on 5780-class devices through the NIC SRAM command mailbox.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	/* Only 5780-class parts running ASF firmware consume this. */
	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	/* Make sure the previous event has been consumed first. */
	tg3_wait_for_event_ack(tp);

	/* Command, length, then payload words -- in that order. */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	/* Ring the doorbell so the firmware picks up the command. */
	tg3_generate_fw_event(tp);
}
1698
/* tp->lock is held.
 *
 * Ask the ASF firmware to pause before a reset.  Skipped when the APE
 * manages the device (ENABLE_APE) or no ASF firmware is running.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
1714
/* tp->lock is held.
 *
 * Post the pre-reset driver-state signature: write the firmware
 * mailbox magic, then (on devices using the newer ASF handshake) a
 * driver state word matching @kind, and finally notify the APE for
 * INIT/SUSPEND resets.
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	/* SHUTDOWN is reported to the APE after the reset instead --
	 * see tg3_write_sig_post_reset().
	 */
	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
1747
/* tp->lock is held.
 *
 * Post the post-reset driver-state signature (START_DONE /
 * UNLOAD_DONE) for devices using the newer ASF handshake, and report
 * shutdowns to the APE.
 */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		/* RESET_KIND_SUSPEND has no post-reset state word. */
		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
1771
/* tp->lock is held.
 *
 * Legacy (pre-ASF_NEW_HANDSHAKE) variant of the driver-state
 * signature: gated only on ENABLE_ASF, with no APE notification.
 */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
1797
/* Wait for bootcode/firmware initialization to complete after a reset.
 *
 * Returns 0 on success or when no firmware is expected (SSB cores, or
 * parts already known to run without firmware); returns -ENODEV only
 * when a 5906 VCPU never signals init done.  On other chips a timeout
 * is treated as "no firmware fitted" rather than an error.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	/* We already determined once that no firmware is running. */
	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete.  The firmware
	 * signals completion by writing the one's complement of the
	 * magic value back into the mailbox (polled up to ~1 second).
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1849
/* Log the current link state and forward it to the management
 * firmware.  When the link is up (and link messages are enabled),
 * also log speed/duplex, flow-control and EEE status.  Caches the
 * carrier state in tp->link_up.
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		/* EEE status only makes sense on EEE-capable PHYs;
		 * setlpicnt is non-zero while LPI is in effect.
		 */
		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
1879
1880 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1881 {
1882         u32 flowctrl = 0;
1883
1884         if (adv & ADVERTISE_PAUSE_CAP) {
1885                 flowctrl |= FLOW_CTRL_RX;
1886                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1887                         flowctrl |= FLOW_CTRL_TX;
1888         } else if (adv & ADVERTISE_PAUSE_ASYM)
1889                 flowctrl |= FLOW_CTRL_TX;
1890
1891         return flowctrl;
1892 }
1893
1894 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1895 {
1896         u16 miireg;
1897
1898         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1899                 miireg = ADVERTISE_1000XPAUSE;
1900         else if (flow_ctrl & FLOW_CTRL_TX)
1901                 miireg = ADVERTISE_1000XPSE_ASYM;
1902         else if (flow_ctrl & FLOW_CTRL_RX)
1903                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1904         else
1905                 miireg = 0;
1906
1907         return miireg;
1908 }
1909
1910 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1911 {
1912         u32 flowctrl = 0;
1913
1914         if (adv & ADVERTISE_1000XPAUSE) {
1915                 flowctrl |= FLOW_CTRL_RX;
1916                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1917                         flowctrl |= FLOW_CTRL_TX;
1918         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1919                 flowctrl |= FLOW_CTRL_TX;
1920
1921         return flowctrl;
1922 }
1923
1924 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1925 {
1926         u8 cap = 0;
1927
1928         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1929                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1930         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1931                 if (lcladv & ADVERTISE_1000XPAUSE)
1932                         cap = FLOW_CTRL_RX;
1933                 if (rmtadv & ADVERTISE_1000XPAUSE)
1934                         cap = FLOW_CTRL_TX;
1935         }
1936
1937         return cap;
1938 }
1939
/* Resolve and apply TX/RX flow control.
 *
 * @lcladv, @rmtadv: local and link-partner pause advertisements; used
 * only when autonegotiation (and pause autoneg) is in effect.
 * Otherwise the user's forced tp->link_config.flowctrl setting wins.
 * The MAC RX/TX mode registers are rewritten (with a flush) only when
 * the flow-control enable bits actually change.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* With phylib, the autoneg setting lives in the phy_device. */
	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes links use 1000BASE-X pause bits; copper uses
		 * the generic MII resolution helper.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1978
/* phylib link-change callback (registered via phy_connect() in
 * tg3_phy_init()).  Mirrors the phylib-reported speed/duplex/pause
 * state into the MAC mode, MI status and TX length registers, then
 * logs a link message outside tp->lock if anything changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	/* Start from the current MAC mode with port-mode and duplex
	 * bits cleared; they are recomputed below.
	 */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: build local/remote pause
			 * advertisements for flow-control resolution.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* Only touch the MAC mode register when something changed. */
	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000 Mbps half duplex uses an extended slot time. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Report outside the lock; tg3_link_report() may log. */
	if (linkmesg)
		tg3_link_report(tp);
}
2062
/* Reset the PHY and attach it to the MAC through phylib.
 *
 * Idempotent: returns 0 immediately if already connected.  On success
 * the supported/advertised feature masks are trimmed to what the MAC
 * interface mode (and any 10/100-only restriction) allows, and
 * TG3_PHYFLG_IS_CONNECTED is set.  Returns a negative errno when
 * phy_connect() fails or the interface mode is unsupported.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only parts fall back to basic features. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unknown interface mode: undo the connect. */
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
2110
/* (Re)start the phylib state machine for an attached PHY.
 *
 * If the PHY was left in low-power mode (e.g. after suspend), restore
 * the saved link configuration into the phy_device first, then kick
 * off the state machine and autonegotiation.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
2132
2133 static void tg3_phy_stop(struct tg3 *tp)
2134 {
2135         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2136                 return;
2137
2138         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2139 }
2140
2141 static void tg3_phy_fini(struct tg3 *tp)
2142 {
2143         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2144                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2145                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2146         }
2147 }
2148
/* Enable external loopback via the PHY auxiliary control register.
 *
 * No-op (returns 0) on FET-style PHYs.  The 5401 cannot be read-
 * modified-written, so a fixed value with the loopback bit is written
 * instead.  Returns 0 on success or a negative PHY access error.
 */
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	/* Normal path: read-modify-write the aux control register. */
	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
2178
/* Enable/disable Auto Power-Down on FET-style PHYs.
 *
 * Uses the FET shadow-register protocol: set MII_TG3_FET_SHADOW_EN in
 * the test register to expose the shadow bank, flip the APD bit in
 * AUXSTAT2, then restore the original test register to hide the bank
 * again.
 */
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		/* Restore the test register to leave shadow mode. */
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
2198
/* Enable/disable the PHY's Auto Power-Down (APD) feature.
 *
 * Skipped entirely on pre-5705 parts and on 5717+ MII serdes
 * configurations.  FET PHYs use their own shadow-register sequence;
 * others are programmed through two MII_TG3_MISC_SHDW writes (SCR5
 * power settings, then the APD select/wake-timer word).
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* 5784 keeps DLLAPD off while APD is enabled. */
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2233
/* Enable/disable automatic MDI/MDI-X crossover detection.
 *
 * No-op on pre-5705 parts and serdes links.  FET PHYs flip the MDIX
 * bit through the shadow-register protocol; other PHYs use the aux
 * control MISC shadow register.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Expose the shadow bank, update MISCCTRL,
			 * then restore the test register.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2274
/* Turn on the PHY's ethernet@wirespeed feature (downshift to a lower
 * speed on marginal cabling) via a read-modify-write of the aux
 * control MISC shadow register, unless the board disallows it.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
2288
/* Program PHY DSP coefficients from the chip's OTP (one-time
 * programmable) word.
 *
 * Each bitfield of tp->phy_otp is extracted with its mask/shift pair
 * and written to the corresponding DSP register.  The whole sequence
 * is bracketed by enabling/disabling SMDSP access through the aux
 * control register; if that enable fails, nothing is written.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Release SMDSP access. */
	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2325
/* Adjust Energy Efficient Ethernet state after a link change.
 *
 * On an autonegotiated full-duplex 100/1000 link, set the CPMU EEE
 * exit timer for the active speed and, if the link partner resolved
 * EEE, arm tp->setlpicnt (counted down elsewhere before LPI is
 * enabled).  When EEE is not in play, clear the DSP TAP26 word and
 * disable LPI in the CPMU.  No-op on non-EEE-capable PHYs.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit time depends on the link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Check the clause-45 EEE resolution status. */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2368
/* Enable Low Power Idle (EEE) in the CPMU.
 *
 * On 5717/5719/57765-class chips running at gigabit speed, first
 * program the DSP TAP26 word (via SMDSP access) before setting the
 * LPI enable bit.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2387
2388 static int tg3_wait_macro_done(struct tg3 *tp)
2389 {
2390         int limit = 100;
2391
2392         while (limit--) {
2393                 u32 tmp32;
2394
2395                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2396                         if ((tmp32 & 0x1000) == 0)
2397                                 break;
2398                 }
2399         }
2400         if (limit < 0)
2401                 return -EBUSY;
2402
2403         return 0;
2404 }
2405
/* Write a fixed test pattern into each of the four PHY DSP channels
 * and read it back to verify.
 *
 * For every channel: select the channel block, write the six pattern
 * words, trigger a readback, and compare the three low/high word
 * pairs (masked to 15/4 valid bits) against the pattern.  On a macro
 * timeout *resetp is set so the caller retries after a PHY reset; on
 * a data mismatch a fixup sequence is written and -EBUSY returned
 * without requesting a reset.  Returns 0 when all channels verify.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and start the readback. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			/* Only 15 low / 4 high bits are significant. */
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: write the fixup sequence
				 * and fail without forcing a reset.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2471
/* Clear the test pattern from all four PHY DSP channels by writing
 * six zero words to each and latching them.  Returns 0 on success or
 * -EBUSY if the DSP macro never signals completion.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
2491
2492 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2493 {
2494         u32 reg32, phy9_orig;
2495         int retries, do_phy_reset, err;
2496
2497         retries = 10;
2498         do_phy_reset = 1;
2499         do {
2500                 if (do_phy_reset) {
2501                         err = tg3_bmcr_reset(tp);
2502                         if (err)
2503                                 return err;
2504                         do_phy_reset = 0;
2505                 }
2506
2507                 /* Disable transmitter and interrupt.  */
2508                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2509                         continue;
2510
2511                 reg32 |= 0x3000;
2512                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2513
2514                 /* Set full-duplex, 1000 mbps.  */
2515                 tg3_writephy(tp, MII_BMCR,
2516                              BMCR_FULLDPLX | BMCR_SPEED1000);
2517
2518                 /* Set to master mode.  */
2519                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2520                         continue;
2521
2522                 tg3_writephy(tp, MII_CTRL1000,
2523                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2524
2525                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2526                 if (err)
2527                         return err;
2528
2529                 /* Block the PHY control access.  */
2530                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2531
2532                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2533                 if (!err)
2534                         break;
2535         } while (--retries);
2536
2537         err = tg3_phy_reset_chanpat(tp);
2538         if (err)
2539                 return err;
2540
2541         tg3_phydsp_write(tp, 0x8005, 0x0000);
2542
2543         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2544         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2545
2546         tg3_phy_toggle_auxctl_smdsp(tp, false);
2547
2548         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2549
2550         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2551                 reg32 &= ~0x3000;
2552                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2553         } else if (!err)
2554                 err = -EBUSY;
2555
2556         return err;
2557 }
2558
/* Mark the link as down: clear the kernel carrier state and the
 * driver's cached link flag so the two stay in sync.
 */
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
2564
2565 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2566 {
2567         if (tg3_flag(tp, ENABLE_ASF))
2568                 netdev_warn(tp->dev,
2569                             "Management side-band traffic will be interrupted during phy settings change\n");
2570 }
2571
/* Reset the tigon3 PHY and re-apply the chip-specific DSP/analog
 * workarounds that a reset wipes out.  Reports a link-down event if the
 * interface was up with link.  Returns 0 on success or a negative
 * errno.  (Note: despite the old comment, this takes no FORCE
 * argument; it always resets.)
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Take the internal ephy out of IDDQ (low-power) state
		 * before touching it.
		 */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR link status is latched-low per the MII spec, so read it
	 * twice to get the current state; fail if the PHY won't answer.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* Report the forced link-down to the stack and the log. */
	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/4/5 need their own reset-and-verify sequence; skip the
	 * generic post-reset workarounds that don't apply to them.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* On 5784 (post-AX) temporarily clear the 10Mb-RX-only CPMU
	 * mode around the reset, restoring it afterwards.
	 */
	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784-AX/5761-AX: drop the 12.5MHz MAC clock selection after
	 * reset (workaround; see matching code in tg3_power_down_phy).
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* MII-serdes on 5717+ needs none of the copper workarounds. */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Per-PHY erratum fixups, each keyed off a phy_flags bit set
	 * at probe time.  Magic DSP register/value pairs come from the
	 * vendor; do not alter them.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2715
/* GPIO power-coordination messages: each PCI function publishes a
 * 4-bit status nibble (driver-present / needs-Vaux) in the APE
 * GPIO_MSG register (5717/5719) or TG3_CPMU_DRV_STATUS; the ALL_*
 * masks cover the nibbles of all four functions.  See
 * tg3_set_function_status().
 */
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2731
2732 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2733 {
2734         u32 status, shift;
2735
2736         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2737             tg3_asic_rev(tp) == ASIC_REV_5719)
2738                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2739         else
2740                 status = tr32(TG3_CPMU_DRV_STATUS);
2741
2742         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2743         status &= ~(TG3_GPIO_MSG_MASK << shift);
2744         status |= (newstat << shift);
2745
2746         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2747             tg3_asic_rev(tp) == ASIC_REV_5719)
2748                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2749         else
2750                 tw32(TG3_CPMU_DRV_STATUS, status);
2751
2752         return status >> TG3_APE_GPIO_MSG_SHIFT;
2753 }
2754
2755 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2756 {
2757         if (!tg3_flag(tp, IS_NIC))
2758                 return 0;
2759
2760         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2761             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2762             tg3_asic_rev(tp) == ASIC_REV_5720) {
2763                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2764                         return -EIO;
2765
2766                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2767
2768                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2769                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2770
2771                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2772         } else {
2773                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2774                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2775         }
2776
2777         return 0;
2778 }
2779
2780 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2781 {
2782         u32 grc_local_ctrl;
2783
2784         if (!tg3_flag(tp, IS_NIC) ||
2785             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2786             tg3_asic_rev(tp) == ASIC_REV_5701)
2787                 return;
2788
2789         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2790
2791         tw32_wait_f(GRC_LOCAL_CTRL,
2792                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2793                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2794
2795         tw32_wait_f(GRC_LOCAL_CTRL,
2796                     grc_local_ctrl,
2797                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2798
2799         tw32_wait_f(GRC_LOCAL_CTRL,
2800                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2801                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2802 }
2803
/* Switch the NIC's power source to auxiliary power (Vaux) by driving
 * the board-specific GPIO sequence.  The exact GPIO pins and ordering
 * differ by chip family; the sequences below are board workarounds
 * and must be kept in order.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* 5700/5701: single write driving GPIO0/1 high. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Drop GPIO2 again once GPIO0 has been raised. */
		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2880
2881 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2882 {
2883         u32 msg = 0;
2884
2885         /* Serialize power state transitions */
2886         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2887                 return;
2888
2889         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2890                 msg = TG3_GPIO_MSG_NEED_VAUX;
2891
2892         msg = tg3_set_function_status(tp, msg);
2893
2894         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2895                 goto done;
2896
2897         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2898                 tg3_pwrsrc_switch_to_vaux(tp);
2899         else
2900                 tg3_pwrsrc_die_with_vmain(tp);
2901
2902 done:
2903         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2904 }
2905
/* Decide whether auxiliary power (Vaux) is required — by this device,
 * or by its peer function on dual-port boards — and switch the power
 * source accordingly.  @include_wol: count enabled Wake-on-LAN as a
 * reason to keep Vaux.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	/* 5717/5719/5720 coordinate through the shared status word
	 * instead of peeking at the peer's private data.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* A fully-initialized peer manages the shared
			 * power source itself; leave it alone.
			 */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2949
2950 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2951 {
2952         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2953                 return 1;
2954         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2955                 if (speed != SPEED_10)
2956                         return 1;
2957         } else if (speed == SPEED_10)
2958                 return 1;
2959
2960         return 0;
2961 }
2962
2963 static bool tg3_phy_power_bug(struct tg3 *tp)
2964 {
2965         switch (tg3_asic_rev(tp)) {
2966         case ASIC_REV_5700:
2967         case ASIC_REV_5704:
2968                 return true;
2969         case ASIC_REV_5780:
2970                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2971                         return true;
2972                 return false;
2973         case ASIC_REV_5717:
2974                 if (!tp->pci_fn)
2975                         return true;
2976                 return false;
2977         case ASIC_REV_5719:
2978         case ASIC_REV_5720:
2979                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2980                     !tp->pci_fn)
2981                         return true;
2982                 return false;
2983         }
2984
2985         return false;
2986 }
2987
/* Put the PHY into its lowest safe power state before a suspend or
 * power-down transition.  @do_low_power: additionally program the
 * low-power auxctl settings on non-FET copper PHYs.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	/* Link must be kept alive (e.g. for management traffic). */
	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		/* Serdes PHYs: only the 5704 needs special handling. */
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906: reset the ephy and drop it into IDDQ. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		/* FET PHYs: clear advertisement, restart autoneg, and
		 * set standby-power-down via the shadow register set.
		 */
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	/* 5784-AX/5761-AX: select the 12.5MHz MAC clock before
	 * powering the PHY down (mirrored in tg3_phy_reset).
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3060
3061 /* tp->lock is held. */
3062 static int tg3_nvram_lock(struct tg3 *tp)
3063 {
3064         if (tg3_flag(tp, NVRAM)) {
3065                 int i;
3066
3067                 if (tp->nvram_lock_cnt == 0) {
3068                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3069                         for (i = 0; i < 8000; i++) {
3070                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3071                                         break;
3072                                 udelay(20);
3073                         }
3074                         if (i == 8000) {
3075                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3076                                 return -ENODEV;
3077                         }
3078                 }
3079                 tp->nvram_lock_cnt++;
3080         }
3081         return 0;
3082 }
3083
3084 /* tp->lock is held. */
3085 static void tg3_nvram_unlock(struct tg3 *tp)
3086 {
3087         if (tg3_flag(tp, NVRAM)) {
3088                 if (tp->nvram_lock_cnt > 0)
3089                         tp->nvram_lock_cnt--;
3090                 if (tp->nvram_lock_cnt == 0)
3091                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3092         }
3093 }
3094
3095 /* tp->lock is held. */
3096 static void tg3_enable_nvram_access(struct tg3 *tp)
3097 {
3098         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3099                 u32 nvaccess = tr32(NVRAM_ACCESS);
3100
3101                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3102         }
3103 }
3104
3105 /* tp->lock is held. */
3106 static void tg3_disable_nvram_access(struct tg3 *tp)
3107 {
3108         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3109                 u32 nvaccess = tr32(NVRAM_ACCESS);
3110
3111                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3112         }
3113 }
3114
3115 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3116                                         u32 offset, u32 *val)
3117 {
3118         u32 tmp;
3119         int i;
3120
3121         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3122                 return -EINVAL;
3123
3124         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3125                                         EEPROM_ADDR_DEVID_MASK |
3126                                         EEPROM_ADDR_READ);
3127         tw32(GRC_EEPROM_ADDR,
3128              tmp |
3129              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3130              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3131               EEPROM_ADDR_ADDR_MASK) |
3132              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3133
3134         for (i = 0; i < 1000; i++) {
3135                 tmp = tr32(GRC_EEPROM_ADDR);
3136
3137                 if (tmp & EEPROM_ADDR_COMPLETE)
3138                         break;
3139                 msleep(1);
3140         }
3141         if (!(tmp & EEPROM_ADDR_COMPLETE))
3142                 return -EBUSY;
3143
3144         tmp = tr32(GRC_EEPROM_DATA);
3145
3146         /*
3147          * The data will always be opposite the native endian
3148          * format.  Perform a blind byteswap to compensate.
3149          */
3150         *val = swab32(tmp);
3151
3152         return 0;
3153 }
3154
3155 #define NVRAM_CMD_TIMEOUT 10000
3156
3157 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3158 {
3159         int i;
3160
3161         tw32(NVRAM_CMD, nvram_cmd);
3162         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3163                 udelay(10);
3164                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3165                         udelay(10);
3166                         break;
3167                 }
3168         }
3169
3170         if (i == NVRAM_CMD_TIMEOUT)
3171                 return -EBUSY;
3172
3173         return 0;
3174 }
3175
3176 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3177 {
3178         if (tg3_flag(tp, NVRAM) &&
3179             tg3_flag(tp, NVRAM_BUFFERED) &&
3180             tg3_flag(tp, FLASH) &&
3181             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3182             (tp->nvram_jedecnum == JEDEC_ATMEL))
3183
3184                 addr = ((addr / tp->nvram_pagesize) <<
3185                         ATMEL_AT45DB0X1B_PAGE_POS) +
3186                        (addr % tp->nvram_pagesize);
3187
3188         return addr;
3189 }
3190
3191 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3192 {
3193         if (tg3_flag(tp, NVRAM) &&
3194             tg3_flag(tp, NVRAM_BUFFERED) &&
3195             tg3_flag(tp, FLASH) &&
3196             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3197             (tp->nvram_jedecnum == JEDEC_ATMEL))
3198
3199                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3200                         tp->nvram_pagesize) +
3201                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3202
3203         return addr;
3204 }
3205
/* Read one 32-bit word from NVRAM at @offset into *@val.
 * Returns 0 on success or a negative errno.
 *
 * NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* Devices without a NVRAM controller fall back to the raw
	 * serial-EEPROM interface.
	 */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	/* Always revoke access and drop the arbitration lock, even on
	 * command failure.
	 */
	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
3243
3244 /* Ensures NVRAM data is in bytestream format. */
3245 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3246 {
3247         u32 v;
3248         int res = tg3_nvram_read(tp, offset, &v);
3249         if (!res)
3250                 *val = cpu_to_be32(v);
3251         return res;
3252 }
3253
/* Write @len bytes from @buf to the serial EEPROM starting at @offset,
 * one 32-bit word at a time, polling for completion of each word.
 * @offset and @len are assumed dword-aligned (see callers).
 *
 * Returns 0 on success or -EBUSY if a word write never completes.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		/* memcpy avoids alignment assumptions on buf. */
		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Clear any stale completion status before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll up to ~1s for this word to finish. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3302
3303 /* offset and length are dword aligned */
3304 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3305                 u8 *buf)
3306 {
3307         int ret = 0;
3308         u32 pagesize = tp->nvram_pagesize;
3309         u32 pagemask = pagesize - 1;
3310         u32 nvram_cmd;
3311         u8 *tmp;
3312
3313         tmp = kmalloc(pagesize, GFP_KERNEL);
3314         if (tmp == NULL)
3315                 return -ENOMEM;
3316
3317         while (len) {
3318                 int j;
3319                 u32 phy_addr, page_off, size;
3320
3321                 phy_addr = offset & ~pagemask;
3322
3323                 for (j = 0; j < pagesize; j += 4) {
3324                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3325                                                   (__be32 *) (tmp + j));
3326                         if (ret)
3327                                 break;
3328                 }
3329                 if (ret)
3330                         break;
3331
3332                 page_off = offset & pagemask;
3333                 size = pagesize;
3334                 if (len < size)
3335                         size = len;
3336
3337                 len -= size;
3338
3339                 memcpy(tmp + page_off, buf, size);
3340
3341                 offset = offset + (pagesize - page_off);
3342
3343                 tg3_enable_nvram_access(tp);
3344
3345                 /*
3346                  * Before we can erase the flash page, we need
3347                  * to issue a special "write enable" command.
3348                  */
3349                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3350
3351                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3352                         break;
3353
3354                 /* Erase the target page */
3355                 tw32(NVRAM_ADDR, phy_addr);
3356
3357                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3358                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3359
3360                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3361                         break;
3362
3363                 /* Issue another write enable to start the write. */
3364                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3365
3366                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3367                         break;
3368
3369                 for (j = 0; j < pagesize; j += 4) {
3370                         __be32 data;
3371
3372                         data = *((__be32 *) (tmp + j));
3373
3374                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3375
3376                         tw32(NVRAM_ADDR, phy_addr + j);
3377
3378                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3379                                 NVRAM_CMD_WR;
3380
3381                         if (j == 0)
3382                                 nvram_cmd |= NVRAM_CMD_FIRST;
3383                         else if (j == (pagesize - 4))
3384                                 nvram_cmd |= NVRAM_CMD_LAST;
3385
3386                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3387                         if (ret)
3388                                 break;
3389                 }
3390                 if (ret)
3391                         break;
3392         }
3393
3394         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3395         tg3_nvram_exec_cmd(tp, nvram_cmd);
3396
3397         kfree(tmp);
3398
3399         return ret;
3400 }
3401
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		/* Load the next big-endian dword from the caller's buffer
		 * into the write-data register (host order for the MMIO
		 * write).
		 */
		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		/* Byte offset within the current flash page. */
		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST marks the start of a page or of the whole transfer;
		 * LAST marks a page boundary or the final dword.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* NOTE(review): condition suggests 57765+ flash parts do not
		 * need the address reprogrammed between consecutive writes —
		 * confirm against the programmer's guide.
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* Some ST-JEDEC parts (outside 5752/5755+) need an explicit
		 * write-enable command before the first write of a burst.
		 */
		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		/* Execute the write; bail out on the first failure. */
		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3456
3457 /* offset and length are dword aligned */
3458 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3459 {
3460         int ret;
3461
3462         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3463                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3464                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3465                 udelay(40);
3466         }
3467
3468         if (!tg3_flag(tp, NVRAM)) {
3469                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3470         } else {
3471                 u32 grc_mode;
3472
3473                 ret = tg3_nvram_lock(tp);
3474                 if (ret)
3475                         return ret;
3476
3477                 tg3_enable_nvram_access(tp);
3478                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3479                         tw32(NVRAM_WRITE1, 0x406);
3480
3481                 grc_mode = tr32(GRC_MODE);
3482                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3483
3484                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3485                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3486                                 buf);
3487                 } else {
3488                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3489                                 buf);
3490                 }
3491
3492                 grc_mode = tr32(GRC_MODE);
3493                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3494
3495                 tg3_disable_nvram_access(tp);
3496                 tg3_nvram_unlock(tp);
3497         }
3498
3499         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3500                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3501                 udelay(40);
3502         }
3503
3504         return ret;
3505 }
3506
3507 #define RX_CPU_SCRATCH_BASE     0x30000
3508 #define RX_CPU_SCRATCH_SIZE     0x04000
3509 #define TX_CPU_SCRATCH_BASE     0x34000
3510 #define TX_CPU_SCRATCH_SIZE     0x04000
3511
3512 /* tp->lock is held. */
3513 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3514 {
3515         int i;
3516         const int iters = 10000;
3517
3518         for (i = 0; i < iters; i++) {
3519                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3520                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3521                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3522                         break;
3523         }
3524
3525         return (i == iters) ? -EBUSY : 0;
3526 }
3527
/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	/* Unconditionally re-issue the halt request with a posted-write
	 * flush (tw32_f) and a short settle delay, even if the poll
	 * above already succeeded or timed out.
	 */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	/* 0 on success, -EBUSY if the halt poll timed out. */
	return rc;
}
3539
/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	/* Halt the TX CPU: 0 on success, -EBUSY on poll timeout. */
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}
3545
/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	/* Clear all CPU state bits, then drop the halt request with a
	 * flushed write so the embedded CPU resumes execution.
	 */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}
3552
/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	/* Release the RX CPU from its halted state. */
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
3558
/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	/* 5705 and later have no separate TX CPU (see the check in
	 * tg3_load_firmware_cpu()), so asking to halt one is a driver bug.
	 */
	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 is halted through the GRC VCPU extended-control
		 * register rather than the per-CPU MODE register.
		 */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3596
3597 static int tg3_fw_data_len(struct tg3 *tp,
3598                            const struct tg3_firmware_hdr *fw_hdr)
3599 {
3600         int fw_len;
3601
3602         /* Non fragmented firmware have one firmware header followed by a
3603          * contiguous chunk of data to be written. The length field in that
3604          * header is not the length of data to be written but the complete
3605          * length of the bss. The data length is determined based on
3606          * tp->fw->size minus headers.
3607          *
3608          * Fragmented firmware have a main header followed by multiple
3609          * fragments. Each fragment is identical to non fragmented firmware
3610          * with a firmware header followed by a contiguous chunk of data. In
3611          * the main header, the length field is unused and set to 0xffffffff.
3612          * In each fragment header the length is the entire size of that
3613          * fragment i.e. fragment data + header length. Data length is
3614          * therefore length field in the header minus TG3_FW_HDR_LEN.
3615          */
3616         if (tp->fw_len == 0xffffffff)
3617                 fw_len = be32_to_cpu(fw_hdr->len);
3618         else
3619                 fw_len = tp->fw->size;
3620
3621         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3622 }
3623
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	/* 5705+ parts have no TX CPU; loading TX firmware there is a
	 * driver bug, not a runtime condition.
	 */
	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the scratch-memory write primitive for this chip family. */
	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		/* Zero the whole scratch area, then keep the CPU halted
		 * while the image is written.
		 */
		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	/* Copy each fragment's payload to its target offset within the
	 * scratch area; see tg3_fw_data_len() for the header/length layout.
	 * A non-fragmented image runs this loop exactly once.
	 */
	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
3689
/* tp->lock is held. */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	/* Write the desired program counter with a posted-write flush,
	 * then verify it by reading it back.
	 */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	/* Retry up to 'iters' times, re-asserting halt and rewriting the
	 * PC, with a 1 ms pause between attempts.
	 */
	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	/* -EBUSY if the PC never read back as written. */
	return (i == iters) ? -EBUSY : 0;
}
3710
/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	/* Load the same image into both the RX and TX CPU scratch areas. */
	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
				be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	/* Release the RX CPU to run from the new PC. */
	tg3_rxcpu_resume(tp);

	return 0;
}
3752
/* Check that the RX CPU boot code is idle in its service loop and that no
 * other service patch is already installed.  Returns 0 when it is safe to
 * download a patch, -EBUSY or -EEXIST otherwise.
 */
static int tg3_validate_rxcpu_state(struct tg3 *tp)
{
	const int iters = 1000;
	int i;
	u32 val;

	/* Wait for boot code to complete initialization and enter service
	 * loop. It is then safe to download service patches
	 */
	for (i = 0; i < iters; i++) {
		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
			break;

		udelay(10);
	}

	if (i == iters) {
		netdev_err(tp->dev, "Boot code not ready for service patches\n");
		return -EBUSY;
	}

	/* A non-zero low byte in the firmware handshake register means
	 * another patch is already present; do not stack a second one.
	 */
	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
	if (val & 0xff) {
		netdev_warn(tp->dev,
			    "Other patches exist. Not downloading EEE patch\n");
		return -EEXIST;
	}

	return 0;
}
3783
/* tp->lock is held. */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	/* Only NVRAM-less parts take this service patch. */
	if (!tg3_flag(tp, NO_NVRAM))
		return;

	/* Bail out unless boot code is idle and no other patch is
	 * installed (see tg3_validate_rxcpu_state()).
	 */
	if (tg3_validate_rxcpu_state(tp))
		return;

	/* Nothing to do if no firmware image was loaded. */
	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	/* Sanity-check the image against the expected load address. */
	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	/* Restart the RX CPU so the patched service loop runs. */
	tg3_rxcpu_resume(tp);
}
3824
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	/* Nothing to do unless this chip runs TSO in firmware. */
	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	/* 5705 loads the TSO firmware into the RX CPU's mbuf pool area;
	 * everything else uses the TX CPU scratch memory.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	/* Release the CPU to execute the firmware. */
	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
3874
3875
3876 /* tp->lock is held. */
3877 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3878 {
3879         u32 addr_high, addr_low;
3880         int i;
3881
3882         addr_high = ((tp->dev->dev_addr[0] << 8) |
3883                      tp->dev->dev_addr[1]);
3884         addr_low = ((tp->dev->dev_addr[2] << 24) |
3885                     (tp->dev->dev_addr[3] << 16) |
3886                     (tp->dev->dev_addr[4] <<  8) |
3887                     (tp->dev->dev_addr[5] <<  0));
3888         for (i = 0; i < 4; i++) {
3889                 if (i == 1 && skip_mac_1)
3890                         continue;
3891                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3892                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3893         }
3894
3895         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3896             tg3_asic_rev(tp) == ASIC_REV_5704) {
3897                 for (i = 0; i < 12; i++) {
3898                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3899                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3900                 }
3901         }
3902
3903         addr_high = (tp->dev->dev_addr[0] +
3904                      tp->dev->dev_addr[1] +
3905                      tp->dev->dev_addr[2] +
3906                      tp->dev->dev_addr[3] +
3907                      tp->dev->dev_addr[4] +
3908                      tp->dev->dev_addr[5]) &
3909                 TX_BACKOFF_SEED_MASK;
3910         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3911 }
3912
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly by rewriting the cached MISC_HOST_CTRL value into PCI
	 * config space.  NOTE(review): presumably this setting can be lost
	 * across a power transition — confirm against the callers.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3922
3923 static int tg3_power_up(struct tg3 *tp)
3924 {
3925         int err;
3926
3927         tg3_enable_register_access(tp);
3928
3929         err = pci_set_power_state(tp->pdev, PCI_D0);
3930         if (!err) {
3931                 /* Switch out of Vaux if it is a NIC */
3932                 tg3_pwrsrc_switch_to_vmain(tp);
3933         } else {
3934                 netdev_err(tp->dev, "Transition to D0 failed\n");
3935         }
3936
3937         return err;
3938 }
3939
3940 static int tg3_setup_phy(struct tg3 *, bool);
3941
3942 static int tg3_power_down_prepare(struct tg3 *tp)
3943 {
3944         u32 misc_host_ctrl;
3945         bool device_should_wake, do_low_power;
3946
3947         tg3_enable_register_access(tp);
3948
3949         /* Restore the CLKREQ setting. */
3950         if (tg3_flag(tp, CLKREQ_BUG))
3951                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3952                                          PCI_EXP_LNKCTL_CLKREQ_EN);
3953
3954         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3955         tw32(TG3PCI_MISC_HOST_CTRL,
3956              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3957
3958         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3959                              tg3_flag(tp, WOL_ENABLE);
3960
3961         if (tg3_flag(tp, USE_PHYLIB)) {
3962                 do_low_power = false;
3963                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3964                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3965                         struct phy_device *phydev;
3966                         u32 phyid, advertising;
3967
3968                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3969
3970                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3971
3972                         tp->link_config.speed = phydev->speed;
3973                         tp->link_config.duplex = phydev->duplex;
3974                         tp->link_config.autoneg = phydev->autoneg;
3975                         tp->link_config.advertising = phydev->advertising;
3976
3977                         advertising = ADVERTISED_TP |
3978                                       ADVERTISED_Pause |
3979                                       ADVERTISED_Autoneg |
3980                                       ADVERTISED_10baseT_Half;
3981
3982                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3983                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3984                                         advertising |=
3985                                                 ADVERTISED_100baseT_Half |
3986                                                 ADVERTISED_100baseT_Full |
3987                                                 ADVERTISED_10baseT_Full;
3988                                 else
3989                                         advertising |= ADVERTISED_10baseT_Full;
3990                         }
3991
3992                         phydev->advertising = advertising;
3993
3994                         phy_start_aneg(phydev);
3995
3996                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3997                         if (phyid != PHY_ID_BCMAC131) {
3998                                 phyid &= PHY_BCM_OUI_MASK;
3999                                 if (phyid == PHY_BCM_OUI_1 ||
4000                                     phyid == PHY_BCM_OUI_2 ||
4001                                     phyid == PHY_BCM_OUI_3)
4002                                         do_low_power = true;
4003                         }
4004                 }
4005         } else {
4006                 do_low_power = true;
4007
4008                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4009                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4010
4011                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4012                         tg3_setup_phy(tp, false);
4013         }
4014
4015         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4016                 u32 val;
4017
4018                 val = tr32(GRC_VCPU_EXT_CTRL);
4019                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4020         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4021                 int i;
4022                 u32 val;
4023
4024                 for (i = 0; i < 200; i++) {
4025                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4026                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4027                                 break;
4028                         msleep(1);
4029                 }
4030         }
4031         if (tg3_flag(tp, WOL_CAP))
4032                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4033                                                      WOL_DRV_STATE_SHUTDOWN |
4034                                                      WOL_DRV_WOL |
4035                                                      WOL_SET_MAGIC_PKT);
4036
4037         if (device_should_wake) {
4038                 u32 mac_mode;
4039
4040                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4041                         if (do_low_power &&
4042                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4043                                 tg3_phy_auxctl_write(tp,
4044                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4045                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4046                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4047                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4048                                 udelay(40);
4049                         }
4050
4051                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4052                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4053                         else if (tp->phy_flags &
4054                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4055                                 if (tp->link_config.active_speed == SPEED_1000)
4056                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4057                                 else
4058                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4059                         } else
4060                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4061
4062                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4063                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4064                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4065                                              SPEED_100 : SPEED_10;
4066                                 if (tg3_5700_link_polarity(tp, speed))
4067                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4068                                 else
4069                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4070                         }
4071                 } else {
4072                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4073                 }
4074
4075                 if (!tg3_flag(tp, 5750_PLUS))
4076                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4077
4078                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4079                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4080                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4081                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4082
4083                 if (tg3_flag(tp, ENABLE_APE))
4084                         mac_mode |= MAC_MODE_APE_TX_EN |
4085                                     MAC_MODE_APE_RX_EN |
4086                                     MAC_MODE_TDE_ENABLE;
4087
4088                 tw32_f(MAC_MODE, mac_mode);
4089                 udelay(100);
4090
4091                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4092                 udelay(10);
4093         }
4094
4095         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4096             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4097              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4098                 u32 base_val;
4099
4100                 base_val = tp->pci_clock_ctrl;
4101                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4102                              CLOCK_CTRL_TXCLK_DISABLE);
4103
4104                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4105                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4106         } else if (tg3_flag(tp, 5780_CLASS) ||
4107                    tg3_flag(tp, CPMU_PRESENT) ||
4108                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4109                 /* do nothing */
4110         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4111                 u32 newbits1, newbits2;
4112
4113                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4114                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4115                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4116                                     CLOCK_CTRL_TXCLK_DISABLE |
4117                                     CLOCK_CTRL_ALTCLK);
4118                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4119                 } else if (tg3_flag(tp, 5705_PLUS)) {
4120                         newbits1 = CLOCK_CTRL_625_CORE;
4121                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4122                 } else {
4123                         newbits1 = CLOCK_CTRL_ALTCLK;
4124                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4125                 }
4126
4127                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4128                             40);
4129
4130                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4131                             40);
4132
4133                 if (!tg3_flag(tp, 5705_PLUS)) {
4134                         u32 newbits3;
4135
4136                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4137                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4138                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4139                                             CLOCK_CTRL_TXCLK_DISABLE |
4140                                             CLOCK_CTRL_44MHZ_CORE);
4141                         } else {
4142                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4143                         }
4144
4145                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4146                                     tp->pci_clock_ctrl | newbits3, 40);
4147                 }
4148         }
4149
4150         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4151                 tg3_power_down_phy(tp, do_low_power);
4152
4153         tg3_frob_aux_power(tp, true);
4154
4155         /* Workaround for unstable PLL clock */
4156         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4157             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4158              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4159                 u32 val = tr32(0x7d00);
4160
4161                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4162                 tw32(0x7d00, val);
4163                 if (!tg3_flag(tp, ENABLE_ASF)) {
4164                         int err;
4165
4166                         err = tg3_nvram_lock(tp);
4167                         tg3_halt_cpu(tp, RX_CPU_BASE);
4168                         if (!err)
4169                                 tg3_nvram_unlock(tp);
4170                 }
4171         }
4172
4173         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4174
4175         return 0;
4176 }
4177
/* Final power-down: quiesce the chip, arm PCI wake if Wake-on-LAN
 * is enabled, and put the device into the D3hot power state.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	/* Enable PME from D3 only when WOL is configured. */
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
4185
4186 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4187 {
4188         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4189         case MII_TG3_AUX_STAT_10HALF:
4190                 *speed = SPEED_10;
4191                 *duplex = DUPLEX_HALF;
4192                 break;
4193
4194         case MII_TG3_AUX_STAT_10FULL:
4195                 *speed = SPEED_10;
4196                 *duplex = DUPLEX_FULL;
4197                 break;
4198
4199         case MII_TG3_AUX_STAT_100HALF:
4200                 *speed = SPEED_100;
4201                 *duplex = DUPLEX_HALF;
4202                 break;
4203
4204         case MII_TG3_AUX_STAT_100FULL:
4205                 *speed = SPEED_100;
4206                 *duplex = DUPLEX_FULL;
4207                 break;
4208
4209         case MII_TG3_AUX_STAT_1000HALF:
4210                 *speed = SPEED_1000;
4211                 *duplex = DUPLEX_HALF;
4212                 break;
4213
4214         case MII_TG3_AUX_STAT_1000FULL:
4215                 *speed = SPEED_1000;
4216                 *duplex = DUPLEX_FULL;
4217                 break;
4218
4219         default:
4220                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4221                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4222                                  SPEED_10;
4223                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4224                                   DUPLEX_HALF;
4225                         break;
4226                 }
4227                 *speed = SPEED_UNKNOWN;
4228                 *duplex = DUPLEX_UNKNOWN;
4229                 break;
4230         }
4231 }
4232
/* Program the PHY autoneg advertisement registers from the
 * ethtool-style @advertise mask and @flowctrl pause bits, then
 * (for EEE-capable PHYs) configure the EEE advertisement as well.
 * Returns 0 on success or the first PHY access error encountered.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	/* 10/100 advertisement plus pause capability bits. */
	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* 5701 A0/B0 revisions want master mode forced on. */
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI while the EEE advertisement is reprogrammed. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		/* Per-ASIC DSP fixups that accompany the EEE settings. */
		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Always re-disable SMDSP access; keep first error seen. */
		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
4306
/* Begin copper PHY link bring-up.  With autoneg enabled (or when the
 * PHY is in low-power/WOL mode) program the advertisement and restart
 * autonegotiation; otherwise force the configured speed/duplex via
 * BMCR, first dropping any existing link.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Powering down: advertise only the lowest speeds
			 * needed for WOL to reduce power draw.
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
				adv |= ADVERTISED_1000baseT_Half |
				       ADVERTISED_1000baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		/* Build the forced-mode BMCR value; SPEED_10 is the
		 * all-zero encoding and also serves as the default.
		 */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force link down via loopback and poll (up to
			 * ~15ms) for the link to drop before writing
			 * the new forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR link status is latched-low; read
				 * twice to get the current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4400
/* Reconstruct tp->link_config (autoneg mode, speed, duplex,
 * advertising mask, flow control) from what is currently programmed
 * into the PHY registers, so an existing configuration can be
 * adopted instead of overwritten.  Returns 0 on success, -EIO for
 * an invalid forced-mode combination, or a PHY read error.
 */
static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		/* Forced speed/duplex mode. */
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			/* 10 Mb/s is not meaningful on SerDes parts. */
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* Fall through */
		default:
			/* Both speed bits set, or gigabit selected on a
			 * 10/100-only PHY: invalid, bail with -EIO.
			 */
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	/* Autoneg is enabled: pull the advertised modes. */
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			/* Copper: gigabit modes live in MII_CTRL1000. */
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			/* SerDes: the 1000X advertisement register carries
			 * both the pause bits and the duplex modes.
			 */
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
4497
4498 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4499 {
4500         int err;
4501
4502         /* Turn off tap power management. */
4503         /* Set Extended packet length bit */
4504         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4505
4506         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4507         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4508         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4509         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4510         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4511
4512         udelay(40);
4513
4514         return err;
4515 }
4516
4517 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4518 {
4519         u32 val;
4520         u32 tgtadv = 0;
4521         u32 advertising = tp->link_config.advertising;
4522
4523         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4524                 return true;
4525
4526         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
4527                 return false;
4528
4529         val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
4530
4531
4532         if (advertising & ADVERTISED_100baseT_Full)
4533                 tgtadv |= MDIO_AN_EEE_ADV_100TX;
4534         if (advertising & ADVERTISED_1000baseT_Full)
4535                 tgtadv |= MDIO_AN_EEE_ADV_1000T;
4536
4537         if (val != tgtadv)
4538                 return false;
4539
4540         return true;
4541 }
4542
/* Verify that the autoneg advertisement programmed into the PHY
 * matches tp->link_config.  On return *lcladv holds the raw
 * MII_ADVERTISE value for later flow-control resolution.  Returns
 * false on any mismatch or PHY read failure.
 */
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		/* Pause advertisement is only checked at full duplex. */
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			/* 5701 A0/B0 parts have master mode forced on
			 * (see tg3_phy_autoneg_cfg()), so include those
			 * bits in the expected value.
			 */
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
4586
4587 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4588 {
4589         u32 lpeth = 0;
4590
4591         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4592                 u32 val;
4593
4594                 if (tg3_readphy(tp, MII_STAT1000, &val))
4595                         return false;
4596
4597                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4598         }
4599
4600         if (tg3_readphy(tp, MII_LPA, rmtadv))
4601                 return false;
4602
4603         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4604         tp->link_config.rmt_adv = lpeth;
4605
4606         return true;
4607 }
4608
4609 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4610 {
4611         if (curr_link_up != tp->link_up) {
4612                 if (curr_link_up) {
4613                         netif_carrier_on(tp->dev);
4614                 } else {
4615                         netif_carrier_off(tp->dev);
4616                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4617                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4618                 }
4619
4620                 tg3_link_report(tp);
4621                 return true;
4622         }
4623
4624         return false;
4625 }
4626
4627 static void tg3_clear_mac_status(struct tg3 *tp)
4628 {
4629         tw32(MAC_EVENT, 0);
4630
4631         tw32_f(MAC_STATUS,
4632                MAC_STATUS_SYNC_CHANGED |
4633                MAC_STATUS_CFG_CHANGED |
4634                MAC_STATUS_MI_COMPLETION |
4635                MAC_STATUS_LNKSTATE_CHANGED);
4636         udelay(40);
4637 }
4638
4639 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4640 {
4641         bool current_link_up;
4642         u32 bmsr, val;
4643         u32 lcl_adv, rmt_adv;
4644         u16 current_speed;
4645         u8 current_duplex;
4646         int i, err;
4647
4648         tg3_clear_mac_status(tp);
4649
4650         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4651                 tw32_f(MAC_MI_MODE,
4652                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4653                 udelay(80);
4654         }
4655
4656         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4657
4658         /* Some third-party PHYs need to be reset on link going
4659          * down.
4660          */
4661         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4662              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4663              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4664             tp->link_up) {
4665                 tg3_readphy(tp, MII_BMSR, &bmsr);
4666                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4667                     !(bmsr & BMSR_LSTATUS))
4668                         force_reset = true;
4669         }
4670         if (force_reset)
4671                 tg3_phy_reset(tp);
4672
4673         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4674                 tg3_readphy(tp, MII_BMSR, &bmsr);
4675                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4676                     !tg3_flag(tp, INIT_COMPLETE))
4677                         bmsr = 0;
4678
4679                 if (!(bmsr & BMSR_LSTATUS)) {
4680                         err = tg3_init_5401phy_dsp(tp);
4681                         if (err)
4682                                 return err;
4683
4684                         tg3_readphy(tp, MII_BMSR, &bmsr);
4685                         for (i = 0; i < 1000; i++) {
4686                                 udelay(10);
4687                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4688                                     (bmsr & BMSR_LSTATUS)) {
4689                                         udelay(40);
4690                                         break;
4691                                 }
4692                         }
4693
4694                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4695                             TG3_PHY_REV_BCM5401_B0 &&
4696                             !(bmsr & BMSR_LSTATUS) &&
4697                             tp->link_config.active_speed == SPEED_1000) {
4698                                 err = tg3_phy_reset(tp);
4699                                 if (!err)
4700                                         err = tg3_init_5401phy_dsp(tp);
4701                                 if (err)
4702                                         return err;
4703                         }
4704                 }
4705         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4706                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4707                 /* 5701 {A0,B0} CRC bug workaround */
4708                 tg3_writephy(tp, 0x15, 0x0a75);
4709                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4710                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4711                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4712         }
4713
4714         /* Clear pending interrupts... */
4715         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4716         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4717
4718         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4719                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4720         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4721                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4722
4723         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4724             tg3_asic_rev(tp) == ASIC_REV_5701) {
4725                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4726                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4727                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4728                 else
4729                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4730         }
4731
4732         current_link_up = false;
4733         current_speed = SPEED_UNKNOWN;
4734         current_duplex = DUPLEX_UNKNOWN;
4735         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4736         tp->link_config.rmt_adv = 0;
4737
4738         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4739                 err = tg3_phy_auxctl_read(tp,
4740                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4741                                           &val);
4742                 if (!err && !(val & (1 << 10))) {
4743                         tg3_phy_auxctl_write(tp,
4744                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4745                                              val | (1 << 10));
4746                         goto relink;
4747                 }
4748         }
4749
4750         bmsr = 0;
4751         for (i = 0; i < 100; i++) {
4752                 tg3_readphy(tp, MII_BMSR, &bmsr);
4753                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4754                     (bmsr & BMSR_LSTATUS))
4755                         break;
4756                 udelay(40);
4757         }
4758
4759         if (bmsr & BMSR_LSTATUS) {
4760                 u32 aux_stat, bmcr;
4761
4762                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4763                 for (i = 0; i < 2000; i++) {
4764                         udelay(10);
4765                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4766                             aux_stat)
4767                                 break;
4768                 }
4769
4770                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4771                                              &current_speed,
4772                                              &current_duplex);
4773
4774                 bmcr = 0;
4775                 for (i = 0; i < 200; i++) {
4776                         tg3_readphy(tp, MII_BMCR, &bmcr);
4777                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4778                                 continue;
4779                         if (bmcr && bmcr != 0x7fff)
4780                                 break;
4781                         udelay(10);
4782                 }
4783
4784                 lcl_adv = 0;
4785                 rmt_adv = 0;
4786
4787                 tp->link_config.active_speed = current_speed;
4788                 tp->link_config.active_duplex = current_duplex;
4789
4790                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4791                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4792
4793                         if ((bmcr & BMCR_ANENABLE) &&
4794                             eee_config_ok &&
4795                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4796                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4797                                 current_link_up = true;
4798
4799                         /* EEE settings changes take effect only after a phy
4800                          * reset.  If we have skipped a reset due to Link Flap
4801                          * Avoidance being enabled, do it now.
4802                          */
4803                         if (!eee_config_ok &&
4804                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4805                             !force_reset)
4806                                 tg3_phy_reset(tp);
4807                 } else {
4808                         if (!(bmcr & BMCR_ANENABLE) &&
4809                             tp->link_config.speed == current_speed &&
4810                             tp->link_config.duplex == current_duplex) {
4811                                 current_link_up = true;
4812                         }
4813                 }
4814
4815                 if (current_link_up &&
4816                     tp->link_config.active_duplex == DUPLEX_FULL) {
4817                         u32 reg, bit;
4818
4819                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4820                                 reg = MII_TG3_FET_GEN_STAT;
4821                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4822                         } else {
4823                                 reg = MII_TG3_EXT_STAT;
4824                                 bit = MII_TG3_EXT_STAT_MDIX;
4825                         }
4826
4827                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4828                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4829
4830                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4831                 }
4832         }
4833
4834 relink:
4835         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4836                 tg3_phy_copper_begin(tp);
4837
4838                 if (tg3_flag(tp, ROBOSWITCH)) {
4839                         current_link_up = true;
4840                         /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4841                         current_speed = SPEED_1000;
4842                         current_duplex = DUPLEX_FULL;
4843                         tp->link_config.active_speed = current_speed;
4844                         tp->link_config.active_duplex = current_duplex;
4845                 }
4846
4847                 tg3_readphy(tp, MII_BMSR, &bmsr);
4848                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4849                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4850                         current_link_up = true;
4851         }
4852
4853         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4854         if (current_link_up) {
4855                 if (tp->link_config.active_speed == SPEED_100 ||
4856                     tp->link_config.active_speed == SPEED_10)
4857                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4858                 else
4859                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4860         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4861                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4862         else
4863                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4864
4865         /* In order for the 5750 core in BCM4785 chip to work properly
4866          * in RGMII mode, the Led Control Register must be set up.
4867          */
4868         if (tg3_flag(tp, RGMII_MODE)) {
4869                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4870                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4871
4872                 if (tp->link_config.active_speed == SPEED_10)
4873                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4874                 else if (tp->link_config.active_speed == SPEED_100)
4875                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4876                                      LED_CTRL_100MBPS_ON);
4877                 else if (tp->link_config.active_speed == SPEED_1000)
4878                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4879                                      LED_CTRL_1000MBPS_ON);
4880
4881                 tw32(MAC_LED_CTRL, led_ctrl);
4882                 udelay(40);
4883         }
4884
4885         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4886         if (tp->link_config.active_duplex == DUPLEX_HALF)
4887                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4888
4889         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4890                 if (current_link_up &&
4891                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4892                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4893                 else
4894                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4895         }
4896
4897         /* ??? Without this setting Netgear GA302T PHY does not
4898          * ??? send/receive packets...
4899          */
4900         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4901             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4902                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4903                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4904                 udelay(80);
4905         }
4906
4907         tw32_f(MAC_MODE, tp->mac_mode);
4908         udelay(40);
4909
4910         tg3_phy_eee_adjust(tp, current_link_up);
4911
4912         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4913                 /* Polled via timer. */
4914                 tw32_f(MAC_EVENT, 0);
4915         } else {
4916                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4917         }
4918         udelay(40);
4919
4920         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4921             current_link_up &&
4922             tp->link_config.active_speed == SPEED_1000 &&
4923             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4924                 udelay(120);
4925                 tw32_f(MAC_STATUS,
4926                      (MAC_STATUS_SYNC_CHANGED |
4927                       MAC_STATUS_CFG_CHANGED));
4928                 udelay(40);
4929                 tg3_write_mem(tp,
4930                               NIC_SRAM_FIRMWARE_MBOX,
4931                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4932         }
4933
4934         /* Prevent send BD corruption. */
4935         if (tg3_flag(tp, CLKREQ_BUG)) {
4936                 if (tp->link_config.active_speed == SPEED_100 ||
4937                     tp->link_config.active_speed == SPEED_10)
4938                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4939                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
4940                 else
4941                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4942                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
4943         }
4944
4945         tg3_test_and_report_link_chg(tp, current_link_up);
4946
4947         return 0;
4948 }
4949
/* State for the software-driven 1000BASE-X (IEEE 802.3 clause 37)
 * autonegotiation arbitration machine; see tg3_fiber_aneg_smachine().
 * Zeroed by fiber_autoneg() before the first state-machine call.
 */
struct tg3_fiber_aneginfo {
	int state;		/* current ANEG_STATE_* of the machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14	/* next-page: unimplemented */
#define ANEG_STATE_NEXT_PAGE_WAIT       15	/* next-page: unimplemented */

	u32 flags;		/* MR_* control inputs and result outputs */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040	/* MR_LP_ADV_*: link partner */
#define MR_LP_ADV_HALF_DUPLEX   0x00000080	/* abilities latched from the */
#define MR_LP_ADV_SYM_PAUSE     0x00000100	/* received config word */
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Timestamps counted in state-machine invocations (cur_time is
	 * incremented once per tg3_fiber_aneg_smachine() call).
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last non-idle config word seen */
	int ability_match_count;	/* consecutive polls it was stable */

	char ability_match, idle_match, ack_match;	/* match predicates */

	/* Config words as written to / read from the MAC autoneg
	 * registers.  NOTE(review): the ANEG_CFG_* bit positions follow
	 * the MAC_{TX,RX}_AUTO_NEG register layout, which does not match
	 * the clause 37 wire bit order - confirm against register docs.
	 */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};

/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2	/* still negotiating; call again */
#define ANEG_FAILED     -1

/* Settle time in state-machine ticks (i.e. calls, ~1us apart). */
#define ANEG_STATE_SETTLE_TIME  10000
5013
/* Software implementation of the 1000BASE-X (IEEE 802.3 clause 37)
 * autonegotiation arbitration state machine, used when the hardware
 * SG_DIG autoneg block is not in use.
 *
 * Called repeatedly (about once per microsecond, from fiber_autoneg())
 * until it returns ANEG_DONE or ANEG_FAILED.  ANEG_TIMER_ENAB means a
 * settle timer is running and the caller must keep polling.
 *
 * @tp: device private state
 * @ap: autoneg state; caller zeroes it before the first invocation
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the received config word and derive the ability_match /
	 * ack_match / idle_match predicates used by the transitions below.
	 * ability_match is set once the same config word has been seen on
	 * more than one consecutive poll.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* Partner is sending idles, not config words. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Start transmitting an all-zero config word and arm the
		 * settle timer.
		 */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus our pause capabilities. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Keep sending our word, now with the ACK bit set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner restarted negotiation. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			/* Partner set reserved bits; abort. */
			ret = ANEG_FAILED;
			break;
		}
		/* Latch the partner's advertised abilities into the
		 * MR_LP_ADV_* result flags.
		 */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		/* NOTE(review): 0x0008 has no ANEG_CFG_* name; presumably
		 * the partner's toggle bit in this register layout -
		 * confirm against the register documentation.
		 */
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					/* Next-page exchange is not
					 * implemented (see the WAIT states
					 * below), so fail rather than hang.
					 */
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words; wait for idles to settle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
5265
5266 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5267 {
5268         int res = 0;
5269         struct tg3_fiber_aneginfo aninfo;
5270         int status = ANEG_FAILED;
5271         unsigned int tick;
5272         u32 tmp;
5273
5274         tw32_f(MAC_TX_AUTO_NEG, 0);
5275
5276         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5277         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5278         udelay(40);
5279
5280         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5281         udelay(40);
5282
5283         memset(&aninfo, 0, sizeof(aninfo));
5284         aninfo.flags |= MR_AN_ENABLE;
5285         aninfo.state = ANEG_STATE_UNKNOWN;
5286         aninfo.cur_time = 0;
5287         tick = 0;
5288         while (++tick < 195000) {
5289                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5290                 if (status == ANEG_DONE || status == ANEG_FAILED)
5291                         break;
5292
5293                 udelay(1);
5294         }
5295
5296         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5297         tw32_f(MAC_MODE, tp->mac_mode);
5298         udelay(40);
5299
5300         *txflags = aninfo.txconfig;
5301         *rxflags = aninfo.flags;
5302
5303         if (status == ANEG_DONE &&
5304             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5305                              MR_LP_ADV_FULL_DUPLEX)))
5306                 res = 1;
5307
5308         return res;
5309 }
5310
/* One-time bring-up sequence for the BCM8002 SerDes PHY.  The register
 * numbers and values below are raw vendor magic; there are no symbolic
 * names for them.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
5360
/* Fiber link setup using the hardware SG_DIG autonegotiation block
 * (5704S-style serdes).  Returns true when the link is up.
 *
 * @tp:         device private state
 * @mac_status: MAC_STATUS value sampled by the caller
 */
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	/* All revisions except 5704 A0/A1 need the MAC_SERDES_CFG
	 * workaround writes below.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: disable hardware autoneg if it was on,
		 * and call the link up as soon as the PCS has sync.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* Hold off restarting autoneg while a parallel-detected
		 * link still has PCS sync and no incoming config words;
		 * serdes_counter times this grace period out.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse soft reset into the SG_DIG block to restart AN. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg finished: resolve flow control from what
			 * we advertised and what the partner sent back.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: disable HW autoneg and
				 * try parallel detection instead.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* Neither sync nor signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
5505
/* Fiber link setup without the hardware autoneg block: either run the
 * software autoneg state machine (fiber_autoneg()) or force a link.
 * Returns true when the link is up.
 */
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	bool current_link_up = false;

	/* Nothing to do until the PCS has symbol sync. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate the negotiated config words into MII
			 * 1000BASE-X advertisement bits so flow control
			 * can be resolved.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = true;
		}
		/* Ack any sync/config change events left over from the
		 * negotiation; give up after 30 attempts.
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed, but if we still have sync and the
		 * partner has stopped sending config words, treat the
		 * link as up (parallel detection).
		 */
		mac_status = tr32(MAC_STATUS);
		if (!current_link_up &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = true;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = true;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
5570
/* Bring up / re-check the link on TBI (fiber) devices.  Dispatches to
 * the hardware SG_DIG autoneg path or the by-hand path, then reports
 * any link change.  Always returns 0.
 *
 * @force_reset: unused on this path (kept for signature parity with
 *               the other tg3_setup_*_phy() variants).
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	/* Remember current link parameters so only genuine changes are
	 * reported at the end.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software-autoneg device with an established link
	 * and no pending status change - just ack the bits and return.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI port mode for the fiber interface. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change bit in the status block. */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config change events until they stay clear. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Nudge the partner by briefly sending config
			 * words.
			 */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links only ever run 1000FD here; set the LEDs to match. */
	if (current_link_up) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Even if the up/down state did not change, report any change
	 * in flow control, speed, or duplex.
	 */
	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5673
5674 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5675 {
5676         int err = 0;
5677         u32 bmsr, bmcr;
5678         u16 current_speed = SPEED_UNKNOWN;
5679         u8 current_duplex = DUPLEX_UNKNOWN;
5680         bool current_link_up = false;
5681         u32 local_adv, remote_adv, sgsr;
5682
5683         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5684              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5685              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5686              (sgsr & SERDES_TG3_SGMII_MODE)) {
5687
5688                 if (force_reset)
5689                         tg3_phy_reset(tp);
5690
5691                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5692
5693                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5694                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5695                 } else {
5696                         current_link_up = true;
5697                         if (sgsr & SERDES_TG3_SPEED_1000) {
5698                                 current_speed = SPEED_1000;
5699                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5700                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5701                                 current_speed = SPEED_100;
5702                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5703                         } else {
5704                                 current_speed = SPEED_10;
5705                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5706                         }
5707
5708                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5709                                 current_duplex = DUPLEX_FULL;
5710                         else
5711                                 current_duplex = DUPLEX_HALF;
5712                 }
5713
5714                 tw32_f(MAC_MODE, tp->mac_mode);
5715                 udelay(40);
5716
5717                 tg3_clear_mac_status(tp);
5718
5719                 goto fiber_setup_done;
5720         }
5721
5722         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5723         tw32_f(MAC_MODE, tp->mac_mode);
5724         udelay(40);
5725
5726         tg3_clear_mac_status(tp);
5727
5728         if (force_reset)
5729                 tg3_phy_reset(tp);
5730
5731         tp->link_config.rmt_adv = 0;
5732
5733         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5734         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5735         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5736                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5737                         bmsr |= BMSR_LSTATUS;
5738                 else
5739                         bmsr &= ~BMSR_LSTATUS;
5740         }
5741
5742         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5743
5744         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5745             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5746                 /* do nothing, just check for link up at the end */
5747         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5748                 u32 adv, newadv;
5749
5750                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5751                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5752                                  ADVERTISE_1000XPAUSE |
5753                                  ADVERTISE_1000XPSE_ASYM |
5754                                  ADVERTISE_SLCT);
5755
5756                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5757                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5758
5759                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5760                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5761                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5762                         tg3_writephy(tp, MII_BMCR, bmcr);
5763
5764                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5765                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5766                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5767
5768                         return err;
5769                 }
5770         } else {
5771                 u32 new_bmcr;
5772
5773                 bmcr &= ~BMCR_SPEED1000;
5774                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5775
5776                 if (tp->link_config.duplex == DUPLEX_FULL)
5777                         new_bmcr |= BMCR_FULLDPLX;
5778
5779                 if (new_bmcr != bmcr) {
5780                         /* BMCR_SPEED1000 is a reserved bit that needs
5781                          * to be set on write.
5782                          */
5783                         new_bmcr |= BMCR_SPEED1000;
5784
5785                         /* Force a linkdown */
5786                         if (tp->link_up) {
5787                                 u32 adv;
5788
5789                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5790                                 adv &= ~(ADVERTISE_1000XFULL |
5791                                          ADVERTISE_1000XHALF |
5792                                          ADVERTISE_SLCT);
5793                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5794                                 tg3_writephy(tp, MII_BMCR, bmcr |
5795                                                            BMCR_ANRESTART |
5796                                                            BMCR_ANENABLE);
5797                                 udelay(10);
5798                                 tg3_carrier_off(tp);
5799                         }
5800                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5801                         bmcr = new_bmcr;
5802                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5803                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5804                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5805                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5806                                         bmsr |= BMSR_LSTATUS;
5807                                 else
5808                                         bmsr &= ~BMSR_LSTATUS;
5809                         }
5810                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5811                 }
5812         }
5813
5814         if (bmsr & BMSR_LSTATUS) {
5815                 current_speed = SPEED_1000;
5816                 current_link_up = true;
5817                 if (bmcr & BMCR_FULLDPLX)
5818                         current_duplex = DUPLEX_FULL;
5819                 else
5820                         current_duplex = DUPLEX_HALF;
5821
5822                 local_adv = 0;
5823                 remote_adv = 0;
5824
5825                 if (bmcr & BMCR_ANENABLE) {
5826                         u32 common;
5827
5828                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5829                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5830                         common = local_adv & remote_adv;
5831                         if (common & (ADVERTISE_1000XHALF |
5832                                       ADVERTISE_1000XFULL)) {
5833                                 if (common & ADVERTISE_1000XFULL)
5834                                         current_duplex = DUPLEX_FULL;
5835                                 else
5836                                         current_duplex = DUPLEX_HALF;
5837
5838                                 tp->link_config.rmt_adv =
5839                                            mii_adv_to_ethtool_adv_x(remote_adv);
5840                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5841                                 /* Link is up via parallel detect */
5842                         } else {
5843                                 current_link_up = false;
5844                         }
5845                 }
5846         }
5847
5848 fiber_setup_done:
5849         if (current_link_up && current_duplex == DUPLEX_FULL)
5850                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5851
5852         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5853         if (tp->link_config.active_duplex == DUPLEX_HALF)
5854                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5855
5856         tw32_f(MAC_MODE, tp->mac_mode);
5857         udelay(40);
5858
5859         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5860
5861         tp->link_config.active_speed = current_speed;
5862         tp->link_config.active_duplex = current_duplex;
5863
5864         tg3_test_and_report_link_chg(tp, current_link_up);
5865         return err;
5866 }
5867
/* Periodic serdes parallel-detection helper, invoked from the driver
 * timer path.  While serdes_counter is nonzero, autoneg is still being
 * given time to finish.  After that:
 *  - if there is no link but autoneg is enabled, probe the PHY's
 *    shadow/expansion registers; signal detect without incoming config
 *    code words means the partner is not autonegotiating, so force the
 *    link up by parallel detection (1000/full, autoneg off).
 *  - if the link came up via parallel detection and config code words
 *    start arriving, re-enable autoneg so the partners can negotiate.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice: the status bits are latched. */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5927
/* Top-level link setup.  Dispatches to the copper, fiber, or
 * MII-serdes handler based on the PHY type, then programs the
 * link-dependent MAC settings: the GRC clock prescaler workaround on
 * 5784 A-steps, TX slot-time/IPG lengths, statistics-block coalescing,
 * and the ASPM L1 power-management threshold workaround.
 * Returns the PHY handler's error code.
 */
static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		/* Match the GRC timer prescaler to the current MAC
		 * core clock frequency.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		/* Preserve the jumbo-frame and countdown fields. */
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Half-duplex gigabit requires an extended slot time. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only coalesce statistics while the link is up. */
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5993
5994 /* tp->lock must be held */
5995 static u64 tg3_refclk_read(struct tg3 *tp)
5996 {
5997         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5998         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5999 }
6000
6001 /* tp->lock must be held */
6002 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6003 {
6004         tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
6005         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6006         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6007         tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
6008 }
6009
6010 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6011 static inline void tg3_full_unlock(struct tg3 *tp);
6012 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6013 {
6014         struct tg3 *tp = netdev_priv(dev);
6015
6016         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6017                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6018                                 SOF_TIMESTAMPING_SOFTWARE;
6019
6020         if (tg3_flag(tp, PTP_CAPABLE)) {
6021                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6022                                         SOF_TIMESTAMPING_RX_HARDWARE |
6023                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6024         }
6025
6026         if (tp->ptp_clock)
6027                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6028         else
6029                 info->phc_index = -1;
6030
6031         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6032
6033         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6034                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6035                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6036                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6037         return 0;
6038 }
6039
6040 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6041 {
6042         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6043         bool neg_adj = false;
6044         u32 correction = 0;
6045
6046         if (ppb < 0) {
6047                 neg_adj = true;
6048                 ppb = -ppb;
6049         }
6050
6051         /* Frequency adjustment is performed using hardware with a 24 bit
6052          * accumulator and a programmable correction value. On each clk, the
6053          * correction value gets added to the accumulator and when it
6054          * overflows, the time counter is incremented/decremented.
6055          *
6056          * So conversion from ppb to correction value is
6057          *              ppb * (1 << 24) / 1000000000
6058          */
6059         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6060                      TG3_EAV_REF_CLK_CORRECT_MASK;
6061
6062         tg3_full_lock(tp, 0);
6063
6064         if (correction)
6065                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6066                      TG3_EAV_REF_CLK_CORRECT_EN |
6067                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6068         else
6069                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6070
6071         tg3_full_unlock(tp);
6072
6073         return 0;
6074 }
6075
/* Step the PHC by delta nanoseconds.  The shift is kept in the
 * software offset tp->ptp_adjust, which is added to every raw
 * hardware clock reading (see tg3_ptp_gettime).
 */
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
6086
6087 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6088 {
6089         u64 ns;
6090         u32 remainder;
6091         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6092
6093         tg3_full_lock(tp, 0);
6094         ns = tg3_refclk_read(tp);
6095         ns += tp->ptp_adjust;
6096         tg3_full_unlock(tp);
6097
6098         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6099         ts->tv_nsec = remainder;
6100
6101         return 0;
6102 }
6103
6104 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6105                            const struct timespec *ts)
6106 {
6107         u64 ns;
6108         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6109
6110         ns = timespec_to_ns(ts);
6111
6112         tg3_full_lock(tp, 0);
6113         tg3_refclk_write(tp, ns);
6114         tp->ptp_adjust = 0;
6115         tg3_full_unlock(tp);
6116
6117         return 0;
6118 }
6119
/* Ancillary PTP features (alarms, external timestamps, periodic
 * outputs, PPS) are not supported on this hardware.
 */
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}
6125
/* Capabilities advertised to the PTP core: a single adjustable
 * free-running clock, no ancillary features (alarms, external
 * timestamps, periodic outputs, or PPS).
 */
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,	/* max frequency adjustment, ppb */
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
6140
6141 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6142                                      struct skb_shared_hwtstamps *timestamp)
6143 {
6144         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6145         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6146                                            tp->ptp_adjust);
6147 }
6148
6149 /* tp->lock must be held */
6150 static void tg3_ptp_init(struct tg3 *tp)
6151 {
6152         if (!tg3_flag(tp, PTP_CAPABLE))
6153                 return;
6154
6155         /* Initialize the hardware clock to the system time. */
6156         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6157         tp->ptp_adjust = 0;
6158         tp->ptp_info = tg3_ptp_caps;
6159 }
6160
6161 /* tp->lock must be held */
6162 static void tg3_ptp_resume(struct tg3 *tp)
6163 {
6164         if (!tg3_flag(tp, PTP_CAPABLE))
6165                 return;
6166
6167         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6168         tp->ptp_adjust = 0;
6169 }
6170
6171 static void tg3_ptp_fini(struct tg3 *tp)
6172 {
6173         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6174                 return;
6175
6176         ptp_clock_unregister(tp->ptp_clock);
6177         tp->ptp_clock = NULL;
6178         tp->ptp_adjust = 0;
6179 }
6180
/* Nonzero while interrupt synchronization is in progress; callers
 * check this before touching hardware state.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
6185
6186 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6187 {
6188         int i;
6189
6190         dst = (u32 *)((u8 *)dst + off);
6191         for (i = 0; i < len; i += sizeof(u32))
6192                 *dst++ = tr32(off + i);
6193 }
6194
/* Snapshot the legacy (non-PCIe) register blocks into the caller's
 * TG3_REG_BLK_SIZE buffer; each range lands at its own register
 * offset within the buffer.  Some ranges are gated on device
 * capability flags (MSI-X vectors, pre-5705 TX CPU, NVRAM).
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
6244
/* Dump device registers plus per-vector status-block and NAPI ring
 * state to the log for post-mortem debugging.  May run in atomic
 * context, hence GFP_ATOMIC; silently gives up if the scratch buffer
 * cannot be allocated.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line, skipping all-zero groups. */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		/* Driver-side NAPI/ring bookkeeping for the same vector */
		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
6300
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* We should only get here if the reorder workaround is not
	 * already active and ordered (indirect) mailbox writes are
	 * not in use.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* The reset workqueue handler acts on this flag. */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
6322
6323 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6324 {
6325         /* Tell compiler to fetch tx indices from memory. */
6326         barrier();
6327         return tnapi->tx_pending -
6328                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6329 }
6330
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Reclaims completed tx descriptors between the driver's consumer
 * index and the hardware's, unmapping DMA buffers, freeing skbs,
 * emitting hardware tx timestamps when requested, and waking the
 * tx queue when enough space has been freed.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS, vector 0 carries no tx ring; tx queues start at
	 * vector 1.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb here means the rings are corrupted
		 * (e.g. MMIO reordering); trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip over any extra descriptors a fragmented
		 * mapping consumed.
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Frag entries must be skb-less and must not
			 * run past the hardware index.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		/* Re-check under the tx lock to close the race with a
		 * concurrent tg3_start_xmit() stopping the queue.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
6434
6435 static void tg3_frag_free(bool is_frag, void *data)
6436 {
6437         if (is_frag)
6438                 put_page(virt_to_head_page(data));
6439         else
6440                 kfree(data);
6441 }
6442
/* Release the rx buffer attached to ring slot @ri: unmap the @map_sz
 * byte streaming DMA mapping, free the buffer, and clear the slot.
 * No-op when the slot holds no buffer.
 */
static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	/* Recompute the total allocation size exactly as
	 * tg3_alloc_rx_data() did, so the page-fragment vs. kmalloc
	 * test below selects the matching free path in tg3_frag_free().
	 */
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	/* Unmap before freeing so memory returned to the allocator is
	 * no longer visible to the device.
	 */
	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}
6456
6457
6458 /* Returns size of skb allocated or < 0 on error.
6459  *
6460  * We only need to fill in the address because the other members
6461  * of the RX descriptor are invariant, see tg3_init_rings.
6462  *
6463  * Note the purposeful assymetry of cpu vs. chip accesses.  For
6464  * posting buffers we only dirty the first cache line of the RX
6465  * descriptor (containing the address).  Whereas for the RX status
6466  * buffers the cpu only reads the last cacheline of the RX descriptor
6467  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6468  */
/* Allocate and DMA-map a fresh rx buffer for one producer ring slot.
 * On success, the hardware descriptor and the shadow ring_info are
 * updated and the mapped data size is returned.  On failure a negative
 * errno is returned and the ring slot is left completely untouched.
 * *frag_size is set to the allocation size for page-fragment buffers,
 * or to 0 for kmalloc'd buffers, so callers can pick the matching
 * free/build_skb path later.
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	/* Select the descriptor, shadow entry, and payload size for
	 * the ring identified by the opaque cookie.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	/* Prefer a cheap page fragment when the whole allocation
	 * (payload + headroom + shared info) fits in one page;
	 * otherwise fall back to kmalloc and flag that via
	 * *frag_size == 0.
	 */
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		/* Mapping failed: release the buffer and leave the
		 * ring slot exactly as the caller saw it.
		 */
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	/* Publish the new DMA address to the hardware descriptor. */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
6533
6534 /* We only need to move over in the address because the other
6535  * members of the RX descriptor are invariant.  See notes above
6536  * tg3_alloc_rx_data for full details.
6537  */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Recycled buffers always come from napi[0]'s producer set,
	 * which shadows what the hardware consumed.
	 */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	/* Locate source/destination descriptor and shadow entries for
	 * the ring named by the opaque cookie.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	/* Transfer buffer ownership plus DMA address to the
	 * destination slot.
	 */
	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
6583
6584 /* The RX ring scheme is composed of multiple rings which post fresh
6585  * buffers to the chip, and one special ring the chip uses to report
6586  * status back to the host.
6587  *
6588  * The special ring reports the status of received packets to the
6589  * host.  The chip does not write into the original descriptor the
6590  * RX buffer was obtained from.  The chip simply takes the original
6591  * descriptor as provided by the host, updates the status and length
6592  * field, then writes this into the next status ring entry.
6593  *
6594  * Each ring the host uses to post buffers to the chip is described
6595  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6596  * it is first placed into the on-chip ram.  When the packet's length
6597  * is known, it walks down the TG3_BDINFO entries to select the ring.
6598  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6599  * which is within the range of the new packet's length is chosen.
6600  *
6601  * The "separate ring for rx status" scheme may sound queer, but it makes
6602  * sense from a cache coherency perspective.  If only the host writes
6603  * to the buffer post rings, and only the chip writes to the rx status
6604  * rings, then cache lines never move beyond shared-modified state.
6605  * If both the host and chip were to write into the same ring, cache line
6606  * eviction could occur since both entities want it in an exclusive state.
6607  */
/* Service the rx return ring for @tnapi, processing up to @budget
 * packets.  Returns the number of packets delivered to the stack.
 * Frames larger than the copy threshold are handed up zero-copy via
 * build_skb() with a replacement buffer posted in their place; small
 * frames are copied into a fresh skb and the original buffer is
 * recycled back to the producer ring.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		/* The opaque cookie identifies which producer ring and
		 * which slot this completion refers to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Drop errored frames (the odd-nibble MII indication is
		 * tolerated) and recycle their buffers.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		/* Latch the hardware receive timestamp for PTP frames. */
		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			/* Zero-copy path: commit a replacement buffer
			 * first; the old one is only handed to the
			 * stack if that succeeds.
			 */
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Copy path for small frames: the DMA buffer
			 * stays in the ring (recycled) and the payload
			 * is copied into a fresh skb.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		/* Trust the hardware checksum only when it verified a
		 * TCP/UDP checksum as correct (0xffff).
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless they carry a VLAN tag. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically push the std producer index so the chip
		 * does not run out of buffers during a long burst.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, napi[1] is responsible for refilling the
		 * hardware rings; wake it if we are any other vector.
		 */
		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
6813
/* Service link-change events reported through the status block.  Does
 * nothing in USE_LINKCHG_REG / POLL_SERDES modes, where link state is
 * tracked elsewhere.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the link-change bit while keeping the
			 * status-updated indication set.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib owns link management; just
				 * clear the MAC attention sources.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
		}
	}
}
6837
/* Transfer fresh rx buffers from source producer set @spr (filled by
 * one of the per-vector NAPI contexts) into destination set @dpr (the
 * set the hardware consumes, napi[0]'s).  Both the ring_info shadow
 * entries and the hardware buffer descriptors are copied, for the
 * standard and the jumbo rings.  Returns 0, or -ENOSPC when a
 * destination slot was still occupied and the transfer stopped short.
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* Standard ring. */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Contiguous entries available in the source,
		 * accounting for ring wrap-around...
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		/* ...bounded by the contiguous room in the destination. */
		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Stop at the first destination slot still holding a
		 * buffer; the caller will retry the remainder later.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		/* Mirror the DMA addresses into the hardware BDs. */
		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Jumbo ring: identical algorithm to the standard ring above. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
6963
/* Core NAPI work function shared by tg3_poll() and tg3_poll_msix():
 * reap TX completions, then process rx work within the remaining
 * budget.  Returns the updated work_done count.  With RSS enabled,
 * vector 1 additionally drains the other vectors' producer rings into
 * napi[0]'s set and updates the hardware producer mailboxes.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* Vectors without an rx return ring are done after TX. */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* BD writes must reach memory before the mailbox
		 * updates below expose them to the hardware.
		 */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer came up short (-ENOSPC): kick the host
		 * coalescing engine so the leftover buffers are retried
		 * on a subsequent pass.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
7014
7015 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7016 {
7017         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7018                 schedule_work(&tp->reset_task);
7019 }
7020
/* Cancel any scheduled reset task and clear the related state.
 * cancel_work_sync() waits for an already-running tg3_reset_task() to
 * finish, so the flags are cleared only once no reset is in flight.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
7027
/* NAPI poll handler for MSI-X vectors other than 0 (rx/tx work only;
 * link and error attentions are handled by tg3_poll() on vector 0).
 * Loops until the budget is exhausted or no work remains, then
 * re-enables the vector's interrupt through its mailbox.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7087
/* Inspect the chip's error attention registers and, when a genuine
 * error is found, dump state and schedule a full chip reset.  Benign
 * bits (the mbuf low-watermark flow attention and the MSI request
 * status bit) are masked out.  The ERROR_PROCESSED flag prevents
 * scheduling more than one reset per error occurrence.
 */
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
7121
/* NAPI poll handler for interrupt vector 0.  In addition to rx/tx
 * work it services chip error attentions and link-change events.
 * With tagged status, last_tag tells the hardware (through
 * tg3_int_reenable()) how much work was processed; otherwise the
 * status-updated bit is cleared before re-checking for work.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7169
7170 static void tg3_napi_disable(struct tg3 *tp)
7171 {
7172         int i;
7173
7174         for (i = tp->irq_cnt - 1; i >= 0; i--)
7175                 napi_disable(&tp->napi[i].napi);
7176 }
7177
7178 static void tg3_napi_enable(struct tg3 *tp)
7179 {
7180         int i;
7181
7182         for (i = 0; i < tp->irq_cnt; i++)
7183                 napi_enable(&tp->napi[i].napi);
7184 }
7185
7186 static void tg3_napi_init(struct tg3 *tp)
7187 {
7188         int i;
7189
7190         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7191         for (i = 1; i < tp->irq_cnt; i++)
7192                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7193 }
7194
7195 static void tg3_napi_fini(struct tg3 *tp)
7196 {
7197         int i;
7198
7199         for (i = 0; i < tp->irq_cnt; i++)
7200                 netif_napi_del(&tp->napi[i].napi);
7201 }
7202
/* Quiesce the transmit path: freshen trans_start so the tx watchdog
 * does not fire while the device is deliberately stopped, then disable
 * NAPI polling, drop the carrier, and stop all tx queues.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
7210
/* tp->lock must be held.
 * Counterpart to tg3_netif_stop(): restart the tx queues, restore the
 * carrier state, re-enable NAPI polling and chip interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	/* Force the status block to be re-examined on the next poll. */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
7229
/* Mark the driver as IRQ-synchronized and wait for all in-flight
 * interrupt handlers to finish.  Handlers check tp->irq_sync (via
 * tg3_irq_sync()) and become no-ops once it is set.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible before waiting for the handlers. */
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
7242
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 * Pairs with tg3_full_unlock().
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
7254
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
7259
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache for the status block and next rx descriptor. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Skip scheduling NAPI while the driver is quiescing IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
7277
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache for the status block and next rx descriptor. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
7303
/* Legacy (INTx) interrupt handler.  The line may be shared with other
 * devices, so first confirm the interrupt is really ours, then ack it
 * and hand the work off to NAPI.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
7352
/* Legacy (INTx) handler for chips using tagged status blocks.  A status
 * tag that has not changed since the last IRQ means no new events were
 * posted, so a shared/screaming interrupt can be reported unhandled.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	/* Warm the cache for the next rx descriptor before polling. */
	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
7404
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	/* Claim the interrupt if the status block was updated or the
	 * PCI state register shows our interrupt line asserted; mask
	 * further interrupts so the test fires exactly once.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
7419
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by netconsole and friends to drive the
 * device when normal interrupts are unavailable.  Invokes the INTx
 * handler manually for every vector.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	/* Nothing to do while the driver is quiescing interrupts. */
	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
7433
/* ndo_tx_timeout hook: optionally log the chip state, then schedule a
 * full reset from process context.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
7445
7446 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7447 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7448 {
7449         u32 base = (u32) mapping & 0xffffffff;
7450
7451         return (base > 0xffffdcc0) && (base + len + 8 < base);
7452 }
7453
/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
 * of any 4GB boundaries: 4G, 8G, etc
 */
static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					   u32 len, u32 mss)
{
	/* Only the 5762 ASIC needs this workaround, and only for TSO
	 * (non-zero mss) sends.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
		u32 base = (u32) mapping & 0xffffffff;

		/* Detect 32-bit wrap of base + len + (mss mod 16K). */
		return ((base + len + (mss & 0x3fff)) < base);
	}
	return 0;
}
7467
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only possible on 64-bit highmem systems, and only for chips
	 * with the 40-bit DMA limitation.
	 */
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
7480
7481 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7482                                  dma_addr_t mapping, u32 len, u32 flags,
7483                                  u32 mss, u32 vlan)
7484 {
7485         txbd->addr_hi = ((u64) mapping >> 32);
7486         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7487         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7488         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7489 }
7490
/* Queue the buffer at @map/@len as one or more tx BDs starting at
 * *@entry, splitting buffers larger than the chip's DMA segment limit
 * (tp->dma_limit).  *@entry and *@budget are advanced by the number of
 * descriptors consumed.  Returns true if a hardware DMA bug condition
 * was hit (or the budget ran out); the caller must then fall back to
 * tigon3_dma_hwbug_workaround().
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		/* All but the final chunk must not carry TXD_FLAG_END. */
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			/* Mark the BD as part of a split buffer so unmap
			 * logic can skip it later.
			 */
			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Ran out of descriptors mid-split: undo the
				 * fragmented mark on the last queued BD.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
7553
/* Unmap the DMA mappings of the skb queued at @entry: the linear head
 * first, then frags 0..@last (pass @last = -1 for head only), skipping
 * the extra BDs of buffers split by tg3_tx_frag_set() (->fragmented).
 * Clears the ring's skb pointer; does NOT free the skb itself.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Skip over any extra BDs used for the split linear head. */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Skip over any extra BDs used for a split fragment. */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
7591
/* Workaround 4GB and 40-bit hardware DMA bugs.
 * Bounce-copy *@pskb into a freshly allocated linear skb, map it, and
 * queue it as a single BD chain at *@entry.  On success the original
 * skb is freed and *@pskb points at the new one.  Returns 0 on success
 * or -1 on allocation/mapping failure; NOTE on failure *@pskb may point
 * at an already-freed skb (or NULL), so callers must only drop it.
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 requires 4-byte alignment of the buffer start. */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				/* Even the bounce buffer trips a DMA bug or
				 * exhausts the budget; undo and give up.
				 */
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
7646
7647 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7648
7649 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7650  * TSO header is greater than 80 bytes.
7651  */
7652 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7653 {
7654         struct sk_buff *segs, *nskb;
7655         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7656
7657         /* Estimate the number of fragments in the worst case */
7658         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7659                 netif_stop_queue(tp->dev);
7660
7661                 /* netif_tx_stop_queue() must be done before checking
7662                  * checking tx index in tg3_tx_avail() below, because in
7663                  * tg3_tx(), we update tx index before checking for
7664                  * netif_tx_queue_stopped().
7665                  */
7666                 smp_mb();
7667                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7668                         return NETDEV_TX_BUSY;
7669
7670                 netif_wake_queue(tp->dev);
7671         }
7672
7673         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7674         if (IS_ERR(segs))
7675                 goto tg3_tso_bug_end;
7676
7677         do {
7678                 nskb = segs;
7679                 segs = segs->next;
7680                 nskb->next = NULL;
7681                 tg3_start_xmit(nskb, tp->dev);
7682         } while (segs);
7683
7684 tg3_tso_bug_end:
7685         dev_kfree_skb(skb);
7686
7687         return NETDEV_TX_OK;
7688 }
7689
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 * Maps the skb head and fragments, builds the BD chain (falling back to
 * a bounce copy when a hardware DMA bug would be hit), then kicks the
 * tx producer mailbox.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	/* With TSS the tx rings start at napi[1]; napi[0] has none. */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		/* L3 + L4 header bytes, not counting the Ethernet header. */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode the header length into mss/base_flags in the
		 * format each hardware TSO generation expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		/* Firmware TSO only needs the mss on the first BD. */
		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	/* Unwind the head mapping plus frags 0..i-1, then drop. */
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
7912
/* Enable or disable internal MAC loopback, adjusting the port mode to
 * match the PHY type, and write the result to the MAC_MODE register.
 */
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);	/* let the MAC mode change settle */
}
7940
/* Configure the PHY for loopback at @speed.  With @extlpbk, set up
 * external loopback (1000BASE-T master mode, or FET test modes);
 * otherwise use the BMCR loopback bit.  Also programs the MAC port
 * mode to match.  Returns 0 on success, -EIO if external loopback
 * setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		/* FET PHYs are 10/100 only; cap at 100 Mb/s. */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* Link polarity quirks for specific 5700-era PHYs. */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
8033
8034 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8035 {
8036         struct tg3 *tp = netdev_priv(dev);
8037
8038         if (features & NETIF_F_LOOPBACK) {
8039                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8040                         return;
8041
8042                 spin_lock_bh(&tp->lock);
8043                 tg3_mac_loopback(tp, true);
8044                 netif_carrier_on(tp->dev);
8045                 spin_unlock_bh(&tp->lock);
8046                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8047         } else {
8048                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8049                         return;
8050
8051                 spin_lock_bh(&tp->lock);
8052                 tg3_mac_loopback(tp, false);
8053                 /* Force link status check */
8054                 tg3_setup_phy(tp, true);
8055                 spin_unlock_bh(&tp->lock);
8056                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8057         }
8058 }
8059
8060 static netdev_features_t tg3_fix_features(struct net_device *dev,
8061         netdev_features_t features)
8062 {
8063         struct tg3 *tp = netdev_priv(dev);
8064
8065         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8066                 features &= ~NETIF_F_ALL_TSO;
8067
8068         return features;
8069 }
8070
8071 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8072 {
8073         netdev_features_t changed = dev->features ^ features;
8074
8075         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8076                 tg3_set_loopback(dev, features);
8077
8078         return 0;
8079 }
8080
/* Free the rx data buffers attached to one producer ring set.
 *
 * For the per-vector prodring sets (everything except napi[0]'s) only
 * the slots between the consumer and producer indices hold data, so
 * only that window is walked, wrapping with the ring mask.  For the
 * real hw prodring (napi[0]) any slot may hold a buffer, so the whole
 * ring is freed.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		/* Non-hw set: free only the cons..prod window. */
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	/* Hw prodring: every slot may be populated. */
	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	/* 5780-class chips feed jumbo frames through the standard ring
	 * (see tg3_rx_prodring_alloc()), so they have no separate jumbo
	 * buffer ring to free here.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
8114
8115 /* Initialize rx rings for packet processing.
8116  *
8117  * The chip has been shut down and the driver detached from
8118  * the networking, so no interrupts or new tx packets will
8119  * end up in the driver.  tp->{tx,}lock are held and thus
8120  * we may not sleep.
8121  */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* The per-vector (non-hw) prodring sets carry no descriptor
	 * rings of their own; just clear their buffer bookkeeping.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips service jumbo frames from the standard ring,
	 * so size the std-ring DMA buffers for jumbo frames on those
	 * parts when a jumbo MTU is configured.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			/* Nothing allocated at all is fatal; otherwise
			 * shrink the ring to what we managed to fill.
			 */
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	/* 5780-class parts have no separate jumbo ring (handled above
	 * via the std ring); other non-jumbo-capable parts are done.
	 */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			/* Same policy as the std ring: zero buffers is
			 * fatal, a partial fill just shrinks the ring.
			 */
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
8223
8224 static void tg3_rx_prodring_fini(struct tg3 *tp,
8225                                  struct tg3_rx_prodring_set *tpr)
8226 {
8227         kfree(tpr->rx_std_buffers);
8228         tpr->rx_std_buffers = NULL;
8229         kfree(tpr->rx_jmb_buffers);
8230         tpr->rx_jmb_buffers = NULL;
8231         if (tpr->rx_std) {
8232                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8233                                   tpr->rx_std, tpr->rx_std_mapping);
8234                 tpr->rx_std = NULL;
8235         }
8236         if (tpr->rx_jmb) {
8237                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8238                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8239                 tpr->rx_jmb = NULL;
8240         }
8241 }
8242
/* Allocate the memory backing one producer ring set.  Returns 0 or
 * -ENOMEM; on failure everything allocated so far is released via
 * tg3_rx_prodring_fini().
 */
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	/* Host-side bookkeeping array for the standard ring. */
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	/* DMA descriptor memory shared with the chip. */
	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	/* A dedicated jumbo ring exists only on jumbo-capable parts that
	 * are not 5780-class; 5780-class chips feed jumbo frames through
	 * the standard ring (see tg3_rx_prodring_alloc()).
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
8278
8279 /* Free up pending packets in all rx/tx rings.
8280  *
8281  * The chip has been shut down and the driver detached from
8282  * the networking, so no interrupts or new tx packets will
8283  * end up in the driver.  tp->{tx,}lock is not held and we are not
8284  * in an interrupt context and thus may sleep.
8285  */
8286 static void tg3_free_rings(struct tg3 *tp)
8287 {
8288         int i, j;
8289
8290         for (j = 0; j < tp->irq_cnt; j++) {
8291                 struct tg3_napi *tnapi = &tp->napi[j];
8292
8293                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8294
8295                 if (!tnapi->tx_buffers)
8296                         continue;
8297
8298                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8299                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8300
8301                         if (!skb)
8302                                 continue;
8303
8304                         tg3_tx_skb_unmap(tnapi, i,
8305                                          skb_shinfo(skb)->nr_frags - 1);
8306
8307                         dev_kfree_skb_any(skb);
8308                 }
8309                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8310         }
8311 }
8312
8313 /* Initialize tx/rx rings for packet processing.
8314  *
8315  * The chip has been shut down and the driver detached from
8316  * the networking, so no interrupts or new tx packets will
8317  * end up in the driver.  tp->{tx,}lock are held and thus
8318  * we may not sleep.
8319  */
8320 static int tg3_init_rings(struct tg3 *tp)
8321 {
8322         int i;
8323
8324         /* Free up all the SKBs. */
8325         tg3_free_rings(tp);
8326
8327         for (i = 0; i < tp->irq_cnt; i++) {
8328                 struct tg3_napi *tnapi = &tp->napi[i];
8329
8330                 tnapi->last_tag = 0;
8331                 tnapi->last_irq_tag = 0;
8332                 tnapi->hw_status->status = 0;
8333                 tnapi->hw_status->status_tag = 0;
8334                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8335
8336                 tnapi->tx_prod = 0;
8337                 tnapi->tx_cons = 0;
8338                 if (tnapi->tx_ring)
8339                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8340
8341                 tnapi->rx_rcb_ptr = 0;
8342                 if (tnapi->rx_rcb)
8343                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8344
8345                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8346                         tg3_free_rings(tp);
8347                         return -ENOMEM;
8348                 }
8349         }
8350
8351         return 0;
8352 }
8353
8354 static void tg3_mem_tx_release(struct tg3 *tp)
8355 {
8356         int i;
8357
8358         for (i = 0; i < tp->irq_max; i++) {
8359                 struct tg3_napi *tnapi = &tp->napi[i];
8360
8361                 if (tnapi->tx_ring) {
8362                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8363                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8364                         tnapi->tx_ring = NULL;
8365                 }
8366
8367                 kfree(tnapi->tx_buffers);
8368                 tnapi->tx_buffers = NULL;
8369         }
8370 }
8371
8372 static int tg3_mem_tx_acquire(struct tg3 *tp)
8373 {
8374         int i;
8375         struct tg3_napi *tnapi = &tp->napi[0];
8376
8377         /* If multivector TSS is enabled, vector 0 does not handle
8378          * tx interrupts.  Don't allocate any resources for it.
8379          */
8380         if (tg3_flag(tp, ENABLE_TSS))
8381                 tnapi++;
8382
8383         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8384                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8385                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8386                 if (!tnapi->tx_buffers)
8387                         goto err_out;
8388
8389                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8390                                                     TG3_TX_RING_BYTES,
8391                                                     &tnapi->tx_desc_mapping,
8392                                                     GFP_KERNEL);
8393                 if (!tnapi->tx_ring)
8394                         goto err_out;
8395         }
8396
8397         return 0;
8398
8399 err_out:
8400         tg3_mem_tx_release(tp);
8401         return -ENOMEM;
8402 }
8403
8404 static void tg3_mem_rx_release(struct tg3 *tp)
8405 {
8406         int i;
8407
8408         for (i = 0; i < tp->irq_max; i++) {
8409                 struct tg3_napi *tnapi = &tp->napi[i];
8410
8411                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8412
8413                 if (!tnapi->rx_rcb)
8414                         continue;
8415
8416                 dma_free_coherent(&tp->pdev->dev,
8417                                   TG3_RX_RCB_RING_BYTES(tp),
8418                                   tnapi->rx_rcb,
8419                                   tnapi->rx_rcb_mapping);
8420                 tnapi->rx_rcb = NULL;
8421         }
8422 }
8423
/* Allocate rx producer ring sets and rx return (completion) rings.
 * Returns 0 or -ENOMEM; partial allocations are rolled back.
 */
static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		/* __GFP_ZERO gives a zeroed return ring up front. */
		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL | __GFP_ZERO);
		if (!tnapi->rx_rcb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
8463
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
8468 static void tg3_free_consistent(struct tg3 *tp)
8469 {
8470         int i;
8471
8472         for (i = 0; i < tp->irq_cnt; i++) {
8473                 struct tg3_napi *tnapi = &tp->napi[i];
8474
8475                 if (tnapi->hw_status) {
8476                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8477                                           tnapi->hw_status,
8478                                           tnapi->status_mapping);
8479                         tnapi->hw_status = NULL;
8480                 }
8481         }
8482
8483         tg3_mem_rx_release(tp);
8484         tg3_mem_tx_release(tp);
8485
8486         if (tp->hw_stats) {
8487                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8488                                   tp->hw_stats, tp->stats_mapping);
8489                 tp->hw_stats = NULL;
8490         }
8491 }
8492
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	/* Hardware statistics block; __GFP_ZERO gives it zeroed. */
	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL | __GFP_ZERO);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL | __GFP_ZERO);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			/* For vector 0 under RSS no case matches and
			 * prodptr stays NULL; that vector does not
			 * process rx completions (see
			 * tg3_mem_rx_acquire()).
			 */
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	/* tg3_free_consistent() tolerates a partially built state. */
	tg3_free_consistent(tp);
	return -ENOMEM;
}
8559
8560 #define MAX_WAIT_CNT 1000
8561
8562 /* To stop a block, clear the enable bit and poll till it
8563  * clears.  tp->lock is held.
8564  */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	/* Clear the enable bit and flush the posted write. */
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	/* Poll up to MAX_WAIT_CNT * 100us for the block to stop. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	/* NOTE: with silent == true a timeout is deliberately ignored
	 * and 0 is returned anyway.
	 */
	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
8607
8608 /* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the receive MAC first so nothing new enters the chip. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Stop the rx-side blocks.  err accumulates failures via |=;
	 * any nonzero result means at least one block did not stop.
	 */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Stop the tx-side blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* The transmit MAC has no enable bit in a MODE block, so poll
	 * MAC_TX_MODE directly until TX_MODE_ENABLE clears.
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset register. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear every vector's status block now that the chip is quiet. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
8671
8672 /* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* Stash PCI_COMMAND in tp->pci_cmd; tg3_restore_pci_state()
	 * writes it back after the chip reset.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
8677
8678 /* Restore PCI state after chip reset */
8679 static void tg3_restore_pci_state(struct tg3 *tp)
8680 {
8681         u32 val;
8682
8683         /* Re-enable indirect register accesses. */
8684         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8685                                tp->misc_host_ctrl);
8686
8687         /* Set MAX PCI retry to zero. */
8688         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8689         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8690             tg3_flag(tp, PCIX_MODE))
8691                 val |= PCISTATE_RETRY_SAME_DMA;
8692         /* Allow reads and writes to the APE register and memory space. */
8693         if (tg3_flag(tp, ENABLE_APE))
8694                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8695                        PCISTATE_ALLOW_APE_SHMEM_WR |
8696                        PCISTATE_ALLOW_APE_PSPACE_WR;
8697         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8698
8699         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8700
8701         if (!tg3_flag(tp, PCI_EXPRESS)) {
8702                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8703                                       tp->pci_cacheline_sz);
8704                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8705                                       tp->pci_lat_timer);
8706         }
8707
8708         /* Make sure PCI-X relaxed ordering bit is clear. */
8709         if (tg3_flag(tp, PCIX_MODE)) {
8710                 u16 pcix_cmd;
8711
8712                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8713                                      &pcix_cmd);
8714                 pcix_cmd &= ~PCI_X_CMD_ERO;
8715                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8716                                       pcix_cmd);
8717         }
8718
8719         if (tg3_flag(tp, 5780_CLASS)) {
8720
8721                 /* Chip reset on 5780 will reset MSI enable bit,
8722                  * so need to restore it.
8723                  */
8724                 if (tg3_flag(tp, USING_MSI)) {
8725                         u16 ctrl;
8726
8727                         pci_read_config_word(tp->pdev,
8728                                              tp->msi_cap + PCI_MSI_FLAGS,
8729                                              &ctrl);
8730                         pci_write_config_word(tp->pdev,
8731                                               tp->msi_cap + PCI_MSI_FLAGS,
8732                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8733                         val = tr32(MSGINT_MODE);
8734                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8735                 }
8736         }
8737 }
8738
8739 /* tp->lock is held. */
8740 static int tg3_chip_reset(struct tg3 *tp)
8741 {
8742         u32 val;
8743         void (*write_op)(struct tg3 *, u32, u32);
8744         int i, err;
8745
8746         tg3_nvram_lock(tp);
8747
8748         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8749
8750         /* No matching tg3_nvram_unlock() after this because
8751          * chip reset below will undo the nvram lock.
8752          */
8753         tp->nvram_lock_cnt = 0;
8754
8755         /* GRC_MISC_CFG core clock reset will clear the memory
8756          * enable bit in PCI register 4 and the MSI enable bit
8757          * on some chips, so we save relevant registers here.
8758          */
8759         tg3_save_pci_state(tp);
8760
8761         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8762             tg3_flag(tp, 5755_PLUS))
8763                 tw32(GRC_FASTBOOT_PC, 0);
8764
8765         /*
8766          * We must avoid the readl() that normally takes place.
8767          * It locks machines, causes machine checks, and other
8768          * fun things.  So, temporarily disable the 5701
8769          * hardware workaround, while we do the reset.
8770          */
8771         write_op = tp->write32;
8772         if (write_op == tg3_write_flush_reg32)
8773                 tp->write32 = tg3_write32;
8774
8775         /* Prevent the irq handler from reading or writing PCI registers
8776          * during chip reset when the memory enable bit in the PCI command
8777          * register may be cleared.  The chip does not generate interrupt
8778          * at this time, but the irq handler may still be called due to irq
8779          * sharing or irqpoll.
8780          */
8781         tg3_flag_set(tp, CHIP_RESETTING);
8782         for (i = 0; i < tp->irq_cnt; i++) {
8783                 struct tg3_napi *tnapi = &tp->napi[i];
8784                 if (tnapi->hw_status) {
8785                         tnapi->hw_status->status = 0;
8786                         tnapi->hw_status->status_tag = 0;
8787                 }
8788                 tnapi->last_tag = 0;
8789                 tnapi->last_irq_tag = 0;
8790         }
8791         smp_mb();
8792
8793         for (i = 0; i < tp->irq_cnt; i++)
8794                 synchronize_irq(tp->napi[i].irq_vec);
8795
8796         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8797                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8798                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8799         }
8800
8801         /* do the reset */
8802         val = GRC_MISC_CFG_CORECLK_RESET;
8803
8804         if (tg3_flag(tp, PCI_EXPRESS)) {
8805                 /* Force PCIe 1.0a mode */
8806                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8807                     !tg3_flag(tp, 57765_PLUS) &&
8808                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8809                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8810                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8811
8812                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8813                         tw32(GRC_MISC_CFG, (1 << 29));
8814                         val |= (1 << 29);
8815                 }
8816         }
8817
8818         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8819                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8820                 tw32(GRC_VCPU_EXT_CTRL,
8821                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8822         }
8823
8824         /* Manage gphy power for all CPMU absent PCIe devices. */
8825         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8826                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8827
8828         tw32(GRC_MISC_CFG, val);
8829
8830         /* restore 5701 hardware bug workaround write method */
8831         tp->write32 = write_op;
8832
8833         /* Unfortunately, we have to delay before the PCI read back.
8834          * Some 575X chips even will not respond to a PCI cfg access
8835          * when the reset command is given to the chip.
8836          *
8837          * How do these hardware designers expect things to work
8838          * properly if the PCI write is posted for a long period
8839          * of time?  It is always necessary to have some method by
8840          * which a register read back can occur to push the write
8841          * out which does the reset.
8842          *
8843          * For most tg3 variants the trick below was working.
8844          * Ho hum...
8845          */
8846         udelay(120);
8847
8848         /* Flush PCI posted writes.  The normal MMIO registers
8849          * are inaccessible at this time so this is the only
8850          * way to make this reliably (actually, this is no longer
8851          * the case, see above).  I tried to use indirect
8852          * register read/write but this upset some 5701 variants.
8853          */
8854         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8855
8856         udelay(120);
8857
8858         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8859                 u16 val16;
8860
8861                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8862                         int j;
8863                         u32 cfg_val;
8864
8865                         /* Wait for link training to complete.  */
8866                         for (j = 0; j < 5000; j++)
8867                                 udelay(100);
8868
8869                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8870                         pci_write_config_dword(tp->pdev, 0xc4,
8871                                                cfg_val | (1 << 15));
8872                 }
8873
8874                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8875                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8876                 /*
8877                  * Older PCIe devices only support the 128 byte
8878                  * MPS setting.  Enforce the restriction.
8879                  */
8880                 if (!tg3_flag(tp, CPMU_PRESENT))
8881                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8882                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8883
8884                 /* Clear error status */
8885                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8886                                       PCI_EXP_DEVSTA_CED |
8887                                       PCI_EXP_DEVSTA_NFED |
8888                                       PCI_EXP_DEVSTA_FED |
8889                                       PCI_EXP_DEVSTA_URD);
8890         }
8891
8892         tg3_restore_pci_state(tp);
8893
8894         tg3_flag_clear(tp, CHIP_RESETTING);
8895         tg3_flag_clear(tp, ERROR_PROCESSED);
8896
8897         val = 0;
8898         if (tg3_flag(tp, 5780_CLASS))
8899                 val = tr32(MEMARB_MODE);
8900         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8901
8902         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8903                 tg3_stop_fw(tp);
8904                 tw32(0x5000, 0x400);
8905         }
8906
8907         if (tg3_flag(tp, IS_SSB_CORE)) {
8908                 /*
8909                  * BCM4785: In order to avoid repercussions from using
8910                  * potentially defective internal ROM, stop the Rx RISC CPU,
8911                  * which is not required.
8912                  */
8913                 tg3_stop_fw(tp);
8914                 tg3_halt_cpu(tp, RX_CPU_BASE);
8915         }
8916
8917         err = tg3_poll_fw(tp);
8918         if (err)
8919                 return err;
8920
8921         tw32(GRC_MODE, tp->grc_mode);
8922
8923         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8924                 val = tr32(0xc4);
8925
8926                 tw32(0xc4, val | (1 << 15));
8927         }
8928
8929         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8930             tg3_asic_rev(tp) == ASIC_REV_5705) {
8931                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8932                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8933                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8934                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8935         }
8936
8937         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8938                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8939                 val = tp->mac_mode;
8940         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8941                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8942                 val = tp->mac_mode;
8943         } else
8944                 val = 0;
8945
8946         tw32_f(MAC_MODE, val);
8947         udelay(40);
8948
8949         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8950
8951         tg3_mdio_start(tp);
8952
8953         if (tg3_flag(tp, PCI_EXPRESS) &&
8954             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8955             tg3_asic_rev(tp) != ASIC_REV_5785 &&
8956             !tg3_flag(tp, 57765_PLUS)) {
8957                 val = tr32(0x7c00);
8958
8959                 tw32(0x7c00, val | (1 << 25));
8960         }
8961
8962         if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8963                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8964                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8965         }
8966
8967         /* Reprobe ASF enable state.  */
8968         tg3_flag_clear(tp, ENABLE_ASF);
8969         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
8970                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
8971
8972         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8973         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8974         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8975                 u32 nic_cfg;
8976
8977                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8978                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8979                         tg3_flag_set(tp, ENABLE_ASF);
8980                         tp->last_event_jiffies = jiffies;
8981                         if (tg3_flag(tp, 5750_PLUS))
8982                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8983
8984                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
8985                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
8986                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
8987                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
8988                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
8989                 }
8990         }
8991
8992         return 0;
8993 }
8994
8995 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8996 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8997
8998 /* tp->lock is held. */
8999 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9000 {
9001         int err;
9002
9003         tg3_stop_fw(tp);
9004
9005         tg3_write_sig_pre_reset(tp, kind);
9006
9007         tg3_abort_hw(tp, silent);
9008         err = tg3_chip_reset(tp);
9009
9010         __tg3_set_mac_addr(tp, false);
9011
9012         tg3_write_sig_legacy(tp, kind);
9013         tg3_write_sig_post_reset(tp, kind);
9014
9015         if (tp->hw_stats) {
9016                 /* Save the stats across chip resets... */
9017                 tg3_get_nstats(tp, &tp->net_stats_prev);
9018                 tg3_get_estats(tp, &tp->estats_prev);
9019
9020                 /* And make sure the next sample is new data */
9021                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9022         }
9023
9024         if (err)
9025                 return err;
9026
9027         return 0;
9028 }
9029
9030 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9031 {
9032         struct tg3 *tp = netdev_priv(dev);
9033         struct sockaddr *addr = p;
9034         int err = 0;
9035         bool skip_mac_1 = false;
9036
9037         if (!is_valid_ether_addr(addr->sa_data))
9038                 return -EADDRNOTAVAIL;
9039
9040         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9041
9042         if (!netif_running(dev))
9043                 return 0;
9044
9045         if (tg3_flag(tp, ENABLE_ASF)) {
9046                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9047
9048                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9049                 addr0_low = tr32(MAC_ADDR_0_LOW);
9050                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9051                 addr1_low = tr32(MAC_ADDR_1_LOW);
9052
9053                 /* Skip MAC addr 1 if ASF is using it. */
9054                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9055                     !(addr1_high == 0 && addr1_low == 0))
9056                         skip_mac_1 = true;
9057         }
9058         spin_lock_bh(&tp->lock);
9059         __tg3_set_mac_addr(tp, skip_mac_1);
9060         spin_unlock_bh(&tp->lock);
9061
9062         return err;
9063 }
9064
/* tp->lock is held. */
/* Program one NIC-SRAM buffer-descriptor-info (BDINFO) structure at
 * @bdinfo_addr: the 64-bit host DMA address of the ring, its
 * length/flags word, and — on pre-5705 chips only — the NIC-local
 * ring address.
 */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
                           dma_addr_t mapping, u32 maxlen_flags,
                           u32 nic_addr)
{
	/* Host ring DMA address: high 32 bits, then low 32 bits. */
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	/* Ring length and flags (e.g. BDINFO_FLAGS_DISABLED). */
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		       maxlen_flags);

	/* 5705+ chips have no NIC-resident ring address field. */
	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
9085
9086
9087 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9088 {
9089         int i = 0;
9090
9091         if (!tg3_flag(tp, ENABLE_TSS)) {
9092                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9093                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9094                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9095         } else {
9096                 tw32(HOSTCC_TXCOL_TICKS, 0);
9097                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9098                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9099
9100                 for (; i < tp->txq_cnt; i++) {
9101                         u32 reg;
9102
9103                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9104                         tw32(reg, ec->tx_coalesce_usecs);
9105                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9106                         tw32(reg, ec->tx_max_coalesced_frames);
9107                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9108                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9109                 }
9110         }
9111
9112         for (; i < tp->irq_max - 1; i++) {
9113                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9114                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9115                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9116         }
9117 }
9118
9119 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9120 {
9121         int i = 0;
9122         u32 limit = tp->rxq_cnt;
9123
9124         if (!tg3_flag(tp, ENABLE_RSS)) {
9125                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9126                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9127                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9128                 limit--;
9129         } else {
9130                 tw32(HOSTCC_RXCOL_TICKS, 0);
9131                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9132                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9133         }
9134
9135         for (; i < limit; i++) {
9136                 u32 reg;
9137
9138                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9139                 tw32(reg, ec->rx_coalesce_usecs);
9140                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9141                 tw32(reg, ec->rx_max_coalesced_frames);
9142                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9143                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9144         }
9145
9146         for (; i < tp->irq_max - 1; i++) {
9147                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9148                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9149                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9150         }
9151 }
9152
9153 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9154 {
9155         tg3_coal_tx_init(tp, ec);
9156         tg3_coal_rx_init(tp, ec);
9157
9158         if (!tg3_flag(tp, 5705_PLUS)) {
9159                 u32 val = ec->stats_block_coalesce_usecs;
9160
9161                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9162                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9163
9164                 if (!tp->link_up)
9165                         val = 0;
9166
9167                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9168         }
9169 }
9170
/* tp->lock is held. */
/* Reset every ring-related hardware structure: disable all TX and RX
 * return rings beyond the first, quiesce and zero all mailboxes,
 * clear the host status blocks, and reprogram the ring BDINFO
 * structures and status-block DMA addresses for every active vector.
 * The exact register-write ordering here is intentional.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	/* The number of SEND RCB slots present depends on the chip
	 * family: 16 on pre-5705, 4 on 5717+, 2 on 57765-class/5762,
	 * otherwise just 1.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first. */
	/* RCV RET RCB slot counts likewise vary by chip family. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	/* Writing 1 to the interrupt mailbox masks the vector. */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, vector 0 owns the TX producer mailbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Program ring 0's TX and RX-return BDINFO structures. */
	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	/* Repeat the status-block and BDINFO setup for every extra
	 * interrupt vector; each vector's status block register pair
	 * is 8 bytes after the previous one.
	 */
	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
9300
9301 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9302 {
9303         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9304
9305         if (!tg3_flag(tp, 5750_PLUS) ||
9306             tg3_flag(tp, 5780_CLASS) ||
9307             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9308             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9309             tg3_flag(tp, 57765_PLUS))
9310                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9311         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9312                  tg3_asic_rev(tp) == ASIC_REV_5787)
9313                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9314         else
9315                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9316
9317         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9318         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9319
9320         val = min(nic_rep_thresh, host_rep_thresh);
9321         tw32(RCVBDI_STD_THRESH, val);
9322
9323         if (tg3_flag(tp, 57765_PLUS))
9324                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9325
9326         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9327                 return;
9328
9329         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9330
9331         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9332
9333         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9334         tw32(RCVBDI_JUMBO_THRESH, val);
9335
9336         if (tg3_flag(tp, 57765_PLUS))
9337                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9338 }
9339
9340 static inline u32 calc_crc(unsigned char *buf, int len)
9341 {
9342         u32 reg;
9343         u32 tmp;
9344         int j, k;
9345
9346         reg = 0xffffffff;
9347
9348         for (j = 0; j < len; j++) {
9349                 reg ^= buf[j];
9350
9351                 for (k = 0; k < 8; k++) {
9352                         tmp = reg & 0x01;
9353
9354                         reg >>= 1;
9355
9356                         if (tmp)
9357                                 reg ^= 0xedb88320;
9358                 }
9359         }
9360
9361         return ~reg;
9362 }
9363
9364 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9365 {
9366         /* accept or reject all multicast frames */
9367         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9368         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9369         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9370         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9371 }
9372
/* Program the MAC RX filter from the netdev flags: promiscuous mode,
 * all-multicast, no-multicast, or a 128-bit CRC hash of the multicast
 * list.  Only touches MAC_RX_MODE if the computed mode changed.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		/* Hash each address into one of the 128 filter bits:
		 * the low 7 bits of the inverted CRC select the bit,
		 * bits 5-6 pick the register, bits 0-4 the position.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Flush the new mode to hardware only if it changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
9426
9427 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9428 {
9429         int i;
9430
9431         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9432                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9433 }
9434
9435 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9436 {
9437         int i;
9438
9439         if (!tg3_flag(tp, SUPPORT_MSIX))
9440                 return;
9441
9442         if (tp->rxq_cnt == 1) {
9443                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9444                 return;
9445         }
9446
9447         /* Validate table against current IRQ count */
9448         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9449                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9450                         break;
9451         }
9452
9453         if (i != TG3_RSS_INDIR_TBL_SIZE)
9454                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9455 }
9456
9457 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9458 {
9459         int i = 0;
9460         u32 reg = MAC_RSS_INDIR_TBL_0;
9461
9462         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9463                 u32 val = tp->rss_ind_tbl[i];
9464                 i++;
9465                 for (; i % 8; i++) {
9466                         val <<= 4;
9467                         val |= tp->rss_ind_tbl[i];
9468                 }
9469                 tw32(reg, val);
9470                 reg += 4;
9471         }
9472 }
9473
9474 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9475 {
9476         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9477                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9478         else
9479                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9480 }
9481
9482 /* tp->lock is held. */
9483 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9484 {
9485         u32 val, rdmac_mode;
9486         int i, err, limit;
9487         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9488
9489         tg3_disable_ints(tp);
9490
9491         tg3_stop_fw(tp);
9492
9493         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9494
9495         if (tg3_flag(tp, INIT_COMPLETE))
9496                 tg3_abort_hw(tp, 1);
9497
9498         /* Enable MAC control of LPI */
9499         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9500                 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9501                       TG3_CPMU_EEE_LNKIDL_UART_IDL;
9502                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9503                         val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9504
9505                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9506
9507                 tw32_f(TG3_CPMU_EEE_CTRL,
9508                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9509
9510                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9511                       TG3_CPMU_EEEMD_LPI_IN_TX |
9512                       TG3_CPMU_EEEMD_LPI_IN_RX |
9513                       TG3_CPMU_EEEMD_EEE_ENABLE;
9514
9515                 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9516                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9517
9518                 if (tg3_flag(tp, ENABLE_APE))
9519                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9520
9521                 tw32_f(TG3_CPMU_EEE_MODE, val);
9522
9523                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9524                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9525                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9526
9527                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9528                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9529                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9530         }
9531
9532         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9533             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9534                 tg3_phy_pull_config(tp);
9535                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9536         }
9537
9538         if (reset_phy)
9539                 tg3_phy_reset(tp);
9540
9541         err = tg3_chip_reset(tp);
9542         if (err)
9543                 return err;
9544
9545         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9546
9547         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9548                 val = tr32(TG3_CPMU_CTRL);
9549                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9550                 tw32(TG3_CPMU_CTRL, val);
9551
9552                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9553                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9554                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9555                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9556
9557                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9558                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9559                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9560                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9561
9562                 val = tr32(TG3_CPMU_HST_ACC);
9563                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9564                 val |= CPMU_HST_ACC_MACCLK_6_25;
9565                 tw32(TG3_CPMU_HST_ACC, val);
9566         }
9567
9568         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9569                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9570                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9571                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9572                 tw32(PCIE_PWR_MGMT_THRESH, val);
9573
9574                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9575                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9576
9577                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9578
9579                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9580                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9581         }
9582
9583         if (tg3_flag(tp, L1PLLPD_EN)) {
9584                 u32 grc_mode = tr32(GRC_MODE);
9585
9586                 /* Access the lower 1K of PL PCIE block registers. */
9587                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9588                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9589
9590                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9591                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9592                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9593
9594                 tw32(GRC_MODE, grc_mode);
9595         }
9596
9597         if (tg3_flag(tp, 57765_CLASS)) {
9598                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9599                         u32 grc_mode = tr32(GRC_MODE);
9600
9601                         /* Access the lower 1K of PL PCIE block registers. */
9602                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9603                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9604
9605                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9606                                    TG3_PCIE_PL_LO_PHYCTL5);
9607                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9608                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9609
9610                         tw32(GRC_MODE, grc_mode);
9611                 }
9612
9613                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9614                         u32 grc_mode;
9615
9616                         /* Fix transmit hangs */
9617                         val = tr32(TG3_CPMU_PADRNG_CTL);
9618                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9619                         tw32(TG3_CPMU_PADRNG_CTL, val);
9620
9621                         grc_mode = tr32(GRC_MODE);
9622
9623                         /* Access the lower 1K of DL PCIE block registers. */
9624                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9625                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9626
9627                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9628                                    TG3_PCIE_DL_LO_FTSMAX);
9629                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9630                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9631                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9632
9633                         tw32(GRC_MODE, grc_mode);
9634                 }
9635
9636                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9637                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9638                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9639                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9640         }
9641
9642         /* This works around an issue with Athlon chipsets on
9643          * B3 tigon3 silicon.  This bit has no effect on any
9644          * other revision.  But do not set this on PCI Express
9645          * chips and don't even touch the clocks if the CPMU is present.
9646          */
9647         if (!tg3_flag(tp, CPMU_PRESENT)) {
9648                 if (!tg3_flag(tp, PCI_EXPRESS))
9649                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9650                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9651         }
9652
9653         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9654             tg3_flag(tp, PCIX_MODE)) {
9655                 val = tr32(TG3PCI_PCISTATE);
9656                 val |= PCISTATE_RETRY_SAME_DMA;
9657                 tw32(TG3PCI_PCISTATE, val);
9658         }
9659
9660         if (tg3_flag(tp, ENABLE_APE)) {
9661                 /* Allow reads and writes to the
9662                  * APE register and memory space.
9663                  */
9664                 val = tr32(TG3PCI_PCISTATE);
9665                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9666                        PCISTATE_ALLOW_APE_SHMEM_WR |
9667                        PCISTATE_ALLOW_APE_PSPACE_WR;
9668                 tw32(TG3PCI_PCISTATE, val);
9669         }
9670
9671         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9672                 /* Enable some hw fixes.  */
9673                 val = tr32(TG3PCI_MSI_DATA);
9674                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9675                 tw32(TG3PCI_MSI_DATA, val);
9676         }
9677
9678         /* Descriptor ring init may make accesses to the
9679          * NIC SRAM area to setup the TX descriptors, so we
9680          * can only do this after the hardware has been
9681          * successfully reset.
9682          */
9683         err = tg3_init_rings(tp);
9684         if (err)
9685                 return err;
9686
9687         if (tg3_flag(tp, 57765_PLUS)) {
9688                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9689                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9690                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9691                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9692                 if (!tg3_flag(tp, 57765_CLASS) &&
9693                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
9694                     tg3_asic_rev(tp) != ASIC_REV_5762)
9695                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9696                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9697         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9698                    tg3_asic_rev(tp) != ASIC_REV_5761) {
9699                 /* This value is determined during the probe time DMA
9700                  * engine test, tg3_test_dma.
9701                  */
9702                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9703         }
9704
9705         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9706                           GRC_MODE_4X_NIC_SEND_RINGS |
9707                           GRC_MODE_NO_TX_PHDR_CSUM |
9708                           GRC_MODE_NO_RX_PHDR_CSUM);
9709         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9710
9711         /* Pseudo-header checksum is done by hardware logic and not
9712          * the offload processers, so make the chip do the pseudo-
9713          * header checksums on receive.  For transmit it is more
9714          * convenient to do the pseudo-header checksum in software
9715          * as Linux does that on transmit for us in all cases.
9716          */
9717         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9718
9719         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9720         if (tp->rxptpctl)
9721                 tw32(TG3_RX_PTP_CTL,
9722                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9723
9724         if (tg3_flag(tp, PTP_CAPABLE))
9725                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9726
9727         tw32(GRC_MODE, tp->grc_mode | val);
9728
9729         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
9730         val = tr32(GRC_MISC_CFG);
9731         val &= ~0xff;
9732         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9733         tw32(GRC_MISC_CFG, val);
9734
9735         /* Initialize MBUF/DESC pool. */
9736         if (tg3_flag(tp, 5750_PLUS)) {
9737                 /* Do nothing.  */
9738         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9739                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9740                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9741                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9742                 else
9743                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9744                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9745                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9746         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9747                 int fw_len;
9748
9749                 fw_len = tp->fw_len;
9750                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9751                 tw32(BUFMGR_MB_POOL_ADDR,
9752                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9753                 tw32(BUFMGR_MB_POOL_SIZE,
9754                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9755         }
9756
9757         if (tp->dev->mtu <= ETH_DATA_LEN) {
9758                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9759                      tp->bufmgr_config.mbuf_read_dma_low_water);
9760                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9761                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9762                 tw32(BUFMGR_MB_HIGH_WATER,
9763                      tp->bufmgr_config.mbuf_high_water);
9764         } else {
9765                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9766                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9767                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9768                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9769                 tw32(BUFMGR_MB_HIGH_WATER,
9770                      tp->bufmgr_config.mbuf_high_water_jumbo);
9771         }
9772         tw32(BUFMGR_DMA_LOW_WATER,
9773              tp->bufmgr_config.dma_low_water);
9774         tw32(BUFMGR_DMA_HIGH_WATER,
9775              tp->bufmgr_config.dma_high_water);
9776
9777         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9778         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9779                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9780         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9781             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9782             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9783                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9784         tw32(BUFMGR_MODE, val);
9785         for (i = 0; i < 2000; i++) {
9786                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9787                         break;
9788                 udelay(10);
9789         }
9790         if (i >= 2000) {
9791                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9792                 return -ENODEV;
9793         }
9794
9795         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9796                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9797
9798         tg3_setup_rxbd_thresholds(tp);
9799
9800         /* Initialize TG3_BDINFO's at:
9801          *  RCVDBDI_STD_BD:     standard eth size rx ring
9802          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9803          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9804          *
9805          * like so:
9806          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9807          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9808          *                              ring attribute flags
9809          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9810          *
9811          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9812          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9813          *
9814          * The size of each ring is fixed in the firmware, but the location is
9815          * configurable.
9816          */
9817         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9818              ((u64) tpr->rx_std_mapping >> 32));
9819         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9820              ((u64) tpr->rx_std_mapping & 0xffffffff));
9821         if (!tg3_flag(tp, 5717_PLUS))
9822                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9823                      NIC_SRAM_RX_BUFFER_DESC);
9824
9825         /* Disable the mini ring */
9826         if (!tg3_flag(tp, 5705_PLUS))
9827                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9828                      BDINFO_FLAGS_DISABLED);
9829
9830         /* Program the jumbo buffer descriptor ring control
9831          * blocks on those devices that have them.
9832          */
9833         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9834             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9835
9836                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9837                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9838                              ((u64) tpr->rx_jmb_mapping >> 32));
9839                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9840                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9841                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9842                               BDINFO_FLAGS_MAXLEN_SHIFT;
9843                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9844                              val | BDINFO_FLAGS_USE_EXT_RECV);
9845                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9846                             tg3_flag(tp, 57765_CLASS) ||
9847                             tg3_asic_rev(tp) == ASIC_REV_5762)
9848                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9849                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9850                 } else {
9851                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9852                              BDINFO_FLAGS_DISABLED);
9853                 }
9854
9855                 if (tg3_flag(tp, 57765_PLUS)) {
9856                         val = TG3_RX_STD_RING_SIZE(tp);
9857                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9858                         val |= (TG3_RX_STD_DMA_SZ << 2);
9859                 } else
9860                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9861         } else
9862                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9863
9864         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9865
9866         tpr->rx_std_prod_idx = tp->rx_pending;
9867         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9868
9869         tpr->rx_jmb_prod_idx =
9870                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9871         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9872
9873         tg3_rings_reset(tp);
9874
9875         /* Initialize MAC address and backoff seed. */
9876         __tg3_set_mac_addr(tp, false);
9877
9878         /* MTU + ethernet header + FCS + optional VLAN tag */
9879         tw32(MAC_RX_MTU_SIZE,
9880              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9881
9882         /* The slot time is changed by tg3_setup_phy if we
9883          * run at gigabit with half duplex.
9884          */
9885         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9886               (6 << TX_LENGTHS_IPG_SHIFT) |
9887               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9888
9889         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9890             tg3_asic_rev(tp) == ASIC_REV_5762)
9891                 val |= tr32(MAC_TX_LENGTHS) &
9892                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9893                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9894
9895         tw32(MAC_TX_LENGTHS, val);
9896
9897         /* Receive rules. */
9898         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9899         tw32(RCVLPC_CONFIG, 0x0181);
9900
9901         /* Calculate RDMAC_MODE setting early, we need it to determine
9902          * the RCVLPC_STATE_ENABLE mask.
9903          */
9904         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9905                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9906                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9907                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9908                       RDMAC_MODE_LNGREAD_ENAB);
9909
9910         if (tg3_asic_rev(tp) == ASIC_REV_5717)
9911                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9912
9913         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9914             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9915             tg3_asic_rev(tp) == ASIC_REV_57780)
9916                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9917                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9918                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9919
9920         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9921             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9922                 if (tg3_flag(tp, TSO_CAPABLE) &&
9923                     tg3_asic_rev(tp) == ASIC_REV_5705) {
9924                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9925                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9926                            !tg3_flag(tp, IS_5788)) {
9927                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9928                 }
9929         }
9930
9931         if (tg3_flag(tp, PCI_EXPRESS))
9932                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9933
9934         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9935                 tp->dma_limit = 0;
9936                 if (tp->dev->mtu <= ETH_DATA_LEN) {
9937                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9938                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9939                 }
9940         }
9941
9942         if (tg3_flag(tp, HW_TSO_1) ||
9943             tg3_flag(tp, HW_TSO_2) ||
9944             tg3_flag(tp, HW_TSO_3))
9945                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9946
9947         if (tg3_flag(tp, 57765_PLUS) ||
9948             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9949             tg3_asic_rev(tp) == ASIC_REV_57780)
9950                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9951
9952         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9953             tg3_asic_rev(tp) == ASIC_REV_5762)
9954                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9955
9956         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9957             tg3_asic_rev(tp) == ASIC_REV_5784 ||
9958             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9959             tg3_asic_rev(tp) == ASIC_REV_57780 ||
9960             tg3_flag(tp, 57765_PLUS)) {
9961                 u32 tgtreg;
9962
9963                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9964                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9965                 else
9966                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
9967
9968                 val = tr32(tgtreg);
9969                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9970                     tg3_asic_rev(tp) == ASIC_REV_5762) {
9971                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9972                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9973                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9974                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9975                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9976                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9977                 }
9978                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9979         }
9980
9981         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9982             tg3_asic_rev(tp) == ASIC_REV_5720 ||
9983             tg3_asic_rev(tp) == ASIC_REV_5762) {
9984                 u32 tgtreg;
9985
9986                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9987                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9988                 else
9989                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9990
9991                 val = tr32(tgtreg);
9992                 tw32(tgtreg, val |
9993                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9994                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9995         }
9996
9997         /* Receive/send statistics. */
9998         if (tg3_flag(tp, 5750_PLUS)) {
9999                 val = tr32(RCVLPC_STATS_ENABLE);
10000                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10001                 tw32(RCVLPC_STATS_ENABLE, val);
10002         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10003                    tg3_flag(tp, TSO_CAPABLE)) {
10004                 val = tr32(RCVLPC_STATS_ENABLE);
10005                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10006                 tw32(RCVLPC_STATS_ENABLE, val);
10007         } else {
10008                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10009         }
10010         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10011         tw32(SNDDATAI_STATSENAB, 0xffffff);
10012         tw32(SNDDATAI_STATSCTRL,
10013              (SNDDATAI_SCTRL_ENABLE |
10014               SNDDATAI_SCTRL_FASTUPD));
10015
10016         /* Setup host coalescing engine. */
10017         tw32(HOSTCC_MODE, 0);
10018         for (i = 0; i < 2000; i++) {
10019                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10020                         break;
10021                 udelay(10);
10022         }
10023
10024         __tg3_set_coalesce(tp, &tp->coal);
10025
10026         if (!tg3_flag(tp, 5705_PLUS)) {
10027                 /* Status/statistics block address.  See tg3_timer,
10028                  * the tg3_periodic_fetch_stats call there, and
10029                  * tg3_get_stats to see how this works for 5705/5750 chips.
10030                  */
10031                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10032                      ((u64) tp->stats_mapping >> 32));
10033                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10034                      ((u64) tp->stats_mapping & 0xffffffff));
10035                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10036
10037                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10038
10039                 /* Clear statistics and status block memory areas */
10040                 for (i = NIC_SRAM_STATS_BLK;
10041                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10042                      i += sizeof(u32)) {
10043                         tg3_write_mem(tp, i, 0);
10044                         udelay(40);
10045                 }
10046         }
10047
10048         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10049
10050         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10051         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10052         if (!tg3_flag(tp, 5705_PLUS))
10053                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10054
10055         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10056                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10057                 /* reset to prevent losing 1st rx packet intermittently */
10058                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10059                 udelay(10);
10060         }
10061
10062         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10063                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10064                         MAC_MODE_FHDE_ENABLE;
10065         if (tg3_flag(tp, ENABLE_APE))
10066                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10067         if (!tg3_flag(tp, 5705_PLUS) &&
10068             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10069             tg3_asic_rev(tp) != ASIC_REV_5700)
10070                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10071         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10072         udelay(40);
10073
10074         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10075          * If TG3_FLAG_IS_NIC is zero, we should read the
10076          * register to preserve the GPIO settings for LOMs. The GPIOs,
10077          * whether used as inputs or outputs, are set by boot code after
10078          * reset.
10079          */
10080         if (!tg3_flag(tp, IS_NIC)) {
10081                 u32 gpio_mask;
10082
10083                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10084                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10085                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10086
10087                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10088                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10089                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10090
10091                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10092                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10093
10094                 tp->grc_local_ctrl &= ~gpio_mask;
10095                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10096
10097                 /* GPIO1 must be driven high for eeprom write protect */
10098                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10099                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10100                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10101         }
10102         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10103         udelay(100);
10104
10105         if (tg3_flag(tp, USING_MSIX)) {
10106                 val = tr32(MSGINT_MODE);
10107                 val |= MSGINT_MODE_ENABLE;
10108                 if (tp->irq_cnt > 1)
10109                         val |= MSGINT_MODE_MULTIVEC_EN;
10110                 if (!tg3_flag(tp, 1SHOT_MSI))
10111                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10112                 tw32(MSGINT_MODE, val);
10113         }
10114
10115         if (!tg3_flag(tp, 5705_PLUS)) {
10116                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10117                 udelay(40);
10118         }
10119
10120         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10121                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10122                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10123                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10124                WDMAC_MODE_LNGREAD_ENAB);
10125
10126         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10127             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10128                 if (tg3_flag(tp, TSO_CAPABLE) &&
10129                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10130                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10131                         /* nothing */
10132                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10133                            !tg3_flag(tp, IS_5788)) {
10134                         val |= WDMAC_MODE_RX_ACCEL;
10135                 }
10136         }
10137
10138         /* Enable host coalescing bug fix */
10139         if (tg3_flag(tp, 5755_PLUS))
10140                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10141
10142         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10143                 val |= WDMAC_MODE_BURST_ALL_DATA;
10144
10145         tw32_f(WDMAC_MODE, val);
10146         udelay(40);
10147
10148         if (tg3_flag(tp, PCIX_MODE)) {
10149                 u16 pcix_cmd;
10150
10151                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10152                                      &pcix_cmd);
10153                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10154                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10155                         pcix_cmd |= PCI_X_CMD_READ_2K;
10156                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10157                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10158                         pcix_cmd |= PCI_X_CMD_READ_2K;
10159                 }
10160                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10161                                       pcix_cmd);
10162         }
10163
10164         tw32_f(RDMAC_MODE, rdmac_mode);
10165         udelay(40);
10166
10167         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10168             tg3_asic_rev(tp) == ASIC_REV_5720) {
10169                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10170                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10171                                 break;
10172                 }
10173                 if (i < TG3_NUM_RDMA_CHANNELS) {
10174                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10175                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10176                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10177                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10178                 }
10179         }
10180
10181         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10182         if (!tg3_flag(tp, 5705_PLUS))
10183                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10184
10185         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10186                 tw32(SNDDATAC_MODE,
10187                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10188         else
10189                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10190
10191         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10192         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10193         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10194         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10195                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10196         tw32(RCVDBDI_MODE, val);
10197         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10198         if (tg3_flag(tp, HW_TSO_1) ||
10199             tg3_flag(tp, HW_TSO_2) ||
10200             tg3_flag(tp, HW_TSO_3))
10201                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10202         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10203         if (tg3_flag(tp, ENABLE_TSS))
10204                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10205         tw32(SNDBDI_MODE, val);
10206         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10207
10208         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10209                 err = tg3_load_5701_a0_firmware_fix(tp);
10210                 if (err)
10211                         return err;
10212         }
10213
10214         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10215                 /* Ignore any errors for the firmware download. If download
10216                  * fails, the device will operate with EEE disabled
10217                  */
10218                 tg3_load_57766_firmware(tp);
10219         }
10220
10221         if (tg3_flag(tp, TSO_CAPABLE)) {
10222                 err = tg3_load_tso_firmware(tp);
10223                 if (err)
10224                         return err;
10225         }
10226
10227         tp->tx_mode = TX_MODE_ENABLE;
10228
10229         if (tg3_flag(tp, 5755_PLUS) ||
10230             tg3_asic_rev(tp) == ASIC_REV_5906)
10231                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10232
10233         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10234             tg3_asic_rev(tp) == ASIC_REV_5762) {
10235                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10236                 tp->tx_mode &= ~val;
10237                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10238         }
10239
10240         tw32_f(MAC_TX_MODE, tp->tx_mode);
10241         udelay(100);
10242
10243         if (tg3_flag(tp, ENABLE_RSS)) {
10244                 tg3_rss_write_indir_tbl(tp);
10245
10246                 /* Setup the "secret" hash key. */
10247                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10248                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10249                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10250                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10251                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10252                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10253                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10254                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10255                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10256                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10257         }
10258
10259         tp->rx_mode = RX_MODE_ENABLE;
10260         if (tg3_flag(tp, 5755_PLUS))
10261                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10262
10263         if (tg3_flag(tp, ENABLE_RSS))
10264                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10265                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10266                                RX_MODE_RSS_IPV6_HASH_EN |
10267                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10268                                RX_MODE_RSS_IPV4_HASH_EN |
10269                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10270
10271         tw32_f(MAC_RX_MODE, tp->rx_mode);
10272         udelay(10);
10273
10274         tw32(MAC_LED_CTRL, tp->led_ctrl);
10275
10276         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10277         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10278                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10279                 udelay(10);
10280         }
10281         tw32_f(MAC_RX_MODE, tp->rx_mode);
10282         udelay(10);
10283
10284         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10285                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10286                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10287                         /* Set drive transmission level to 1.2V  */
10288                         /* only if the signal pre-emphasis bit is not set  */
10289                         val = tr32(MAC_SERDES_CFG);
10290                         val &= 0xfffff000;
10291                         val |= 0x880;
10292                         tw32(MAC_SERDES_CFG, val);
10293                 }
10294                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10295                         tw32(MAC_SERDES_CFG, 0x616000);
10296         }
10297
10298         /* Prevent chip from dropping frames when flow control
10299          * is enabled.
10300          */
10301         if (tg3_flag(tp, 57765_CLASS))
10302                 val = 1;
10303         else
10304                 val = 2;
10305         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10306
10307         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10308             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10309                 /* Use hardware link auto-negotiation */
10310                 tg3_flag_set(tp, HW_AUTONEG);
10311         }
10312
10313         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10314             tg3_asic_rev(tp) == ASIC_REV_5714) {
10315                 u32 tmp;
10316
10317                 tmp = tr32(SERDES_RX_CTRL);
10318                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10319                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10320                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10321                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10322         }
10323
10324         if (!tg3_flag(tp, USE_PHYLIB)) {
10325                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10326                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10327
10328                 err = tg3_setup_phy(tp, false);
10329                 if (err)
10330                         return err;
10331
10332                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10333                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10334                         u32 tmp;
10335
10336                         /* Clear CRC stats. */
10337                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10338                                 tg3_writephy(tp, MII_TG3_TEST1,
10339                                              tmp | MII_TG3_TEST1_CRC_EN);
10340                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10341                         }
10342                 }
10343         }
10344
10345         __tg3_set_rx_mode(tp->dev);
10346
10347         /* Initialize receive rules. */
10348         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10349         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10350         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10351         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10352
10353         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10354                 limit = 8;
10355         else
10356                 limit = 16;
10357         if (tg3_flag(tp, ENABLE_ASF))
10358                 limit -= 4;
10359         switch (limit) {
10360         case 16:
10361                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10362         case 15:
10363                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10364         case 14:
10365                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10366         case 13:
10367                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10368         case 12:
10369                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10370         case 11:
10371                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10372         case 10:
10373                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10374         case 9:
10375                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10376         case 8:
10377                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10378         case 7:
10379                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10380         case 6:
10381                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10382         case 5:
10383                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10384         case 4:
10385                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10386         case 3:
10387                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10388         case 2:
10389         case 1:
10390
10391         default:
10392                 break;
10393         }
10394
10395         if (tg3_flag(tp, ENABLE_APE))
10396                 /* Write our heartbeat update interval to APE. */
10397                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10398                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10399
10400         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10401
10402         return 0;
10403 }
10404
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative errno from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	/* Reset the indirect memory-window base before the full hw reset. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
10423
10424 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10425 {
10426         int i;
10427
10428         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10429                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10430
10431                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10432                 off += len;
10433
10434                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10435                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10436                         memset(ocir, 0, TG3_OCIR_LEN);
10437         }
10438 }
10439
10440 /* sysfs attributes for hwmon */
10441 static ssize_t tg3_show_temp(struct device *dev,
10442                              struct device_attribute *devattr, char *buf)
10443 {
10444         struct pci_dev *pdev = to_pci_dev(dev);
10445         struct net_device *netdev = pci_get_drvdata(pdev);
10446         struct tg3 *tp = netdev_priv(netdev);
10447         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10448         u32 temperature;
10449
10450         spin_lock_bh(&tp->lock);
10451         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10452                                 sizeof(temperature));
10453         spin_unlock_bh(&tp->lock);
10454         return sprintf(buf, "%u\n", temperature);
10455 }
10456
10457
/* Three temperature readouts, all backed by tg3_show_temp(); the final
 * argument is the APE scratchpad offset passed via attr->index.
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

/* Attribute table registered/unregistered by tg3_hwmon_open/close(). */
static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
10475
10476 static void tg3_hwmon_close(struct tg3 *tp)
10477 {
10478         if (tp->hwmon_dev) {
10479                 hwmon_device_unregister(tp->hwmon_dev);
10480                 tp->hwmon_dev = NULL;
10481                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10482         }
10483 }
10484
10485 static void tg3_hwmon_open(struct tg3 *tp)
10486 {
10487         int i, err;
10488         u32 size = 0;
10489         struct pci_dev *pdev = tp->pdev;
10490         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10491
10492         tg3_sd_scan_scratchpad(tp, ocirs);
10493
10494         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10495                 if (!ocirs[i].src_data_length)
10496                         continue;
10497
10498                 size += ocirs[i].src_hdr_length;
10499                 size += ocirs[i].src_data_length;
10500         }
10501
10502         if (!size)
10503                 return;
10504
10505         /* Register hwmon sysfs hooks */
10506         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10507         if (err) {
10508                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10509                 return;
10510         }
10511
10512         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10513         if (IS_ERR(tp->hwmon_dev)) {
10514                 tp->hwmon_dev = NULL;
10515                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10516                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10517         }
10518 }
10519
10520
/* Accumulate the 32-bit hardware counter at REG into the 64-bit
 * software counter PSTAT (a high/low u32 pair), carrying into ->high
 * when the ->low addition wraps around.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
10527
/* Fold the chip's 32-bit MAC TX/RX statistics counters into the 64-bit
 * software copies in tp->hw_stats.  Called from tg3_timer()'s
 * once-per-second section with tp->lock held; a no-op while the link
 * is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719/5720 RDMA workaround: once more packets than RDMA channels
	 * have been transmitted, clear the workaround bit in the LSO RDMA
	 * control register and retire the flag for good.
	 */
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* On 5717/5719-A0/5720-A0 the discard counter is not
		 * usable; approximate discards from the mbuf low-water-mark
		 * attention bit instead (counts at most one per tick).
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			/* Write-one-to-clear the attention bit. */
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
10592
/* Detect and recover from a missed MSI.  If a napi context still has
 * work pending and its rx/tx consumer indices have not moved since the
 * last timer tick, invoke tg3_msi() by hand to re-drive processing.
 * A stalled context is given one grace tick (chk_msi_cnt) before the
 * handler is forced; the early return also defers scanning the
 * remaining vectors until the next tick.
 */
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				/* First stalled observation: wait one more
				 * tick before assuming the MSI was lost.
				 */
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		/* Progress was made (or handler fired): reset tracking. */
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
10615
/* Driver heartbeat timer, re-armed every tp->timer_offset jiffies.
 * Handles missed-MSI recovery, the non-tagged-status interrupt race
 * workaround, once-per-second stats/PHY polling, and the periodic ASF
 * firmware heartbeat.  __opaque is the struct tg3 pointer stashed in
 * tp->timer.data by tg3_timer_init().
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Skip the whole tick while an irq sync or reset task is in
	 * flight; just re-arm.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		/* Write DMA engine dropped out — schedule a full reset. */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			/* Poll link state via MAC_STATUS instead of relying
			 * on link-change interrupts.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode bits before the phy
				 * setup, but only when no serdes autoneg
				 * countdown is in progress.
				 */
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
10747
10748 static void tg3_timer_init(struct tg3 *tp)
10749 {
10750         if (tg3_flag(tp, TAGGED_STATUS) &&
10751             tg3_asic_rev(tp) != ASIC_REV_5717 &&
10752             !tg3_flag(tp, 57765_CLASS))
10753                 tp->timer_offset = HZ;
10754         else
10755                 tp->timer_offset = HZ / 10;
10756
10757         BUG_ON(tp->timer_offset > HZ);
10758
10759         tp->timer_multiplier = (HZ / tp->timer_offset);
10760         tp->asf_multiplier = (HZ / tp->timer_offset) *
10761                              TG3_FW_UPDATE_FREQ_SEC;
10762
10763         init_timer(&tp->timer);
10764         tp->timer.data = (unsigned long) tp;
10765         tp->timer.function = tg3_timer;
10766 }
10767
10768 static void tg3_timer_start(struct tg3 *tp)
10769 {
10770         tp->asf_counter   = tp->asf_multiplier;
10771         tp->timer_counter = tp->timer_multiplier;
10772
10773         tp->timer.expires = jiffies + tp->timer_offset;
10774         add_timer(&tp->timer);
10775 }
10776
/* Stop the driver timer, waiting for a concurrently-running handler to
 * finish before returning.
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
10781
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On failure the device is halted and closed; dev_close() must run
 * without tp->lock, so the lock is dropped and re-taken around the
 * error path (hence the sparse __releases/__acquires annotations).
 * Returns 0 on success or the tg3_init_hw() errno.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		/* Re-enable napi before dev_close() tears things down. */
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
10805
/* Workqueue handler for deferred chip resets (scheduled via
 * tg3_reset_task_schedule()).  Quiesces the PHY and data path, halts
 * and re-initializes the hardware, then restarts the interface.
 * Clears RESET_TASK_PENDING on all exits.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	/* Device was closed after the reset was scheduled — nothing to do. */
	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* TX recovery: fall back to flushed mailbox writes so
		 * reordering cannot re-trigger the hang.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY only if the hw came back up cleanly. */
	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
10849
10850 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10851 {
10852         irq_handler_t fn;
10853         unsigned long flags;
10854         char *name;
10855         struct tg3_napi *tnapi = &tp->napi[irq_num];
10856
10857         if (tp->irq_cnt == 1)
10858                 name = tp->dev->name;
10859         else {
10860                 name = &tnapi->irq_lbl[0];
10861                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10862                 name[IFNAMSIZ-1] = 0;
10863         }
10864
10865         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10866                 fn = tg3_msi;
10867                 if (tg3_flag(tp, 1SHOT_MSI))
10868                         fn = tg3_msi_1shot;
10869                 flags = 0;
10870         } else {
10871                 fn = tg3_interrupt;
10872                 if (tg3_flag(tp, TAGGED_STATUS))
10873                         fn = tg3_interrupt_tagged;
10874                 flags = IRQF_SHARED;
10875         }
10876
10877         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10878 }
10879
/* Verify that vector 0 can actually deliver an interrupt: temporarily
 * swap in tg3_test_isr, force a coalescing event, and poll for up to
 * ~50 ms for the interrupt mailbox or the masked-PCI-INT bit to react.
 * The normal handler is restored before returning.  Returns 0 when an
 * interrupt was observed, -EIO when not, or a request_irq() errno.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick the coalescing engine so it raises an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* Either signal means the interrupt arrived. */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* 57765+: acknowledge any pending tag so a stuck mailbox
		 * does not block delivery on the next iteration.
		 */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Restore the normal interrupt handler for vector 0. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
10953
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 *
 * On MSI failure (-EIO from tg3_test_interrupt), the driver falls back
 * to legacy INTx and resets the chip, since the failed MSI cycle may
 * have ended in a Master Abort.  Any other errno is passed through.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR included). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
11014
11015 static int tg3_request_firmware(struct tg3 *tp)
11016 {
11017         const struct tg3_firmware_hdr *fw_hdr;
11018
11019         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11020                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11021                            tp->fw_needed);
11022                 return -ENOENT;
11023         }
11024
11025         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11026
11027         /* Firmware blob starts with version numbers, followed by
11028          * start address and _full_ length including BSS sections
11029          * (which must be longer than the actual data, of course
11030          */
11031
11032         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11033         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11034                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11035                            tp->fw_len, tp->fw_needed);
11036                 release_firmware(tp->fw);
11037                 tp->fw = NULL;
11038                 return -EINVAL;
11039         }
11040
11041         /* We no longer need firmware; we have it. */
11042         tp->fw_needed = NULL;
11043         return 0;
11044 }
11045
11046 static u32 tg3_irq_count(struct tg3 *tp)
11047 {
11048         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11049
11050         if (irq_cnt > 1) {
11051                 /* We want as many rx rings enabled as there are cpus.
11052                  * In multiqueue MSI-X mode, the first MSI-X vector
11053                  * only deals with link interrupts, etc, so we add
11054                  * one to the number of vectors we are requesting.
11055                  */
11056                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11057         }
11058
11059         return irq_cnt;
11060 }
11061
/* Try to put the device into MSI-X mode: size the rx/tx queue counts,
 * request that many vectors (retrying with the chip-reported smaller
 * count if the first pci_enable_msix() call reports fewer available),
 * and record the assigned vectors in the napi contexts.  Sets the
 * ENABLE_RSS/ENABLE_TSS flags when more than one vector is in use.
 * Returns true when MSI-X is enabled, false to fall back to MSI/INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* rc > 0: fewer vectors available than requested — retry
		 * with the reported count and shrink the queue counts to
		 * match (one vector is reserved for link interrupts).
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	/* Single vector: MSI-X is on but RSS/TSS make no sense. */
	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
11122
/* Choose and enable the interrupt mode, preferring MSI-X over MSI over
 * legacy INTx.  Chips advertising MSI/MSI-X without tagged status are
 * forced down to the legacy configuration.  On success the chosen
 * per-vector irq numbers live in tp->napi[] and the MSGINT_MODE
 * register is programmed to match.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		/* MSI and INTx are both single-vector; route everything
		 * through the device's legacy irq line.
		 */
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
11161
11162 static void tg3_ints_fini(struct tg3 *tp)
11163 {
11164         if (tg3_flag(tp, USING_MSIX))
11165                 pci_disable_msix(tp->pdev);
11166         else if (tg3_flag(tp, USING_MSI))
11167                 pci_disable_msi(tp->pdev);
11168         tg3_flag_clear(tp, USING_MSI);
11169         tg3_flag_clear(tp, USING_MSIX);
11170         tg3_flag_clear(tp, ENABLE_RSS);
11171         tg3_flag_clear(tp, ENABLE_TSS);
11172 }
11173
/* Bring the device fully up: allocate rings, request irqs, initialize
 * the hardware, start the timer and enable interrupts.
 *
 * @reset_phy: passed through to tg3_init_hw() to force a PHY reset.
 * @test_irq:  when using MSI, verify the interrupt actually fires via
 *             tg3_test_msi().
 * @init:      true on first open (tg3_ptp_init), false on restart
 *             (tg3_ptp_resume).
 *
 * Returns 0 on success or a negative errno; on failure all acquired
 * resources (irqs, napi contexts, DMA rings, interrupt vectors) are
 * released via the layered err_out labels.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Unwind only the vectors already requested. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			/* MSI test failed and could not fall back:
			 * halt the chip and unwind.
			 */
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);


	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
11289
/* Tear down a running device, roughly mirroring tg3_start() in reverse:
 * cancel pending reset work, quiesce the tx path / timer / hwmon / PHY,
 * halt the chip under the full lock, then release irqs, interrupt
 * vectors, napi contexts and DMA rings.
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/* Interrupts are disabled; it is now safe to free the vectors. */
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
11324
/* ndo_open handler.
 *
 * Loads the chip firmware if one is required, adjusting the advertised
 * capability when the load fails (EEE on 57766, TSO on most others;
 * 5701 A0 fails the open outright).  Then powers the chip up, starts
 * it via tg3_start(), and registers the PTP clock on PTP-capable
 * devices.  Returns 0 or a negative errno.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			/* On 57766 the firmware gates EEE: disable the
			 * capability when the load failed, restore it
			 * when it succeeded.
			 */
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			/* No fallback on 5701 A0; fail the open. */
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		/* Start failed: drop aux power and put the device back
		 * into D3hot.
		 */
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
11382
/* ndo_close handler: unregister PTP, stop the device, reset the stats
 * snapshots carried across down/up cycles, and power the chip down.
 * Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	tg3_carrier_off(tp);

	return 0;
}
11401
11402 static inline u64 get_stat64(tg3_stat64_t *val)
11403 {
11404        return ((u64)val->high << 32) | ((u64)val->low);
11405 }
11406
/* Return the cumulative rx CRC error count.
 *
 * On 5700/5701 copper (non-serdes) devices the count is taken from the
 * PHY: CRC counting is enabled via MII_TG3_TEST1_CRC_EN, the counter is
 * read from MII_TG3_RXR_COUNTERS, and the running total is accumulated
 * in tp->phy_crc_errors.  All other devices report the count straight
 * from the hardware statistics block.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0; /* PHY read failed; add nothing new */

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
11430
/* Accumulate one ethtool counter: snapshot saved at last close plus the
 * current hardware value.  Expands inside tg3_get_estats(), where the
 * estats, old_estats and hw_stats locals are in scope.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
11434
/* Fill the ethtool statistics block.  Every counter is the device's
 * current hardware count plus the snapshot saved across the last close
 * (tp->estats_prev), so values stay monotonic across down/up cycles.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* Receive-side MAC counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side MAC counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Internal DMA / queue counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
11518
/* Fill a rtnl_link_stats64 from the hardware statistics block, adding
 * the snapshot saved across the last close (tp->net_stats_prev) so the
 * standard netdev counters stay continuous across down/up cycles.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* The hw keeps separate ucast/mcast/bcast counts; sum them for
	 * the aggregate packet counters.
	 */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* These two are driver-maintained counters, not hw stats. */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
11574
/* ethtool get_regs_len handler: the register dump is a fixed-size blob. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
11579
/* ethtool get_regs handler: dump the legacy register block into the
 * caller's buffer.  The buffer is zeroed first; when the PHY is in
 * low-power state the dump is skipped and all-zeros is returned.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
11598
11599 static int tg3_get_eeprom_len(struct net_device *dev)
11600 {
11601         struct tg3 *tp = netdev_priv(dev);
11602
11603         return tp->nvram_size;
11604 }
11605
/* ethtool get_eeprom handler: read eeprom->len bytes of NVRAM starting
 * at eeprom->offset into 'data'.
 *
 * NVRAM is accessed in aligned 4-byte big-endian words, so an unaligned
 * head and tail are handled by reading the containing word and copying
 * out only the requested bytes.  eeprom->len is updated to the number
 * of bytes actually transferred, even on partial failure.
 *
 * Returns 0 on success, -EINVAL when there is no NVRAM, -EAGAIN while
 * the PHY is in low-power state, or the NVRAM read error.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* report the bytes copied so far before failing */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
11668
11669 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11670 {
11671         struct tg3 *tp = netdev_priv(dev);
11672         int ret;
11673         u32 offset, len, b_offset, odd_len;
11674         u8 *buf;
11675         __be32 start, end;
11676
11677         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11678                 return -EAGAIN;
11679
11680         if (tg3_flag(tp, NO_NVRAM) ||
11681             eeprom->magic != TG3_EEPROM_MAGIC)
11682                 return -EINVAL;
11683
11684         offset = eeprom->offset;
11685         len = eeprom->len;
11686
11687         if ((b_offset = (offset & 3))) {
11688                 /* adjustments to start on required 4 byte boundary */
11689                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11690                 if (ret)
11691                         return ret;
11692                 len += b_offset;
11693                 offset &= ~3;
11694                 if (len < 4)
11695                         len = 4;
11696         }
11697
11698         odd_len = 0;
11699         if (len & 3) {
11700                 /* adjustments to end on required 4 byte boundary */
11701                 odd_len = 1;
11702                 len = (len + 3) & ~3;
11703                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11704                 if (ret)
11705                         return ret;
11706         }
11707
11708         buf = data;
11709         if (b_offset || odd_len) {
11710                 buf = kmalloc(len, GFP_KERNEL);
11711                 if (!buf)
11712                         return -ENOMEM;
11713                 if (b_offset)
11714                         memcpy(buf, &start, 4);
11715                 if (odd_len)
11716                         memcpy(buf+len-4, &end, 4);
11717                 memcpy(buf + b_offset, data, eeprom->len);
11718         }
11719
11720         ret = tg3_nvram_write_block(tp, offset, len, buf);
11721
11722         if (buf != data)
11723                 kfree(buf);
11724
11725         return ret;
11726 }
11727
/* ethtool get_settings handler.
 *
 * When phylib manages the PHY the query is forwarded to it.  Otherwise
 * the supported/advertised masks are derived from the driver's PHY
 * flags (copper vs serdes, 10/100-only), pause advertisement is folded
 * in from the flow-control configuration, and speed/duplex report the
 * live link state - or UNKNOWN while the interface is down or the link
 * is not up.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Map the rx/tx flow-control configuration onto the
		 * Pause/Asym_Pause advertisement bits.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
11793
/* ethtool set_settings handler.
 *
 * Forwards to phylib when it owns the PHY.  Otherwise validates the
 * requested autoneg/speed/duplex/advertising combination against the
 * device's capabilities (serdes devices only accept a forced
 * 1000/full), then stores the new link configuration under the full
 * lock and re-runs PHY setup if the interface is up.  Returns 0 or
 * -EINVAL/-EAGAIN.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the mask of advertisement bits this device can
		 * accept, then reject any request outside it.
		 */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits in what gets stored. */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, true);

	tg3_full_unlock(tp);

	return 0;
}
11884
11885 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11886 {
11887         struct tg3 *tp = netdev_priv(dev);
11888
11889         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11890         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11891         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11892         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11893 }
11894
11895 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11896 {
11897         struct tg3 *tp = netdev_priv(dev);
11898
11899         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11900                 wol->supported = WAKE_MAGIC;
11901         else
11902                 wol->supported = 0;
11903         wol->wolopts = 0;
11904         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11905                 wol->wolopts = WAKE_MAGIC;
11906         memset(&wol->sopass, 0, sizeof(wol->sopass));
11907 }
11908
11909 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11910 {
11911         struct tg3 *tp = netdev_priv(dev);
11912         struct device *dp = &tp->pdev->dev;
11913
11914         if (wol->wolopts & ~WAKE_MAGIC)
11915                 return -EINVAL;
11916         if ((wol->wolopts & WAKE_MAGIC) &&
11917             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11918                 return -EINVAL;
11919
11920         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11921
11922         spin_lock_bh(&tp->lock);
11923         if (device_may_wakeup(dp))
11924                 tg3_flag_set(tp, WOL_ENABLE);
11925         else
11926                 tg3_flag_clear(tp, WOL_ENABLE);
11927         spin_unlock_bh(&tp->lock);
11928
11929         return 0;
11930 }
11931
11932 static u32 tg3_get_msglevel(struct net_device *dev)
11933 {
11934         struct tg3 *tp = netdev_priv(dev);
11935         return tp->msg_enable;
11936 }
11937
11938 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11939 {
11940         struct tg3 *tp = netdev_priv(dev);
11941         tp->msg_enable = value;
11942 }
11943
/* ethtool nway_reset: restart autonegotiation on the PHY.
 *
 * Returns -EAGAIN if the interface is down (or phylib is in use but the
 * PHY is not connected yet), -EINVAL on SERDES links or when the PHY is
 * not autonegotiating, 0 (or the phylib status) on success.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	/* Restarting copper autoneg makes no sense on a SERDES link. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	/* Management firmware sharing this link will see it flap. */
	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		/* phylib owns the PHY; let it drive the renegotiation. */
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* BMCR is deliberately read twice and the first result is
		 * discarded.  NOTE(review): presumably this flushes a stale
		 * value after bus turnaround -- confirm before removing.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			/* Kick off a new autonegotiation cycle. */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
11979
11980 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11981 {
11982         struct tg3 *tp = netdev_priv(dev);
11983
11984         ering->rx_max_pending = tp->rx_std_ring_mask;
11985         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11986                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11987         else
11988                 ering->rx_jumbo_max_pending = 0;
11989
11990         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11991
11992         ering->rx_pending = tp->rx_pending;
11993         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11994                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11995         else
11996                 ering->rx_jumbo_pending = 0;
11997
11998         ering->tx_pending = tp->napi[0].tx_pending;
11999 }
12000
/* ethtool -G: change RX/TX ring sizes.
 *
 * If the interface is up, the device is quiesced, the sizes are changed,
 * and the hardware is re-initialized.  Returns -EINVAL for out-of-range
 * sizes, otherwise the tg3_restart_hw() status.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* TX must leave room for a maximally fragmented skb; chips with
	 * the TSO bug segment in the driver and need up to 3x the frags.
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	/* Quiesce PHY and datapath before touching ring geometry. */
	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Chips flagged MAX_RXPEND_64 are clamped to 63 standard RX BDs. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* Every TX queue uses the same ring size. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, false);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	/* Restart the PHY only if we stopped it above and HW came back. */
	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
12046
12047 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12048 {
12049         struct tg3 *tp = netdev_priv(dev);
12050
12051         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12052
12053         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12054                 epause->rx_pause = 1;
12055         else
12056                 epause->rx_pause = 0;
12057
12058         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12059                 epause->tx_pause = 1;
12060         else
12061                 epause->tx_pause = 0;
12062 }
12063
/* ethtool -A: configure flow control (pause).
 *
 * Two paths: when phylib manages the PHY, translate the rx/tx pause
 * request into Pause/Asym_Pause advertisement bits and let phylib
 * renegotiate; otherwise update the driver's flow-control state directly
 * and re-init the hardware if the interface is running.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric rx/tx pause needs Asym_Pause support. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Map the rx/tx pause pair onto the IEEE 802.3
		 * Pause/Asym_Pause advertisement encoding.
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* Not connected yet: stash the advertisement for
			 * when the PHY is brought up.
			 */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		/* Quiesce the datapath before reprogramming the MAC. */
		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, false);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	/* Remember that the user overrode the defaults. */
	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	return err;
}
12170
12171 static int tg3_get_sset_count(struct net_device *dev, int sset)
12172 {
12173         switch (sset) {
12174         case ETH_SS_TEST:
12175                 return TG3_NUM_TEST;
12176         case ETH_SS_STATS:
12177                 return TG3_NUM_STATS;
12178         default:
12179                 return -EOPNOTSUPP;
12180         }
12181 }
12182
12183 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12184                          u32 *rules __always_unused)
12185 {
12186         struct tg3 *tp = netdev_priv(dev);
12187
12188         if (!tg3_flag(tp, SUPPORT_MSIX))
12189                 return -EOPNOTSUPP;
12190
12191         switch (info->cmd) {
12192         case ETHTOOL_GRXRINGS:
12193                 if (netif_running(tp->dev))
12194                         info->data = tp->rxq_cnt;
12195                 else {
12196                         info->data = num_online_cpus();
12197                         if (info->data > TG3_RSS_MAX_NUM_QS)
12198                                 info->data = TG3_RSS_MAX_NUM_QS;
12199                 }
12200
12201                 /* The first interrupt vector only
12202                  * handles link interrupts.
12203                  */
12204                 info->data -= 1;
12205                 return 0;
12206
12207         default:
12208                 return -EOPNOTSUPP;
12209         }
12210 }
12211
12212 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12213 {
12214         u32 size = 0;
12215         struct tg3 *tp = netdev_priv(dev);
12216
12217         if (tg3_flag(tp, SUPPORT_MSIX))
12218                 size = TG3_RSS_INDIR_TBL_SIZE;
12219
12220         return size;
12221 }
12222
12223 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12224 {
12225         struct tg3 *tp = netdev_priv(dev);
12226         int i;
12227
12228         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12229                 indir[i] = tp->rss_ind_tbl[i];
12230
12231         return 0;
12232 }
12233
12234 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12235 {
12236         struct tg3 *tp = netdev_priv(dev);
12237         size_t i;
12238
12239         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12240                 tp->rss_ind_tbl[i] = indir[i];
12241
12242         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12243                 return 0;
12244
12245         /* It is legal to write the indirection
12246          * table while the device is running.
12247          */
12248         tg3_full_lock(tp, 0);
12249         tg3_rss_write_indir_tbl(tp);
12250         tg3_full_unlock(tp);
12251
12252         return 0;
12253 }
12254
12255 static void tg3_get_channels(struct net_device *dev,
12256                              struct ethtool_channels *channel)
12257 {
12258         struct tg3 *tp = netdev_priv(dev);
12259         u32 deflt_qs = netif_get_num_default_rss_queues();
12260
12261         channel->max_rx = tp->rxq_max;
12262         channel->max_tx = tp->txq_max;
12263
12264         if (netif_running(dev)) {
12265                 channel->rx_count = tp->rxq_cnt;
12266                 channel->tx_count = tp->txq_cnt;
12267         } else {
12268                 if (tp->rxq_req)
12269                         channel->rx_count = tp->rxq_req;
12270                 else
12271                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12272
12273                 if (tp->txq_req)
12274                         channel->tx_count = tp->txq_req;
12275                 else
12276                         channel->tx_count = min(deflt_qs, tp->txq_max);
12277         }
12278 }
12279
12280 static int tg3_set_channels(struct net_device *dev,
12281                             struct ethtool_channels *channel)
12282 {
12283         struct tg3 *tp = netdev_priv(dev);
12284
12285         if (!tg3_flag(tp, SUPPORT_MSIX))
12286                 return -EOPNOTSUPP;
12287
12288         if (channel->rx_count > tp->rxq_max ||
12289             channel->tx_count > tp->txq_max)
12290                 return -EINVAL;
12291
12292         tp->rxq_req = channel->rx_count;
12293         tp->txq_req = channel->tx_count;
12294
12295         if (!netif_running(dev))
12296                 return 0;
12297
12298         tg3_stop(tp);
12299
12300         tg3_carrier_off(tp);
12301
12302         tg3_start(tp, true, false, false);
12303
12304         return 0;
12305 }
12306
12307 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12308 {
12309         switch (stringset) {
12310         case ETH_SS_STATS:
12311                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12312                 break;
12313         case ETH_SS_TEST:
12314                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12315                 break;
12316         default:
12317                 WARN_ON(1);     /* we need a WARN() */
12318                 break;
12319         }
12320 }
12321
12322 static int tg3_set_phys_id(struct net_device *dev,
12323                             enum ethtool_phys_id_state state)
12324 {
12325         struct tg3 *tp = netdev_priv(dev);
12326
12327         if (!netif_running(tp->dev))
12328                 return -EAGAIN;
12329
12330         switch (state) {
12331         case ETHTOOL_ID_ACTIVE:
12332                 return 1;       /* cycle on/off once per second */
12333
12334         case ETHTOOL_ID_ON:
12335                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12336                      LED_CTRL_1000MBPS_ON |
12337                      LED_CTRL_100MBPS_ON |
12338                      LED_CTRL_10MBPS_ON |
12339                      LED_CTRL_TRAFFIC_OVERRIDE |
12340                      LED_CTRL_TRAFFIC_BLINK |
12341                      LED_CTRL_TRAFFIC_LED);
12342                 break;
12343
12344         case ETHTOOL_ID_OFF:
12345                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12346                      LED_CTRL_TRAFFIC_OVERRIDE);
12347                 break;
12348
12349         case ETHTOOL_ID_INACTIVE:
12350                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12351                 break;
12352         }
12353
12354         return 0;
12355 }
12356
12357 static void tg3_get_ethtool_stats(struct net_device *dev,
12358                                    struct ethtool_stats *estats, u64 *tmp_stats)
12359 {
12360         struct tg3 *tp = netdev_priv(dev);
12361
12362         if (tp->hw_stats)
12363                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12364         else
12365                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12366 }
12367
/* Read the adapter's VPD block into a freshly kmalloc'd buffer.
 *
 * For TG3_EEPROM_MAGIC images, first walk the NVRAM directory looking
 * for an extended-VPD entry; fall back to the fixed VPD offset/length
 * when none is found.  For other images, read via the PCI VPD
 * capability.  On success *vpdlen is set and the buffer is returned;
 * the caller owns (and must kfree) it.  Returns NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Directory entry found: length is in words, the
			 * payload location follows the entry header.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	/* No extended VPD located: use the legacy fixed region. */
	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* Read through PCI config space; retry up to 3 partial
		 * reads, treating timeouts/signals as zero-byte reads.
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
12443
/* Sizes (bytes) of the NVRAM regions covered by the self-test, keyed by
 * the image format/revision detected from the magic word.
 */
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* ethtool self-test "nvram" leg: verify the NVRAM image integrity.
 *
 * Depending on the magic word this checks a selfboot byte checksum, a
 * hardware-selfboot parity pattern, or the legacy bootstrap +
 * manufacturing CRCs followed by the VPD read-only checksum keyword.
 * Returns 0 on success, -EIO on any mismatch, -ENOMEM on allocation
 * failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Pick how many bytes to read based on the image format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			/* Unknown selfboot format: nothing we can check. */
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Pull the whole region into memory, word by word. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		/* Selfboot image: an 8-bit sum of all bytes must be 0. */
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		/* Hardware selfboot image: bytes 0, 8 and 16 carry packed
		 * odd-parity bits for the remaining data bytes.
		 */
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* 7 parity bits per header byte here. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				/* 6 bits from byte 16, 8 from byte 17. */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte must have odd parity when its stored
		 * parity bit is counted in.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally validate the VPD RO section's checksum keyword. */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			/* Byte sum from the VPD start through the stored
			 * checksum byte must be zero.
			 */
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
12632
12633 #define TG3_SERDES_TIMEOUT_SEC  2
12634 #define TG3_COPPER_TIMEOUT_SEC  6
12635
12636 static int tg3_test_link(struct tg3 *tp)
12637 {
12638         int i, max;
12639
12640         if (!netif_running(tp->dev))
12641                 return -ENODEV;
12642
12643         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12644                 max = TG3_SERDES_TIMEOUT_SEC;
12645         else
12646                 max = TG3_COPPER_TIMEOUT_SEC;
12647
12648         for (i = 0; i < max; i++) {
12649                 if (tp->link_up)
12650                         return 0;
12651
12652                 if (msleep_interruptible(1000))
12653                         break;
12654         }
12655
12656         return -EIO;
12657 }
12658
12659 /* Only test the commonly used registers */
12660 static int tg3_test_registers(struct tg3 *tp)
12661 {
12662         int i, is_5705, is_5750;
12663         u32 offset, read_mask, write_mask, val, save_val, read_val;
12664         static struct {
12665                 u16 offset;
12666                 u16 flags;
12667 #define TG3_FL_5705     0x1
12668 #define TG3_FL_NOT_5705 0x2
12669 #define TG3_FL_NOT_5788 0x4
12670 #define TG3_FL_NOT_5750 0x8
12671                 u32 read_mask;
12672                 u32 write_mask;
12673         } reg_tbl[] = {
12674                 /* MAC Control Registers */
12675                 { MAC_MODE, TG3_FL_NOT_5705,
12676                         0x00000000, 0x00ef6f8c },
12677                 { MAC_MODE, TG3_FL_5705,
12678                         0x00000000, 0x01ef6b8c },
12679                 { MAC_STATUS, TG3_FL_NOT_5705,
12680                         0x03800107, 0x00000000 },
12681                 { MAC_STATUS, TG3_FL_5705,
12682                         0x03800100, 0x00000000 },
12683                 { MAC_ADDR_0_HIGH, 0x0000,
12684                         0x00000000, 0x0000ffff },
12685                 { MAC_ADDR_0_LOW, 0x0000,
12686                         0x00000000, 0xffffffff },
12687                 { MAC_RX_MTU_SIZE, 0x0000,
12688                         0x00000000, 0x0000ffff },
12689                 { MAC_TX_MODE, 0x0000,
12690                         0x00000000, 0x00000070 },
12691                 { MAC_TX_LENGTHS, 0x0000,
12692                         0x00000000, 0x00003fff },
12693                 { MAC_RX_MODE, TG3_FL_NOT_5705,
12694                         0x00000000, 0x000007fc },
12695                 { MAC_RX_MODE, TG3_FL_5705,
12696                         0x00000000, 0x000007dc },
12697                 { MAC_HASH_REG_0, 0x0000,
12698                         0x00000000, 0xffffffff },
12699                 { MAC_HASH_REG_1, 0x0000,
12700                         0x00000000, 0xffffffff },
12701                 { MAC_HASH_REG_2, 0x0000,
12702                         0x00000000, 0xffffffff },
12703                 { MAC_HASH_REG_3, 0x0000,
12704                         0x00000000, 0xffffffff },
12705
12706                 /* Receive Data and Receive BD Initiator Control Registers. */
12707                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12708                         0x00000000, 0xffffffff },
12709                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12710                         0x00000000, 0xffffffff },
12711                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12712                         0x00000000, 0x00000003 },
12713                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12714                         0x00000000, 0xffffffff },
12715                 { RCVDBDI_STD_BD+0, 0x0000,
12716                         0x00000000, 0xffffffff },
12717                 { RCVDBDI_STD_BD+4, 0x0000,
12718                         0x00000000, 0xffffffff },
12719                 { RCVDBDI_STD_BD+8, 0x0000,
12720                         0x00000000, 0xffff0002 },
12721                 { RCVDBDI_STD_BD+0xc, 0x0000,
12722                         0x00000000, 0xffffffff },
12723
12724                 /* Receive BD Initiator Control Registers. */
12725                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12726                         0x00000000, 0xffffffff },
12727                 { RCVBDI_STD_THRESH, TG3_FL_5705,
12728                         0x00000000, 0x000003ff },
12729                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12730                         0x00000000, 0xffffffff },
12731
12732                 /* Host Coalescing Control Registers. */
12733                 { HOSTCC_MODE, TG3_FL_NOT_5705,
12734                         0x00000000, 0x00000004 },
12735                 { HOSTCC_MODE, TG3_FL_5705,
12736                         0x00000000, 0x000000f6 },
12737                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12738                         0x00000000, 0xffffffff },
12739                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12740                         0x00000000, 0x000003ff },
12741                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12742                         0x00000000, 0xffffffff },
12743                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12744                         0x00000000, 0x000003ff },
12745                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12746                         0x00000000, 0xffffffff },
12747                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12748                         0x00000000, 0x000000ff },
12749                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12750                         0x00000000, 0xffffffff },
12751                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12752                         0x00000000, 0x000000ff },
12753                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12754                         0x00000000, 0xffffffff },
12755                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12756                         0x00000000, 0xffffffff },
12757                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12758                         0x00000000, 0xffffffff },
12759                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12760                         0x00000000, 0x000000ff },
12761                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12762                         0x00000000, 0xffffffff },
12763                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12764                         0x00000000, 0x000000ff },
12765                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12766                         0x00000000, 0xffffffff },
12767                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12768                         0x00000000, 0xffffffff },
12769                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12770                         0x00000000, 0xffffffff },
12771                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12772                         0x00000000, 0xffffffff },
12773                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12774                         0x00000000, 0xffffffff },
12775                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12776                         0xffffffff, 0x00000000 },
12777                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12778                         0xffffffff, 0x00000000 },
12779
12780                 /* Buffer Manager Control Registers. */
12781                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12782                         0x00000000, 0x007fff80 },
12783                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12784                         0x00000000, 0x007fffff },
12785                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12786                         0x00000000, 0x0000003f },
12787                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12788                         0x00000000, 0x000001ff },
12789                 { BUFMGR_MB_HIGH_WATER, 0x0000,
12790                         0x00000000, 0x000001ff },
12791                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12792                         0xffffffff, 0x00000000 },
12793                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12794                         0xffffffff, 0x00000000 },
12795
12796                 /* Mailbox Registers */
12797                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12798                         0x00000000, 0x000001ff },
12799                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12800                         0x00000000, 0x000001ff },
12801                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12802                         0x00000000, 0x000007ff },
12803                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12804                         0x00000000, 0x000001ff },
12805
12806                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12807         };
12808
12809         is_5705 = is_5750 = 0;
12810         if (tg3_flag(tp, 5705_PLUS)) {
12811                 is_5705 = 1;
12812                 if (tg3_flag(tp, 5750_PLUS))
12813                         is_5750 = 1;
12814         }
12815
12816         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12817                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12818                         continue;
12819
12820                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12821                         continue;
12822
12823                 if (tg3_flag(tp, IS_5788) &&
12824                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
12825                         continue;
12826
12827                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12828                         continue;
12829
12830                 offset = (u32) reg_tbl[i].offset;
12831                 read_mask = reg_tbl[i].read_mask;
12832                 write_mask = reg_tbl[i].write_mask;
12833
12834                 /* Save the original register content */
12835                 save_val = tr32(offset);
12836
12837                 /* Determine the read-only value. */
12838                 read_val = save_val & read_mask;
12839
12840                 /* Write zero to the register, then make sure the read-only bits
12841                  * are not changed and the read/write bits are all zeros.
12842                  */
12843                 tw32(offset, 0);
12844
12845                 val = tr32(offset);
12846
12847                 /* Test the read-only and read/write bits. */
12848                 if (((val & read_mask) != read_val) || (val & write_mask))
12849                         goto out;
12850
12851                 /* Write ones to all the bits defined by RdMask and WrMask, then
12852                  * make sure the read-only bits are not changed and the
12853                  * read/write bits are all ones.
12854                  */
12855                 tw32(offset, read_mask | write_mask);
12856
12857                 val = tr32(offset);
12858
12859                 /* Test the read-only bits. */
12860                 if ((val & read_mask) != read_val)
12861                         goto out;
12862
12863                 /* Test the read/write bits. */
12864                 if ((val & write_mask) != write_mask)
12865                         goto out;
12866
12867                 tw32(offset, save_val);
12868         }
12869
12870         return 0;
12871
12872 out:
12873         if (netif_msg_hw(tp))
12874                 netdev_err(tp->dev,
12875                            "Register test failed at offset %x\n", offset);
12876         tw32(offset, save_val);
12877         return -EIO;
12878 }
12879
12880 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12881 {
12882         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12883         int i;
12884         u32 j;
12885
12886         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12887                 for (j = 0; j < len; j += 4) {
12888                         u32 val;
12889
12890                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12891                         tg3_read_mem(tp, offset + j, &val);
12892                         if (val != test_pattern[i])
12893                                 return -EIO;
12894                 }
12895         }
12896         return 0;
12897 }
12898
12899 static int tg3_test_memory(struct tg3 *tp)
12900 {
12901         static struct mem_entry {
12902                 u32 offset;
12903                 u32 len;
12904         } mem_tbl_570x[] = {
12905                 { 0x00000000, 0x00b50},
12906                 { 0x00002000, 0x1c000},
12907                 { 0xffffffff, 0x00000}
12908         }, mem_tbl_5705[] = {
12909                 { 0x00000100, 0x0000c},
12910                 { 0x00000200, 0x00008},
12911                 { 0x00004000, 0x00800},
12912                 { 0x00006000, 0x01000},
12913                 { 0x00008000, 0x02000},
12914                 { 0x00010000, 0x0e000},
12915                 { 0xffffffff, 0x00000}
12916         }, mem_tbl_5755[] = {
12917                 { 0x00000200, 0x00008},
12918                 { 0x00004000, 0x00800},
12919                 { 0x00006000, 0x00800},
12920                 { 0x00008000, 0x02000},
12921                 { 0x00010000, 0x0c000},
12922                 { 0xffffffff, 0x00000}
12923         }, mem_tbl_5906[] = {
12924                 { 0x00000200, 0x00008},
12925                 { 0x00004000, 0x00400},
12926                 { 0x00006000, 0x00400},
12927                 { 0x00008000, 0x01000},
12928                 { 0x00010000, 0x01000},
12929                 { 0xffffffff, 0x00000}
12930         }, mem_tbl_5717[] = {
12931                 { 0x00000200, 0x00008},
12932                 { 0x00010000, 0x0a000},
12933                 { 0x00020000, 0x13c00},
12934                 { 0xffffffff, 0x00000}
12935         }, mem_tbl_57765[] = {
12936                 { 0x00000200, 0x00008},
12937                 { 0x00004000, 0x00800},
12938                 { 0x00006000, 0x09800},
12939                 { 0x00010000, 0x0a000},
12940                 { 0xffffffff, 0x00000}
12941         };
12942         struct mem_entry *mem_tbl;
12943         int err = 0;
12944         int i;
12945
12946         if (tg3_flag(tp, 5717_PLUS))
12947                 mem_tbl = mem_tbl_5717;
12948         else if (tg3_flag(tp, 57765_CLASS) ||
12949                  tg3_asic_rev(tp) == ASIC_REV_5762)
12950                 mem_tbl = mem_tbl_57765;
12951         else if (tg3_flag(tp, 5755_PLUS))
12952                 mem_tbl = mem_tbl_5755;
12953         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12954                 mem_tbl = mem_tbl_5906;
12955         else if (tg3_flag(tp, 5705_PLUS))
12956                 mem_tbl = mem_tbl_5705;
12957         else
12958                 mem_tbl = mem_tbl_570x;
12959
12960         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12961                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12962                 if (err)
12963                         break;
12964         }
12965
12966         return err;
12967 }
12968
/* Segment size and header-length constants for the TSO loopback test. */
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

/* Canned frame template copied into the TSO loopback packet right after
 * the two MAC addresses (see tg3_run_loopback()).  Based on how that
 * function casts into this data: the first two bytes are the Ethernet
 * type field, followed by a 20-byte IPv4 header (tot_len is patched in
 * at run time) and a TCP header with 12 bytes of options.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,			/* Ethernet type */
0x45, 0x00, 0x00, 0x00,		/* IPv4 header starts here */
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,		/* TCP header starts here */
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,		/* TCP options (12 bytes) */
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
12991
12992 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12993 {
12994         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12995         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12996         u32 budget;
12997         struct sk_buff *skb;
12998         u8 *tx_data, *rx_data;
12999         dma_addr_t map;
13000         int num_pkts, tx_len, rx_len, i, err;
13001         struct tg3_rx_buffer_desc *desc;
13002         struct tg3_napi *tnapi, *rnapi;
13003         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13004
13005         tnapi = &tp->napi[0];
13006         rnapi = &tp->napi[0];
13007         if (tp->irq_cnt > 1) {
13008                 if (tg3_flag(tp, ENABLE_RSS))
13009                         rnapi = &tp->napi[1];
13010                 if (tg3_flag(tp, ENABLE_TSS))
13011                         tnapi = &tp->napi[1];
13012         }
13013         coal_now = tnapi->coal_now | rnapi->coal_now;
13014
13015         err = -EIO;
13016
13017         tx_len = pktsz;
13018         skb = netdev_alloc_skb(tp->dev, tx_len);
13019         if (!skb)
13020                 return -ENOMEM;
13021
13022         tx_data = skb_put(skb, tx_len);
13023         memcpy(tx_data, tp->dev->dev_addr, 6);
13024         memset(tx_data + 6, 0x0, 8);
13025
13026         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13027
13028         if (tso_loopback) {
13029                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13030
13031                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13032                               TG3_TSO_TCP_OPT_LEN;
13033
13034                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13035                        sizeof(tg3_tso_header));
13036                 mss = TG3_TSO_MSS;
13037
13038                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13039                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13040
13041                 /* Set the total length field in the IP header */
13042                 iph->tot_len = htons((u16)(mss + hdr_len));
13043
13044                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13045                               TXD_FLAG_CPU_POST_DMA);
13046
13047                 if (tg3_flag(tp, HW_TSO_1) ||
13048                     tg3_flag(tp, HW_TSO_2) ||
13049                     tg3_flag(tp, HW_TSO_3)) {
13050                         struct tcphdr *th;
13051                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13052                         th = (struct tcphdr *)&tx_data[val];
13053                         th->check = 0;
13054                 } else
13055                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13056
13057                 if (tg3_flag(tp, HW_TSO_3)) {
13058                         mss |= (hdr_len & 0xc) << 12;
13059                         if (hdr_len & 0x10)
13060                                 base_flags |= 0x00000010;
13061                         base_flags |= (hdr_len & 0x3e0) << 5;
13062                 } else if (tg3_flag(tp, HW_TSO_2))
13063                         mss |= hdr_len << 9;
13064                 else if (tg3_flag(tp, HW_TSO_1) ||
13065                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13066                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13067                 } else {
13068                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13069                 }
13070
13071                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13072         } else {
13073                 num_pkts = 1;
13074                 data_off = ETH_HLEN;
13075
13076                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13077                     tx_len > VLAN_ETH_FRAME_LEN)
13078                         base_flags |= TXD_FLAG_JMB_PKT;
13079         }
13080
13081         for (i = data_off; i < tx_len; i++)
13082                 tx_data[i] = (u8) (i & 0xff);
13083
13084         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13085         if (pci_dma_mapping_error(tp->pdev, map)) {
13086                 dev_kfree_skb(skb);
13087                 return -EIO;
13088         }
13089
13090         val = tnapi->tx_prod;
13091         tnapi->tx_buffers[val].skb = skb;
13092         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13093
13094         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13095                rnapi->coal_now);
13096
13097         udelay(10);
13098
13099         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13100
13101         budget = tg3_tx_avail(tnapi);
13102         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13103                             base_flags | TXD_FLAG_END, mss, 0)) {
13104                 tnapi->tx_buffers[val].skb = NULL;
13105                 dev_kfree_skb(skb);
13106                 return -EIO;
13107         }
13108
13109         tnapi->tx_prod++;
13110
13111         /* Sync BD data before updating mailbox */
13112         wmb();
13113
13114         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13115         tr32_mailbox(tnapi->prodmbox);
13116
13117         udelay(10);
13118
13119         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13120         for (i = 0; i < 35; i++) {
13121                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13122                        coal_now);
13123
13124                 udelay(10);
13125
13126                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13127                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13128                 if ((tx_idx == tnapi->tx_prod) &&
13129                     (rx_idx == (rx_start_idx + num_pkts)))
13130                         break;
13131         }
13132
13133         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13134         dev_kfree_skb(skb);
13135
13136         if (tx_idx != tnapi->tx_prod)
13137                 goto out;
13138
13139         if (rx_idx != rx_start_idx + num_pkts)
13140                 goto out;
13141
13142         val = data_off;
13143         while (rx_idx != rx_start_idx) {
13144                 desc = &rnapi->rx_rcb[rx_start_idx++];
13145                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13146                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13147
13148                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13149                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13150                         goto out;
13151
13152                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13153                          - ETH_FCS_LEN;
13154
13155                 if (!tso_loopback) {
13156                         if (rx_len != tx_len)
13157                                 goto out;
13158
13159                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13160                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13161                                         goto out;
13162                         } else {
13163                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13164                                         goto out;
13165                         }
13166                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13167                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13168                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13169                         goto out;
13170                 }
13171
13172                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13173                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13174                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13175                                              mapping);
13176                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13177                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13178                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13179                                              mapping);
13180                 } else
13181                         goto out;
13182
13183                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13184                                             PCI_DMA_FROMDEVICE);
13185
13186                 rx_data += TG3_RX_OFFSET(tp);
13187                 for (i = data_off; i < rx_len; i++, val++) {
13188                         if (*(rx_data + i) != (u8) (val & 0xff))
13189                                 goto out;
13190                 }
13191         }
13192
13193         err = 0;
13194
13195         /* tg3_free_rings will unmap and free the rx_data */
13196 out:
13197         return err;
13198 }
13199
/* Per-variant failure bits OR'd into the ethtool data[] slots by
 * tg3_test_loopback(); TG3_LOOPBACK_FAILED marks all variants failed.
 */
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
13207
/* Run the MAC-, PHY- and (optionally) external-loopback variants of the
 * packet loopback test, recording per-variant failure bits in the
 * ethtool data[] slots TG3_MAC_LOOPB_TEST / TG3_PHY_LOOPB_TEST /
 * TG3_EXT_LOOPB_TEST.
 *
 * EEE advertisement is suppressed for the duration of the test and
 * restored on exit.  Returns 0 if every attempted variant passed,
 * -EIO otherwise (also when the device is down or the HW reset fails).
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	/* Cap the jumbo test frame to what DMA can handle. */
	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* Temporarily drop EEE capability; restored at "done". */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, true);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	/* PHY (and optional external) loopback needs a real copper PHY
	 * driven by this driver, not phylib.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	/* Any nonzero failure bit in any slot means the test failed.
	 * (data[] was zeroed by the caller, so untouched slots are 0.)
	 */
	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
13322
/* ethtool self-test entry point (.self_test).
 *
 * Fills @data with one result slot per test (nonzero = failed) and sets
 * ETH_TEST_FL_FAILED in @etest->flags on any failure.  The offline
 * portion halts the device, runs the destructive register/memory/
 * loopback/interrupt tests under tg3_full_lock, then restarts the
 * hardware.  The ordering of halt/lock/restart calls below is
 * significant.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* Power the device up first if it is in low-power state; if that
	 * fails, mark every slot failed (memset fills each byte with 1,
	 * so each u64 is nonzero) and bail out.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	/* External loopback implies no link partner, so skip link test. */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		/* tg3_test_loopback fills its own data[] slots. */
		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Interrupt test must run outside the full lock. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, true);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
13409
/* SIOCSHWTSTAMP handler: configure hardware TX/RX packet timestamping
 * from a userspace struct hwtstamp_config.
 *
 * Maps each supported rx_filter to the corresponding TG3_RX_PTP_CTL_*
 * hardware control bits in tp->rxptpctl and writes them to the device
 * if it is running.  Returns 0 on success, -EINVAL if the device lacks
 * PTP support or flags are set, -ERANGE for unsupported tx_type or
 * rx_filter values, -EFAULT on user copy errors.
 */
static int tg3_hwtstamp_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EINVAL;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	/* No flags are defined/supported for this device. */
	if (stmpconf.flags)
		return -EINVAL;

	switch (stmpconf.tx_type) {
	case HWTSTAMP_TX_ON:
		tg3_flag_set(tp, TX_TSTAMP_EN);
		break;
	case HWTSTAMP_TX_OFF:
		tg3_flag_clear(tp, TX_TSTAMP_EN);
		break;
	default:
		return -ERANGE;
	}

	/* Translate the requested filter into PTP version-enable bits
	 * (V1/V2, optionally restricted to L2 or L4) combined with an
	 * event-class selector (all events / sync / delay-req).
	 */
	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	/* Apply immediately only when the interface is up and a filter
	 * is enabled.
	 */
	if (netif_running(dev) && tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	/* Report the accepted configuration back to userspace. */
	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
13499
/* ndo_do_ioctl handler: services MII register access (SIOCGMIIPHY,
 * SIOCGMIIREG, SIOCSMIIREG) and hardware timestamp configuration
 * (SIOCSHWTSTAMP).  When the PHY is managed by phylib, MII requests
 * are delegated to phy_mii_ioctl().  Returns 0 or a negative errno.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		/* Serialize MDIO access against other users of tp->lock */
		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_ioctl(dev, ifr, cmd);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
13561
13562 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13563 {
13564         struct tg3 *tp = netdev_priv(dev);
13565
13566         memcpy(ec, &tp->coal, sizeof(*ec));
13567         return 0;
13568 }
13569
13570 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13571 {
13572         struct tg3 *tp = netdev_priv(dev);
13573         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13574         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13575
13576         if (!tg3_flag(tp, 5705_PLUS)) {
13577                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13578                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13579                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13580                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13581         }
13582
13583         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13584             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13585             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13586             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13587             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13588             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13589             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13590             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13591             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13592             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13593                 return -EINVAL;
13594
13595         /* No rx interrupts will be generated if both are zero */
13596         if ((ec->rx_coalesce_usecs == 0) &&
13597             (ec->rx_max_coalesced_frames == 0))
13598                 return -EINVAL;
13599
13600         /* No tx interrupts will be generated if both are zero */
13601         if ((ec->tx_coalesce_usecs == 0) &&
13602             (ec->tx_max_coalesced_frames == 0))
13603                 return -EINVAL;
13604
13605         /* Only copy relevant parameters, ignore all others. */
13606         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13607         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13608         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13609         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13610         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13611         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13612         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13613         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13614         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13615
13616         if (netif_running(dev)) {
13617                 tg3_full_lock(tp, 0);
13618                 __tg3_set_coalesce(tp, &tp->coal);
13619                 tg3_full_unlock(tp);
13620         }
13621         return 0;
13622 }
13623
/* ethtool callback table for tg3 devices; the handlers are defined
 * earlier in this file.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
};
13658
13659 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13660                                                 struct rtnl_link_stats64 *stats)
13661 {
13662         struct tg3 *tp = netdev_priv(dev);
13663
13664         spin_lock_bh(&tp->lock);
13665         if (!tp->hw_stats) {
13666                 spin_unlock_bh(&tp->lock);
13667                 return &tp->net_stats_prev;
13668         }
13669
13670         tg3_get_nstats(tp, stats);
13671         spin_unlock_bh(&tp->lock);
13672
13673         return stats;
13674 }
13675
/* ndo_set_rx_mode: apply the rx filter configuration under the full
 * lock; a no-op while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
13687
13688 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13689                                int new_mtu)
13690 {
13691         dev->mtu = new_mtu;
13692
13693         if (new_mtu > ETH_DATA_LEN) {
13694                 if (tg3_flag(tp, 5780_CLASS)) {
13695                         netdev_update_features(dev);
13696                         tg3_flag_clear(tp, TSO_CAPABLE);
13697                 } else {
13698                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13699                 }
13700         } else {
13701                 if (tg3_flag(tp, 5780_CLASS)) {
13702                         tg3_flag_set(tp, TSO_CAPABLE);
13703                         netdev_update_features(dev);
13704                 }
13705                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13706         }
13707 }
13708
13709 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13710 {
13711         struct tg3 *tp = netdev_priv(dev);
13712         int err;
13713         bool reset_phy = false;
13714
13715         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13716                 return -EINVAL;
13717
13718         if (!netif_running(dev)) {
13719                 /* We'll just catch it later when the
13720                  * device is up'd.
13721                  */
13722                 tg3_set_mtu(dev, tp, new_mtu);
13723                 return 0;
13724         }
13725
13726         tg3_phy_stop(tp);
13727
13728         tg3_netif_stop(tp);
13729
13730         tg3_full_lock(tp, 1);
13731
13732         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13733
13734         tg3_set_mtu(dev, tp, new_mtu);
13735
13736         /* Reset PHY, otherwise the read DMA engine will be in a mode that
13737          * breaks all requests to 256 bytes.
13738          */
13739         if (tg3_asic_rev(tp) == ASIC_REV_57766)
13740                 reset_phy = true;
13741
13742         err = tg3_restart_hw(tp, reset_phy);
13743
13744         if (!err)
13745                 tg3_netif_start(tp);
13746
13747         tg3_full_unlock(tp);
13748
13749         if (!err)
13750                 tg3_phy_start(tp);
13751
13752         return err;
13753 }
13754
/* net_device callback table for tg3 devices; the handlers are defined
 * earlier in this file.
 */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
13772
13773 static void tg3_get_eeprom_size(struct tg3 *tp)
13774 {
13775         u32 cursize, val, magic;
13776
13777         tp->nvram_size = EEPROM_CHIP_SIZE;
13778
13779         if (tg3_nvram_read(tp, 0, &magic) != 0)
13780                 return;
13781
13782         if ((magic != TG3_EEPROM_MAGIC) &&
13783             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13784             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13785                 return;
13786
13787         /*
13788          * Size the chip by reading offsets at increasing powers of two.
13789          * When we encounter our validation signature, we know the addressing
13790          * has wrapped around, and thus have our chip size.
13791          */
13792         cursize = 0x10;
13793
13794         while (cursize < tp->nvram_size) {
13795                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13796                         return;
13797
13798                 if (val == magic)
13799                         break;
13800
13801                 cursize <<= 1;
13802         }
13803
13804         tp->nvram_size = cursize;
13805 }
13806
/* Determine the total NVRAM size.  Images carrying the standard magic
 * keep a size word at offset 0xf0; selfboot images are sized by
 * probing in tg3_get_eeprom_size().  Falls back to 512KB when no size
 * word is recorded.  Does nothing when there is no NVRAM or the first
 * read fails.
 */
static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
13839
/* Decode NVRAM_CFG1 on pre-5752 devices.  5750 and 5780-class chips
 * get a vendor-specific decode (JEDEC vendor, page size, buffering);
 * everything else defaults to a buffered Atmel AT45DB0X1B part.
 */
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface: clear the compatibility bypass bit */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
13890
13891 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13892 {
13893         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13894         case FLASH_5752PAGE_SIZE_256:
13895                 tp->nvram_pagesize = 256;
13896                 break;
13897         case FLASH_5752PAGE_SIZE_512:
13898                 tp->nvram_pagesize = 512;
13899                 break;
13900         case FLASH_5752PAGE_SIZE_1K:
13901                 tp->nvram_pagesize = 1024;
13902                 break;
13903         case FLASH_5752PAGE_SIZE_2K:
13904                 tp->nvram_pagesize = 2048;
13905                 break;
13906         case FLASH_5752PAGE_SIZE_4K:
13907                 tp->nvram_pagesize = 4096;
13908                 break;
13909         case FLASH_5752PAGE_SIZE_264:
13910                 tp->nvram_pagesize = 264;
13911                 break;
13912         case FLASH_5752PAGE_SIZE_528:
13913                 tp->nvram_pagesize = 528;
13914                 break;
13915         }
13916 }
13917
/* Decode NVRAM_CFG1 for 5752 devices: record JEDEC vendor, buffering
 * and flash/EEPROM type, then derive the page size.  EEPROM parts use
 * the maximum EEPROM size as the page size.
 */
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
13958
/* Decode NVRAM_CFG1 for 5755 devices.  Besides vendor and page size,
 * the recorded NVRAM size is reduced when TPM protection is enabled
 * (the protected region is not usable).
 */
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
14014
/* Decode NVRAM_CFG1 for 5787 devices: record JEDEC vendor, buffering,
 * flash/EEPROM type and page size.  EEPROM parts also get the
 * compatibility bypass bit cleared.
 */
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
14052
/* Decode NVRAM_CFG1 for 5761 devices.  When TPM protection is active
 * the usable size is read from the NVRAM_ADDR_LOCKOUT register;
 * otherwise the size is inferred from the flash part ID.
 */
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
14127
14128 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14129 {
14130         tp->nvram_jedecnum = JEDEC_ATMEL;
14131         tg3_flag_set(tp, NVRAM_BUFFERED);
14132         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14133 }
14134
/* Decode NVRAM_CFG1 for 57780 devices: record JEDEC vendor, buffering,
 * flash type and size.  Unknown vendor codes mark the device as having
 * no NVRAM at all.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte pages use NVRAM address translation */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14206
14207
/* Identify the NVRAM device attached to a 5717/5719-class chip from the
 * vendor strapping bits in NVRAM_CFG1 and record its JEDEC vendor,
 * buffering/flash attributes and, where the strap pins it down, its size.
 * Unknown straps mark the device as having no NVRAM.
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM parts skip the flash page-size handling below. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		/* Unrecognized strapping: report no usable NVRAM. */
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264- and 528-byte pages use NVRAM address translation;
	 * all other page sizes are addressed linearly.
	 */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14285
/* Identify the NVRAM device attached to a 5720/5762-class chip from the
 * vendor strapping bits in NVRAM_CFG1.  On 5762 parts the raw pinstrap
 * values are first remapped to their 5720 equivalents, and sizes that the
 * strap cannot pin down are read from the device itself afterwards.
 */
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		/* A zero vendor field on 5762 means no NVRAM is fitted. */
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		/* Translate 5762-specific pinstraps into the 5720 values
		 * handled by the switch below.
		 */
		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		/* EEPROM parts skip the flash page-size handling below. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			/* On 5762 leave the size unset so it is probed
			 * from the device by the caller.
			 */
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			/* On 5762 leave the size unset so it is probed
			 * from the device by the caller.
			 */
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		/* Unrecognized strapping: report no usable NVRAM. */
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264- and 528-byte pages use NVRAM address translation;
	 * all other page sizes are addressed linearly.
	 */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		/* Sanity-check word 0: without either magic signature the
		 * 5762 NVRAM contents are not usable.
		 */
		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
14432
14433 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Reset the EEPROM access state machine, enable serial-EEPROM access,
 * then dispatch to the ASIC-specific NVRAM detection routine and fill in
 * tp->nvram_size.  5700/5701 (and SSB cores) have no NVRAM block and are
 * handled via the legacy EEPROM path (or flagged NO_NVRAM) instead.
 */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	/* Reset the EEPROM FSM and program the default clock period. */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* NVRAM is shared with firmware; take the hardware lock
		 * before touching it.
		 */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		/* A detection routine leaves this 0 when the strap cannot
		 * determine the size; it is then probed below.
		 */
		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701: no NVRAM interface, only the legacy EEPROM. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
14507
/* One entry of the board -> PHY ID fallback table: a PCI subsystem
 * vendor/device pair and the PHY ID known to be on that board.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
14512
/* Hardcoded PHY IDs for known boards, used as a last resort when neither
 * the PHY registers nor the EEPROM yield a usable PHY ID (see
 * tg3_phy_probe()).  An entry of 0 means the board has no copper PHY.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
14576
14577 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14578 {
14579         int i;
14580
14581         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14582                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14583                      tp->pdev->subsystem_vendor) &&
14584                     (subsys_id_to_phy_id[i].subsys_devid ==
14585                      tp->pdev->subsystem_device))
14586                         return &subsys_id_to_phy_id[i];
14587         }
14588         return NULL;
14589 }
14590
/* Read the bootcode-provided hardware configuration (PHY ID, LED mode,
 * WOL/ASF/APE capabilities, serdes and RGMII settings) from NIC SRAM --
 * or from the VCPU shadow registers on 5906 -- and translate it into
 * tp->phy_id, tp->led_ctrl, tp->phy_flags and the corresponding tg3
 * flags.  Finally propagate the WOL capability to the PM core.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 exposes its config through VCPU shadow registers
		 * rather than NIC SRAM.
		 */
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	/* The rest of the config is only valid behind the SRAM signature. */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists on newer ASICs with bootcode
		 * versions in the (0, 0x100) range.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY ID from the SRAM's split encoding
		 * (same layout as tg3_phy_probe() builds from MII_PHYSID*).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED overrides. */
		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Certain Arima boards misreport write protect. */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes boards only support WOL when the bootcode
		 * explicitly says so.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	/* Tell the PM core what we concluded about wake-on-LAN. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
14792
/* Read one 32-bit word from the OTP region via the APE, polling for
 * completion (up to ~1 ms).  The NVRAM hardware lock is held across the
 * operation.  Returns 0 and stores the word in *val on success, a
 * negative errno on lock failure or timeout.
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;	/* OTP addresses are in bits */

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	/* Kick off a single OTP read command. */
	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);	/* flush posted write */
	udelay(10);

	/* Poll for completion; val2 is always assigned at least once. */
	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
14825
14826 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14827 {
14828         int i;
14829         u32 val;
14830
14831         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14832         tw32(OTP_CTRL, cmd);
14833
14834         /* Wait for up to 1 ms for command to execute. */
14835         for (i = 0; i < 100; i++) {
14836                 val = tr32(OTP_STATUS);
14837                 if (val & OTP_STATUS_CMD_DONE)
14838                         break;
14839                 udelay(10);
14840         }
14841
14842         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14843 }
14844
14845 /* Read the gphy configuration from the OTP region of the chip.  The gphy
14846  * configuration is a 32-bit value that straddles the alignment boundary.
14847  * We do two 32-bit reads and then shift and merge the results.
14848  */
14849 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14850 {
14851         u32 bhalf_otp, thalf_otp;
14852
14853         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14854
14855         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14856                 return 0;
14857
14858         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14859
14860         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14861                 return 0;
14862
14863         thalf_otp = tr32(OTP_READ_DATA);
14864
14865         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14866
14867         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14868                 return 0;
14869
14870         bhalf_otp = tr32(OTP_READ_DATA);
14871
14872         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14873 }
14874
14875 static void tg3_phy_init_link_config(struct tg3 *tp)
14876 {
14877         u32 adv = ADVERTISED_Autoneg;
14878
14879         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14880                 adv |= ADVERTISED_1000baseT_Half |
14881                        ADVERTISED_1000baseT_Full;
14882
14883         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14884                 adv |= ADVERTISED_100baseT_Half |
14885                        ADVERTISED_100baseT_Full |
14886                        ADVERTISED_10baseT_Half |
14887                        ADVERTISED_10baseT_Full |
14888                        ADVERTISED_TP;
14889         else
14890                 adv |= ADVERTISED_FIBRE;
14891
14892         tp->link_config.advertising = adv;
14893         tp->link_config.speed = SPEED_UNKNOWN;
14894         tp->link_config.duplex = DUPLEX_UNKNOWN;
14895         tp->link_config.autoneg = AUTONEG_ENABLE;
14896         tp->link_config.active_speed = SPEED_UNKNOWN;
14897         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14898
14899         tp->old_link = -1;
14900 }
14901
/* Identify and initialize the PHY attached to this device.
 *
 * Picks the per-function APE PHY lock, reads the MII PHY ID registers
 * (unless ASF/APE firmware owns the PHY, in which case the hardware read
 * is skipped), and falls back to the EEPROM-provided ID or the hard-coded
 * subsystem table when the hardware ID is not usable.  Finally flags
 * EEE-capable chips and, when no firmware agent manages the link, resets
 * the PHY and restarts autonegotiation.
 *
 * Returns 0 on success or a negative errno (e.g. -ENODEV when no PHY
 * could be identified).
 */
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* Each PCI function must take its own APE lock before touching
	 * the PHY so MDIO accesses don't race with APE firmware.
	 */
	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	/* Without ASF and on a full-speed copper PHY there is no reason
	 * to keep the link powered during power-down states.
	 */
	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	/* When phylib drives the PHY, delegate the whole probe to it. */
	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack PHYSID1/PHYSID2 into the driver's private phy_id
		 * layout (matches the TG3_PHY_ID_* constants).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Mark chips whose integrated copper PHY supports Energy
	 * Efficient Ethernet; 5717 A0 and 57765 A0 are excluded.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	/* Only reset/reconfigure the PHY when no firmware agent (APE/ASF)
	 * manages it and the link must stay untouched on power-down.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR link status is latched; read twice so the second
		 * read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): the DSP init is deliberately issued a
		 * second time here; presumably a hardware workaround —
		 * confirm before changing.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
15047
/* Extract identification strings from the device's PCI VPD area.
 *
 * Parses the VPD read-only (RO) section for the board part number
 * (PN keyword) and, on Dell-branded boards, a vendor-specific firmware
 * version string.  When no usable VPD exists, falls back to a part
 * number derived from the PCI device ID.  Fills tp->board_part_number
 * and may seed tp->fw_ver; never fails.
 */
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	/* vpd_data is heap-allocated by tg3_vpd_readblock(); freed at
	 * out_not_found below.
	 */
	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the large-resource RO data tag within the VPD image. */
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	/* Reject a RO section that claims to extend past the VPD image. */
	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		/* "1028" is Dell's PCI vendor ID in ASCII; only Dell
		 * boards carry the V0 firmware-version keyword below.
		 */
		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		/* Clamp so the "%.*s" copy plus " bc " suffix always fits
		 * and stays NUL-terminated.
		 */
		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* No (usable) VPD: synthesize a part number from the device ID. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
15171
15172 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15173 {
15174         u32 val;
15175
15176         if (tg3_nvram_read(tp, offset, &val) ||
15177             (val & 0xfc000000) != 0x0c000000 ||
15178             tg3_nvram_read(tp, offset + 4, &val) ||
15179             val != 0)
15180                 return 0;
15181
15182         return 1;
15183 }
15184
/* Append the bootcode (BC) firmware version to tp->fw_ver.
 *
 * Reads the firmware image header from NVRAM.  Newer images embed a
 * 16-byte ASCII version string at a pointer stored in the header;
 * older images only expose a major/minor pair in the pointer-table
 * entry, which is formatted as "vM.mm".  Silently returns on any
 * NVRAM read failure or if there is not enough room in fw_ver.
 */
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Word 0xc holds the bootcode image offset, word 0x4 its load
	 * address (used below to translate the version pointer).
	 */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* Same signature test as tg3_fw_img_is_valid(): a valid header
	 * with a zero second word marks the "new" image layout.
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever (e.g. VPD-derived) prefix is present. */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		/* ver_offset is relative to the image's load address. */
		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
15236
15237 static void tg3_read_hwsb_ver(struct tg3 *tp)
15238 {
15239         u32 val, major, minor;
15240
15241         /* Use native endian representation */
15242         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15243                 return;
15244
15245         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15246                 TG3_NVM_HWSB_CFG1_MAJSFT;
15247         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15248                 TG3_NVM_HWSB_CFG1_MINSFT;
15249
15250         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15251 }
15252
/* Append the selfboot (sb) firmware version to tp->fw_ver.
 *
 * @val is the NVRAM magic word already read by the caller; it encodes
 * the selfboot format and revision, which select where the
 * edition/build word lives.  The result looks like "sb vM.mm" with an
 * optional trailing build letter ('a'-'z').  Silently returns for
 * unknown formats/revisions, NVRAM read failure, or out-of-range
 * minor/build values.
 */
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* Each format-1 revision stores the edition word at a different
	 * offset.
	 */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity: minor is two decimal digits, build maps to 'a'..'z'. */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* Build 1..26 becomes a single suffix letter 'a'..'z'. */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
15307
/* Append the management (ASF) firmware version to tp->fw_ver.
 *
 * Scans the NVRAM directory for the ASF-init entry, validates the
 * image it points at, then copies up to 16 bytes of version text after
 * a ", " separator.  Silently returns when no entry exists or any
 * NVRAM read fails.
 */
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Walk the fixed-size NVRAM directory looking for the ASF-init
	 * entry type.
	 */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 chips use a fixed load address; later chips store it
	 * in the word preceding the directory entry.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	/* val is the (load-address-relative) version pointer. */
	offset += val - start;

	vlen = strlen(tp->fw_ver);

	/* NOTE(review): these two writes are not bounds-checked; they
	 * rely on callers leaving at least 2 bytes of headroom in the
	 * TG3_VER_SIZE buffer — confirm before reusing this pattern.
	 */
	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* On the last partial word, copy only what still fits. */
		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
15359
15360 static void tg3_probe_ncsi(struct tg3 *tp)
15361 {
15362         u32 apedata;
15363
15364         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15365         if (apedata != APE_SEG_SIG_MAGIC)
15366                 return;
15367
15368         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15369         if (!(apedata & APE_FW_STATUS_READY))
15370                 return;
15371
15372         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15373                 tg3_flag_set(tp, APE_HAS_NCSI);
15374 }
15375
15376 static void tg3_read_dash_ver(struct tg3 *tp)
15377 {
15378         int vlen;
15379         u32 apedata;
15380         char *fwtype;
15381
15382         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15383
15384         if (tg3_flag(tp, APE_HAS_NCSI))
15385                 fwtype = "NCSI";
15386         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15387                 fwtype = "SMASH";
15388         else
15389                 fwtype = "DASH";
15390
15391         vlen = strlen(tp->fw_ver);
15392
15393         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15394                  fwtype,
15395                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15396                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15397                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15398                  (apedata & APE_FW_VERSION_BLDMSK));
15399 }
15400
15401 static void tg3_read_otp_ver(struct tg3 *tp)
15402 {
15403         u32 val, val2;
15404
15405         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15406                 return;
15407
15408         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15409             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15410             TG3_OTP_MAGIC0_VALID(val)) {
15411                 u64 val64 = (u64) val << 32 | val2;
15412                 u32 ver = 0;
15413                 int i, vlen;
15414
15415                 for (i = 0; i < 7; i++) {
15416                         if ((val64 & 0xff) == 0)
15417                                 break;
15418                         ver = val64 & 0xff;
15419                         val64 >>= 8;
15420                 }
15421                 vlen = strlen(tp->fw_ver);
15422                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15423         }
15424 }
15425
15426 static void tg3_read_fw_ver(struct tg3 *tp)
15427 {
15428         u32 val;
15429         bool vpd_vers = false;
15430
15431         if (tp->fw_ver[0] != 0)
15432                 vpd_vers = true;
15433
15434         if (tg3_flag(tp, NO_NVRAM)) {
15435                 strcat(tp->fw_ver, "sb");
15436                 tg3_read_otp_ver(tp);
15437                 return;
15438         }
15439
15440         if (tg3_nvram_read(tp, 0, &val))
15441                 return;
15442
15443         if (val == TG3_EEPROM_MAGIC)
15444                 tg3_read_bc_ver(tp);
15445         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15446                 tg3_read_sb_ver(tp, val);
15447         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15448                 tg3_read_hwsb_ver(tp);
15449
15450         if (tg3_flag(tp, ENABLE_ASF)) {
15451                 if (tg3_flag(tp, ENABLE_APE)) {
15452                         tg3_probe_ncsi(tp);
15453                         if (!vpd_vers)
15454                                 tg3_read_dash_ver(tp);
15455                 } else if (!vpd_vers) {
15456                         tg3_read_mgmtfw_ver(tp);
15457                 }
15458         }
15459
15460         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15461 }
15462
15463 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15464 {
15465         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15466                 return TG3_RX_RET_MAX_SIZE_5717;
15467         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15468                 return TG3_RX_RET_MAX_SIZE_5700;
15469         else
15470                 return TG3_RX_RET_MAX_SIZE_5705;
15471 }
15472
15473 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15474         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15475         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15476         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15477         { },
15478 };
15479
/* Find the sibling PCI function of a dual-port device (e.g. 5704).
 *
 * Scans the other functions in the same slot for a device that is not
 * ourselves.  Returns tp->pdev itself when none is found (single-port
 * configuration).  The returned pointer is NOT reference-counted; see
 * the comment below on why that is safe.
 */
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Drop the reference pci_get_slot() took; peer here is
		 * either NULL (put is a no-op) or our own device.
		 */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
15507
/* Determine the chip revision ID and set the chip-family flags.
 *
 * The revision normally lives in the high bits of MISC_HOST_CTRL
 * (@misc_ctrl_reg); newer devices report ASIC_REV_USE_PROD_ID_REG
 * there and store the real revision in a product-ID config register
 * whose address depends on the device ID.  The family flags
 * (5705_PLUS, 5750_PLUS, 5755_PLUS, 5780_CLASS, 57765_CLASS,
 * 5717_PLUS, 57765_PLUS) form a hierarchy: later checks build on
 * flags set by earlier ones, so the order below matters.
 */
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		/* Pick the product-ID register matching this device
		 * generation, then read the true revision from it.
		 */
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* 5717 C0 is treated as a 5720 A0 throughout the driver. */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	     tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
15592
15593 static bool tg3_10_100_only_device(struct tg3 *tp,
15594                                    const struct pci_device_id *ent)
15595 {
15596         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15597
15598         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15599              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15600             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15601                 return true;
15602
15603         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15604                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15605                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15606                                 return true;
15607                 } else {
15608                         return true;
15609                 }
15610         }
15611
15612         return false;
15613 }
15614
15615 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15616 {
15617         u32 misc_ctrl_reg;
15618         u32 pci_state_reg, grc_misc_cfg;
15619         u32 val;
15620         u16 pci_cmd;
15621         int err;
15622
15623         /* Force memory write invalidate off.  If we leave it on,
15624          * then on 5700_BX chips we have to enable a workaround.
15625          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15626          * to match the cacheline size.  The Broadcom driver have this
15627          * workaround but turns MWI off all the times so never uses
15628          * it.  This seems to suggest that the workaround is insufficient.
15629          */
15630         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15631         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15632         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15633
15634         /* Important! -- Make sure register accesses are byteswapped
15635          * correctly.  Also, for those chips that require it, make
15636          * sure that indirect register accesses are enabled before
15637          * the first operation.
15638          */
15639         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15640                               &misc_ctrl_reg);
15641         tp->misc_host_ctrl |= (misc_ctrl_reg &
15642                                MISC_HOST_CTRL_CHIPREV);
15643         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15644                                tp->misc_host_ctrl);
15645
15646         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15647
15648         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15649          * we need to disable memory and use config. cycles
15650          * only to access all registers. The 5702/03 chips
15651          * can mistakenly decode the special cycles from the
15652          * ICH chipsets as memory write cycles, causing corruption
15653          * of register and memory space. Only certain ICH bridges
15654          * will drive special cycles with non-zero data during the
15655          * address phase which can fall within the 5703's address
15656          * range. This is not an ICH bug as the PCI spec allows
15657          * non-zero address during special cycles. However, only
15658          * these ICH bridges are known to drive non-zero addresses
15659          * during special cycles.
15660          *
15661          * Since special cycles do not cross PCI bridges, we only
15662          * enable this workaround if the 5703 is on the secondary
15663          * bus of these ICH bridges.
15664          */
15665         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15666             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15667                 static struct tg3_dev_id {
15668                         u32     vendor;
15669                         u32     device;
15670                         u32     rev;
15671                 } ich_chipsets[] = {
15672                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15673                           PCI_ANY_ID },
15674                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15675                           PCI_ANY_ID },
15676                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15677                           0xa },
15678                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15679                           PCI_ANY_ID },
15680                         { },
15681                 };
15682                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15683                 struct pci_dev *bridge = NULL;
15684
15685                 while (pci_id->vendor != 0) {
15686                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15687                                                 bridge);
15688                         if (!bridge) {
15689                                 pci_id++;
15690                                 continue;
15691                         }
15692                         if (pci_id->rev != PCI_ANY_ID) {
15693                                 if (bridge->revision > pci_id->rev)
15694                                         continue;
15695                         }
15696                         if (bridge->subordinate &&
15697                             (bridge->subordinate->number ==
15698                              tp->pdev->bus->number)) {
15699                                 tg3_flag_set(tp, ICH_WORKAROUND);
15700                                 pci_dev_put(bridge);
15701                                 break;
15702                         }
15703                 }
15704         }
15705
15706         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15707                 static struct tg3_dev_id {
15708                         u32     vendor;
15709                         u32     device;
15710                 } bridge_chipsets[] = {
15711                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15712                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15713                         { },
15714                 };
15715                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15716                 struct pci_dev *bridge = NULL;
15717
15718                 while (pci_id->vendor != 0) {
15719                         bridge = pci_get_device(pci_id->vendor,
15720                                                 pci_id->device,
15721                                                 bridge);
15722                         if (!bridge) {
15723                                 pci_id++;
15724                                 continue;
15725                         }
15726                         if (bridge->subordinate &&
15727                             (bridge->subordinate->number <=
15728                              tp->pdev->bus->number) &&
15729                             (bridge->subordinate->busn_res.end >=
15730                              tp->pdev->bus->number)) {
15731                                 tg3_flag_set(tp, 5701_DMA_BUG);
15732                                 pci_dev_put(bridge);
15733                                 break;
15734                         }
15735                 }
15736         }
15737
15738         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15739          * DMA addresses > 40-bit. This bridge may have other additional
15740          * 57xx devices behind it in some 4-port NIC designs for example.
15741          * Any tg3 device found behind the bridge will also need the 40-bit
15742          * DMA workaround.
15743          */
15744         if (tg3_flag(tp, 5780_CLASS)) {
15745                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15746                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15747         } else {
15748                 struct pci_dev *bridge = NULL;
15749
15750                 do {
15751                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15752                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15753                                                 bridge);
15754                         if (bridge && bridge->subordinate &&
15755                             (bridge->subordinate->number <=
15756                              tp->pdev->bus->number) &&
15757                             (bridge->subordinate->busn_res.end >=
15758                              tp->pdev->bus->number)) {
15759                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15760                                 pci_dev_put(bridge);
15761                                 break;
15762                         }
15763                 } while (bridge);
15764         }
15765
15766         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15767             tg3_asic_rev(tp) == ASIC_REV_5714)
15768                 tp->pdev_peer = tg3_find_peer(tp);
15769
15770         /* Determine TSO capabilities */
15771         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15772                 ; /* Do nothing. HW bug. */
15773         else if (tg3_flag(tp, 57765_PLUS))
15774                 tg3_flag_set(tp, HW_TSO_3);
15775         else if (tg3_flag(tp, 5755_PLUS) ||
15776                  tg3_asic_rev(tp) == ASIC_REV_5906)
15777                 tg3_flag_set(tp, HW_TSO_2);
15778         else if (tg3_flag(tp, 5750_PLUS)) {
15779                 tg3_flag_set(tp, HW_TSO_1);
15780                 tg3_flag_set(tp, TSO_BUG);
15781                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15782                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15783                         tg3_flag_clear(tp, TSO_BUG);
15784         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15785                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15786                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15787                 tg3_flag_set(tp, FW_TSO);
15788                 tg3_flag_set(tp, TSO_BUG);
15789                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15790                         tp->fw_needed = FIRMWARE_TG3TSO5;
15791                 else
15792                         tp->fw_needed = FIRMWARE_TG3TSO;
15793         }
15794
15795         /* Selectively allow TSO based on operating conditions */
15796         if (tg3_flag(tp, HW_TSO_1) ||
15797             tg3_flag(tp, HW_TSO_2) ||
15798             tg3_flag(tp, HW_TSO_3) ||
15799             tg3_flag(tp, FW_TSO)) {
15800                 /* For firmware TSO, assume ASF is disabled.
15801                  * We'll disable TSO later if we discover ASF
15802                  * is enabled in tg3_get_eeprom_hw_cfg().
15803                  */
15804                 tg3_flag_set(tp, TSO_CAPABLE);
15805         } else {
15806                 tg3_flag_clear(tp, TSO_CAPABLE);
15807                 tg3_flag_clear(tp, TSO_BUG);
15808                 tp->fw_needed = NULL;
15809         }
15810
15811         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15812                 tp->fw_needed = FIRMWARE_TG3;
15813
15814         if (tg3_asic_rev(tp) == ASIC_REV_57766)
15815                 tp->fw_needed = FIRMWARE_TG357766;
15816
15817         tp->irq_max = 1;
15818
15819         if (tg3_flag(tp, 5750_PLUS)) {
15820                 tg3_flag_set(tp, SUPPORT_MSI);
15821                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15822                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15823                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15824                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15825                      tp->pdev_peer == tp->pdev))
15826                         tg3_flag_clear(tp, SUPPORT_MSI);
15827
15828                 if (tg3_flag(tp, 5755_PLUS) ||
15829                     tg3_asic_rev(tp) == ASIC_REV_5906) {
15830                         tg3_flag_set(tp, 1SHOT_MSI);
15831                 }
15832
15833                 if (tg3_flag(tp, 57765_PLUS)) {
15834                         tg3_flag_set(tp, SUPPORT_MSIX);
15835                         tp->irq_max = TG3_IRQ_MAX_VECS;
15836                 }
15837         }
15838
15839         tp->txq_max = 1;
15840         tp->rxq_max = 1;
15841         if (tp->irq_max > 1) {
15842                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15843                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15844
15845                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15846                     tg3_asic_rev(tp) == ASIC_REV_5720)
15847                         tp->txq_max = tp->irq_max - 1;
15848         }
15849
15850         if (tg3_flag(tp, 5755_PLUS) ||
15851             tg3_asic_rev(tp) == ASIC_REV_5906)
15852                 tg3_flag_set(tp, SHORT_DMA_BUG);
15853
15854         if (tg3_asic_rev(tp) == ASIC_REV_5719)
15855                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15856
15857         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15858             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15859             tg3_asic_rev(tp) == ASIC_REV_5720 ||
15860             tg3_asic_rev(tp) == ASIC_REV_5762)
15861                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15862
15863         if (tg3_flag(tp, 57765_PLUS) &&
15864             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15865                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15866
15867         if (!tg3_flag(tp, 5705_PLUS) ||
15868             tg3_flag(tp, 5780_CLASS) ||
15869             tg3_flag(tp, USE_JUMBO_BDFLAG))
15870                 tg3_flag_set(tp, JUMBO_CAPABLE);
15871
15872         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15873                               &pci_state_reg);
15874
15875         if (pci_is_pcie(tp->pdev)) {
15876                 u16 lnkctl;
15877
15878                 tg3_flag_set(tp, PCI_EXPRESS);
15879
15880                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15881                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15882                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15883                                 tg3_flag_clear(tp, HW_TSO_2);
15884                                 tg3_flag_clear(tp, TSO_CAPABLE);
15885                         }
15886                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15887                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15888                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15889                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15890                                 tg3_flag_set(tp, CLKREQ_BUG);
15891                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15892                         tg3_flag_set(tp, L1PLLPD_EN);
15893                 }
15894         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15895                 /* BCM5785 devices are effectively PCIe devices, and should
15896                  * follow PCIe codepaths, but do not have a PCIe capabilities
15897                  * section.
15898                  */
15899                 tg3_flag_set(tp, PCI_EXPRESS);
15900         } else if (!tg3_flag(tp, 5705_PLUS) ||
15901                    tg3_flag(tp, 5780_CLASS)) {
15902                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15903                 if (!tp->pcix_cap) {
15904                         dev_err(&tp->pdev->dev,
15905                                 "Cannot find PCI-X capability, aborting\n");
15906                         return -EIO;
15907                 }
15908
15909                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15910                         tg3_flag_set(tp, PCIX_MODE);
15911         }
15912
15913         /* If we have an AMD 762 or VIA K8T800 chipset, write
15914          * reordering to the mailbox registers done by the host
15915          * controller can cause major troubles.  We read back from
15916          * every mailbox register write to force the writes to be
15917          * posted to the chip in order.
15918          */
15919         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15920             !tg3_flag(tp, PCI_EXPRESS))
15921                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15922
15923         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15924                              &tp->pci_cacheline_sz);
15925         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15926                              &tp->pci_lat_timer);
15927         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15928             tp->pci_lat_timer < 64) {
15929                 tp->pci_lat_timer = 64;
15930                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15931                                       tp->pci_lat_timer);
15932         }
15933
15934         /* Important! -- It is critical that the PCI-X hw workaround
15935          * situation is decided before the first MMIO register access.
15936          */
15937         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15938                 /* 5700 BX chips need to have their TX producer index
15939                  * mailboxes written twice to workaround a bug.
15940                  */
15941                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15942
15943                 /* If we are in PCI-X mode, enable register write workaround.
15944                  *
15945                  * The workaround is to use indirect register accesses
15946                  * for all chip writes not to mailbox registers.
15947                  */
15948                 if (tg3_flag(tp, PCIX_MODE)) {
15949                         u32 pm_reg;
15950
15951                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15952
15953                         /* The chip can have its power management PCI config
15954                          * space registers clobbered due to this bug.
15955                          * So explicitly force the chip into D0 here.
15956                          */
15957                         pci_read_config_dword(tp->pdev,
15958                                               tp->pm_cap + PCI_PM_CTRL,
15959                                               &pm_reg);
15960                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15961                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15962                         pci_write_config_dword(tp->pdev,
15963                                                tp->pm_cap + PCI_PM_CTRL,
15964                                                pm_reg);
15965
15966                         /* Also, force SERR#/PERR# in PCI command. */
15967                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15968                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15969                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15970                 }
15971         }
15972
15973         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15974                 tg3_flag_set(tp, PCI_HIGH_SPEED);
15975         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15976                 tg3_flag_set(tp, PCI_32BIT);
15977
15978         /* Chip-specific fixup from Broadcom driver */
15979         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15980             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15981                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15982                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15983         }
15984
15985         /* Default fast path register access methods */
15986         tp->read32 = tg3_read32;
15987         tp->write32 = tg3_write32;
15988         tp->read32_mbox = tg3_read32;
15989         tp->write32_mbox = tg3_write32;
15990         tp->write32_tx_mbox = tg3_write32;
15991         tp->write32_rx_mbox = tg3_write32;
15992
15993         /* Various workaround register access methods */
15994         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15995                 tp->write32 = tg3_write_indirect_reg32;
15996         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15997                  (tg3_flag(tp, PCI_EXPRESS) &&
15998                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15999                 /*
16000                  * Back to back register writes can cause problems on these
16001                  * chips, the workaround is to read back all reg writes
16002                  * except those to mailbox regs.
16003                  *
16004                  * See tg3_write_indirect_reg32().
16005                  */
16006                 tp->write32 = tg3_write_flush_reg32;
16007         }
16008
16009         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16010                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16011                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16012                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16013         }
16014
16015         if (tg3_flag(tp, ICH_WORKAROUND)) {
16016                 tp->read32 = tg3_read_indirect_reg32;
16017                 tp->write32 = tg3_write_indirect_reg32;
16018                 tp->read32_mbox = tg3_read_indirect_mbox;
16019                 tp->write32_mbox = tg3_write_indirect_mbox;
16020                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16021                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16022
16023                 iounmap(tp->regs);
16024                 tp->regs = NULL;
16025
16026                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16027                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16028                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16029         }
16030         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16031                 tp->read32_mbox = tg3_read32_mbox_5906;
16032                 tp->write32_mbox = tg3_write32_mbox_5906;
16033                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16034                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16035         }
16036
16037         if (tp->write32 == tg3_write_indirect_reg32 ||
16038             (tg3_flag(tp, PCIX_MODE) &&
16039              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16040               tg3_asic_rev(tp) == ASIC_REV_5701)))
16041                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16042
16043         /* The memory arbiter has to be enabled in order for SRAM accesses
16044          * to succeed.  Normally on powerup the tg3 chip firmware will make
16045          * sure it is enabled, but other entities such as system netboot
16046          * code might disable it.
16047          */
16048         val = tr32(MEMARB_MODE);
16049         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16050
16051         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16052         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16053             tg3_flag(tp, 5780_CLASS)) {
16054                 if (tg3_flag(tp, PCIX_MODE)) {
16055                         pci_read_config_dword(tp->pdev,
16056                                               tp->pcix_cap + PCI_X_STATUS,
16057                                               &val);
16058                         tp->pci_fn = val & 0x7;
16059                 }
16060         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16061                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16062                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16063                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16064                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16065                         val = tr32(TG3_CPMU_STATUS);
16066
16067                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16068                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16069                 else
16070                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16071                                      TG3_CPMU_STATUS_FSHFT_5719;
16072         }
16073
16074         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16075                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16076                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16077         }
16078
16079         /* Get eeprom hw config before calling tg3_set_power_state().
16080          * In particular, the TG3_FLAG_IS_NIC flag must be
16081          * determined before calling tg3_set_power_state() so that
16082          * we know whether or not to switch out of Vaux power.
16083          * When the flag is set, it means that GPIO1 is used for eeprom
16084          * write protect and also implies that it is a LOM where GPIOs
16085          * are not used to switch power.
16086          */
16087         tg3_get_eeprom_hw_cfg(tp);
16088
16089         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16090                 tg3_flag_clear(tp, TSO_CAPABLE);
16091                 tg3_flag_clear(tp, TSO_BUG);
16092                 tp->fw_needed = NULL;
16093         }
16094
16095         if (tg3_flag(tp, ENABLE_APE)) {
16096                 /* Allow reads and writes to the
16097                  * APE register and memory space.
16098                  */
16099                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16100                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16101                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16102                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16103                                        pci_state_reg);
16104
16105                 tg3_ape_lock_init(tp);
16106         }
16107
16108         /* Set up tp->grc_local_ctrl before calling
16109          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16110          * will bring 5700's external PHY out of reset.
16111          * It is also used as eeprom write protect on LOMs.
16112          */
16113         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16114         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16115             tg3_flag(tp, EEPROM_WRITE_PROT))
16116                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16117                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16118         /* Unused GPIO3 must be driven as output on 5752 because there
16119          * are no pull-up resistors on unused GPIO pins.
16120          */
16121         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16122                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16123
16124         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16125             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16126             tg3_flag(tp, 57765_CLASS))
16127                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16128
16129         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16130             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16131                 /* Turn off the debug UART. */
16132                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16133                 if (tg3_flag(tp, IS_NIC))
16134                         /* Keep VMain power. */
16135                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16136                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16137         }
16138
16139         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16140                 tp->grc_local_ctrl |=
16141                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16142
16143         /* Switch out of Vaux if it is a NIC */
16144         tg3_pwrsrc_switch_to_vmain(tp);
16145
16146         /* Derive initial jumbo mode from MTU assigned in
16147          * ether_setup() via the alloc_etherdev() call
16148          */
16149         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16150                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16151
16152         /* Determine WakeOnLan speed to use. */
16153         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16154             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16155             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16156             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16157                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16158         } else {
16159                 tg3_flag_set(tp, WOL_SPEED_100MB);
16160         }
16161
16162         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16163                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16164
16165         /* A few boards don't want Ethernet@WireSpeed phy feature */
16166         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16167             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16168              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16169              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16170             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16171             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16172                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16173
16174         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16175             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16176                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16177         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16178                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16179
16180         if (tg3_flag(tp, 5705_PLUS) &&
16181             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16182             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16183             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16184             !tg3_flag(tp, 57765_PLUS)) {
16185                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16186                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16187                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16188                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16189                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16190                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16191                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16192                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16193                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16194                 } else
16195                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16196         }
16197
16198         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16199             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16200                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16201                 if (tp->phy_otp == 0)
16202                         tp->phy_otp = TG3_OTP_DEFAULT;
16203         }
16204
16205         if (tg3_flag(tp, CPMU_PRESENT))
16206                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16207         else
16208                 tp->mi_mode = MAC_MI_MODE_BASE;
16209
16210         tp->coalesce_mode = 0;
16211         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16212             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16213                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16214
16215         /* Set these bits to enable statistics workaround. */
16216         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16217             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16218             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16219                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16220                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16221         }
16222
16223         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16224             tg3_asic_rev(tp) == ASIC_REV_57780)
16225                 tg3_flag_set(tp, USE_PHYLIB);
16226
16227         err = tg3_mdio_init(tp);
16228         if (err)
16229                 return err;
16230
16231         /* Initialize data/descriptor byte/word swapping. */
16232         val = tr32(GRC_MODE);
16233         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16234             tg3_asic_rev(tp) == ASIC_REV_5762)
16235                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16236                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16237                         GRC_MODE_B2HRX_ENABLE |
16238                         GRC_MODE_HTX2B_ENABLE |
16239                         GRC_MODE_HOST_STACKUP);
16240         else
16241                 val &= GRC_MODE_HOST_STACKUP;
16242
16243         tw32(GRC_MODE, val | tp->grc_mode);
16244
16245         tg3_switch_clocks(tp);
16246
16247         /* Clear this out for sanity. */
16248         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16249
16250         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16251                               &pci_state_reg);
16252         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16253             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16254                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16255                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16256                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16257                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16258                         void __iomem *sram_base;
16259
16260                         /* Write some dummy words into the SRAM status block
16261                          * area, see if it reads back correctly.  If the return
16262                          * value is bad, force enable the PCIX workaround.
16263                          */
16264                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16265
16266                         writel(0x00000000, sram_base);
16267                         writel(0x00000000, sram_base + 4);
16268                         writel(0xffffffff, sram_base + 4);
16269                         if (readl(sram_base) != 0x00000000)
16270                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16271                 }
16272         }
16273
16274         udelay(50);
16275         tg3_nvram_init(tp);
16276
16277         /* If the device has an NVRAM, no need to load patch firmware */
16278         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16279             !tg3_flag(tp, NO_NVRAM))
16280                 tp->fw_needed = NULL;
16281
16282         grc_misc_cfg = tr32(GRC_MISC_CFG);
16283         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16284
16285         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16286             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16287              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16288                 tg3_flag_set(tp, IS_5788);
16289
16290         if (!tg3_flag(tp, IS_5788) &&
16291             tg3_asic_rev(tp) != ASIC_REV_5700)
16292                 tg3_flag_set(tp, TAGGED_STATUS);
16293         if (tg3_flag(tp, TAGGED_STATUS)) {
16294                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16295                                       HOSTCC_MODE_CLRTICK_TXBD);
16296
16297                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16298                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16299                                        tp->misc_host_ctrl);
16300         }
16301
16302         /* Preserve the APE MAC_MODE bits */
16303         if (tg3_flag(tp, ENABLE_APE))
16304                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16305         else
16306                 tp->mac_mode = 0;
16307
16308         if (tg3_10_100_only_device(tp, ent))
16309                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16310
16311         err = tg3_phy_probe(tp);
16312         if (err) {
16313                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16314                 /* ... but do not return immediately ... */
16315                 tg3_mdio_fini(tp);
16316         }
16317
16318         tg3_read_vpd(tp);
16319         tg3_read_fw_ver(tp);
16320
16321         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16322                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16323         } else {
16324                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16325                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16326                 else
16327                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16328         }
16329
16330         /* 5700 {AX,BX} chips have a broken status block link
16331          * change bit implementation, so we must use the
16332          * status register in those cases.
16333          */
16334         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16335                 tg3_flag_set(tp, USE_LINKCHG_REG);
16336         else
16337                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16338
16339         /* The led_ctrl is set during tg3_phy_probe, here we might
16340          * have to force the link status polling mechanism based
16341          * upon subsystem IDs.
16342          */
16343         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16344             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16345             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16346                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16347                 tg3_flag_set(tp, USE_LINKCHG_REG);
16348         }
16349
16350         /* For all SERDES we poll the MAC status register. */
16351         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16352                 tg3_flag_set(tp, POLL_SERDES);
16353         else
16354                 tg3_flag_clear(tp, POLL_SERDES);
16355
16356         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16357         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16358         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16359             tg3_flag(tp, PCIX_MODE)) {
16360                 tp->rx_offset = NET_SKB_PAD;
16361 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16362                 tp->rx_copy_thresh = ~(u16)0;
16363 #endif
16364         }
16365
16366         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16367         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16368         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16369
16370         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16371
16372         /* Increment the rx prod index on the rx std ring by at most
16373          * 8 for these chips to workaround hw errata.
16374          */
16375         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16376             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16377             tg3_asic_rev(tp) == ASIC_REV_5755)
16378                 tp->rx_std_max_post = 8;
16379
16380         if (tg3_flag(tp, ASPM_WORKAROUND))
16381                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16382                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16383
16384         return err;
16385 }
16386
16387 #ifdef CONFIG_SPARC
16388 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16389 {
16390         struct net_device *dev = tp->dev;
16391         struct pci_dev *pdev = tp->pdev;
16392         struct device_node *dp = pci_device_to_OF_node(pdev);
16393         const unsigned char *addr;
16394         int len;
16395
16396         addr = of_get_property(dp, "local-mac-address", &len);
16397         if (addr && len == 6) {
16398                 memcpy(dev->dev_addr, addr, 6);
16399                 return 0;
16400         }
16401         return -ENODEV;
16402 }
16403
16404 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16405 {
16406         struct net_device *dev = tp->dev;
16407
16408         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16409         return 0;
16410 }
16411 #endif
16412
/* Determine the interface's permanent MAC address.
 *
 * Sources are tried in order: SPARC OpenFirmware property, the SSB
 * core helper (for SSB-attached parts), the bootcode MAC address
 * mailbox in NIC SRAM, NVRAM at a chip/function-specific offset, and
 * finally the live MAC address registers.  On SPARC an IDPROM default
 * is the last resort.  Returns 0 on success, -EINVAL if no valid
 * address could be found.
 */
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	/* Select the NVRAM offset of the MAC address for this chip and
	 * PCI function (dual-MAC parts keep a second copy at 0xcc).
	 */
	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {	/* 0x48 0x4b = ASCII "HK" signature */
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* NVRAM words are big-endian; the address is the
			 * low 2 bytes of hi followed by all 4 bytes of lo.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
16494
16495 #define BOUNDARY_SINGLE_CACHELINE       1
16496 #define BOUNDARY_MULTI_CACHELINE        2
16497
/* Compute the DMA burst-boundary bits for TG3PCI_DMA_RW_CTRL.
 *
 * @val: current DMA_RW_CTRL value to merge the boundary bits into.
 *
 * The boundary is chosen from the host bus type (PCI / PCI-X / PCI-E),
 * the PCI cache line size register, and a per-architecture goal of
 * bursting within a single cache line versus across multiple lines.
 * Returns the updated register value.
 */
static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;	/* register unset: treat as 1024 bytes */
	else
		cacheline_size = (int) byte * 4;	/* register counts 32-bit words */

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Pick the burst-alignment goal by host architecture. */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCI-E only has write-side boundary controls. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Plain PCI: match the boundary to the cache line size. */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
16638
/* Run one host<->NIC DMA transaction as a hardware self-test.
 *
 * @buf, @buf_dma: host test buffer and its bus address.
 * @size: transfer length in bytes.
 * @to_device: true exercises the read-DMA engine (device reads host
 *	memory), false the write-DMA engine (device writes host memory).
 *
 * A single internal buffer descriptor is written into NIC SRAM through
 * the PCI memory window and queued on the matching FTQ; completion is
 * then polled for up to ~4 ms (40 x 100 us).
 * Returns 0 on completion, -ENODEV on timeout.
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear completion FIFOs and DMA status before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI config-space memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the completion FIFO for our descriptor. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
16719
16720 #define TEST_BUFFER_SIZE        0x2000
16721
16722 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16723         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16724         { },
16725 };
16726
/* Choose and program the DMA read/write control settings.
 *
 * Builds a chip- and bus-specific value for TG3PCI_DMA_RW_CTRL, then on
 * 5700/5701 runs a DMA write/read-back test with the maximum write
 * burst size to expose the 5700/5701 write DMA bug; when corruption is
 * detected the write boundary is reduced to 16 bytes and the test is
 * retried once.  Returns 0 on success or a negative errno.
 */
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Baseline PCI read/write command codes, then merge in the
	 * burst-boundary bits for this bus/arch combination.
	 */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the loop-back test below. */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			/* Corruption: retry once with a 16-byte write
			 * boundary before declaring the device broken.
			 */
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
16918
16919 static void tg3_init_bufmgr_config(struct tg3 *tp)
16920 {
16921         if (tg3_flag(tp, 57765_PLUS)) {
16922                 tp->bufmgr_config.mbuf_read_dma_low_water =
16923                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16924                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16925                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16926                 tp->bufmgr_config.mbuf_high_water =
16927                         DEFAULT_MB_HIGH_WATER_57765;
16928
16929                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16930                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16931                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16932                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16933                 tp->bufmgr_config.mbuf_high_water_jumbo =
16934                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16935         } else if (tg3_flag(tp, 5705_PLUS)) {
16936                 tp->bufmgr_config.mbuf_read_dma_low_water =
16937                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16938                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16939                         DEFAULT_MB_MACRX_LOW_WATER_5705;
16940                 tp->bufmgr_config.mbuf_high_water =
16941                         DEFAULT_MB_HIGH_WATER_5705;
16942                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16943                         tp->bufmgr_config.mbuf_mac_rx_low_water =
16944                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
16945                         tp->bufmgr_config.mbuf_high_water =
16946                                 DEFAULT_MB_HIGH_WATER_5906;
16947                 }
16948
16949                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16950                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16951                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16952                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16953                 tp->bufmgr_config.mbuf_high_water_jumbo =
16954                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16955         } else {
16956                 tp->bufmgr_config.mbuf_read_dma_low_water =
16957                         DEFAULT_MB_RDMA_LOW_WATER;
16958                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16959                         DEFAULT_MB_MACRX_LOW_WATER;
16960                 tp->bufmgr_config.mbuf_high_water =
16961                         DEFAULT_MB_HIGH_WATER;
16962
16963                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16964                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16965                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16966                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16967                 tp->bufmgr_config.mbuf_high_water_jumbo =
16968                         DEFAULT_MB_HIGH_WATER_JUMBO;
16969         }
16970
16971         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16972         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16973 }
16974
16975 static char *tg3_phy_string(struct tg3 *tp)
16976 {
16977         switch (tp->phy_id & TG3_PHY_ID_MASK) {
16978         case TG3_PHY_ID_BCM5400:        return "5400";
16979         case TG3_PHY_ID_BCM5401:        return "5401";
16980         case TG3_PHY_ID_BCM5411:        return "5411";
16981         case TG3_PHY_ID_BCM5701:        return "5701";
16982         case TG3_PHY_ID_BCM5703:        return "5703";
16983         case TG3_PHY_ID_BCM5704:        return "5704";
16984         case TG3_PHY_ID_BCM5705:        return "5705";
16985         case TG3_PHY_ID_BCM5750:        return "5750";
16986         case TG3_PHY_ID_BCM5752:        return "5752";
16987         case TG3_PHY_ID_BCM5714:        return "5714";
16988         case TG3_PHY_ID_BCM5780:        return "5780";
16989         case TG3_PHY_ID_BCM5755:        return "5755";
16990         case TG3_PHY_ID_BCM5787:        return "5787";
16991         case TG3_PHY_ID_BCM5784:        return "5784";
16992         case TG3_PHY_ID_BCM5756:        return "5722/5756";
16993         case TG3_PHY_ID_BCM5906:        return "5906";
16994         case TG3_PHY_ID_BCM5761:        return "5761";
16995         case TG3_PHY_ID_BCM5718C:       return "5718C";
16996         case TG3_PHY_ID_BCM5718S:       return "5718S";
16997         case TG3_PHY_ID_BCM57765:       return "57765";
16998         case TG3_PHY_ID_BCM5719C:       return "5719C";
16999         case TG3_PHY_ID_BCM5720C:       return "5720C";
17000         case TG3_PHY_ID_BCM5762:        return "5762C";
17001         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17002         case 0:                 return "serdes";
17003         default:                return "unknown";
17004         }
17005 }
17006
/* Format a human-readable description of the host bus into @str:
 * bus type (PCI Express / PCI-X / PCI), clock speed, and bus width.
 * @str must be large enough for the longest combination (the caller
 * in tg3_init_one passes a 40-byte buffer).  Returns @str.
 */
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		/* GRC_MISC_CFG is read only when clock_ctrl != 7 due to
		 * short-circuit evaluation; keep that ordering.
		 */
		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
17042
17043 static void tg3_init_coal(struct tg3 *tp)
17044 {
17045         struct ethtool_coalesce *ec = &tp->coal;
17046
17047         memset(ec, 0, sizeof(*ec));
17048         ec->cmd = ETHTOOL_GCOALESCE;
17049         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17050         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17051         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17052         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17053         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17054         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17055         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17056         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17057         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17058
17059         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17060                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17061                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17062                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17063                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17064                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17065         }
17066
17067         if (tg3_flag(tp, 5705_PLUS)) {
17068                 ec->rx_coalesce_usecs_irq = 0;
17069                 ec->tx_coalesce_usecs_irq = 0;
17070                 ec->stats_block_coalesce_usecs = 0;
17071         }
17072 }
17073
/* tg3_init_one() - PCI probe callback for Tigon3 devices.
 *
 * Enables and maps the device, reads the chip invariants, configures
 * DMA masks and offload features, lays out the per-vector interrupt
 * mailboxes, and finally registers the net_device.
 *
 * Returns 0 on success or a negative errno.  Every resource acquired
 * along the way is released through the err_out_* labels at the bottom,
 * which intentionally fall through in reverse acquisition order.
 */
static int tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* Make sure the chip is in full-power state before touching it. */
	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	/* Allocate one net_device sized for the maximum IRQ vector count;
	 * the actual number of queues used is decided later.
	 */
	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	/* Interrupts stay marked "synchronized" (masked) until open. */
	tp->irq_sync = 1;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* SSB GigE cores (found on some Broadcom SoCs) need several quirk
	 * flags whose need is reported by the ssb layer.
	 */
	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev))
			tg3_flag_set(tp, ROBOSWITCH);
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	/* BAR 0 holds the main register window. */
	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	/* These devices carry an APE (management/ASF coprocessor) whose
	 * register window lives at BAR 2 and must also be mapped.
	 */
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	/* Identify the chip and populate all per-chip capability flags. */
	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	/* Fall back to a 32-bit mask if the wide mask was refused. */
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	/* 5705 A1 without TSO on a slow bus can only post 64 RX BDs. */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	/* Lay out the interrupt, RX-return and TX-producer mailbox
	 * registers for every potential NAPI vector.
	 */
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		/* The first five interrupt mailboxes are 8 bytes apart;
		 * the rest are packed at 4-byte stride.
		 */
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		/* TX producer mailboxes alternate between the low and high
		 * halves of each 64-bit register pair.
		 */
		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	/* These ASICs carry an IEEE 1588 (PTP) hardware clock. */
	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	/* Snapshot config space so AER slot_reset can restore it. */
	pci_save_state(pdev);

	return 0;

	/* Error unwind: labels fall through in reverse acquisition order. */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
17476
/* tg3_remove_one() - PCI remove callback.
 *
 * Undoes everything tg3_init_one() did, in reverse order.  Clearing
 * drvdata at the end lets the AER handlers detect a removed device.
 */
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		/* release_firmware(NULL) is a no-op, so this is safe even
		 * if no TSO firmware was ever requested.
		 */
		release_firmware(tp->fw);

		/* Make sure a queued reset task cannot run during teardown. */
		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
17508
17509 #ifdef CONFIG_PM_SLEEP
/* tg3_suspend() - dev_pm_ops .suspend callback.
 *
 * Quiesces the interface (NAPI, timer, interrupts), halts the chip and
 * prepares it for a low-power state.  If the power-down preparation
 * fails, the hardware is restarted so the interface keeps working and
 * the error is propagated to the PM core.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	/* Stop all async activity before touching the hardware. */
	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* irq_sync pass (second arg 1) waits for in-flight IRQs. */
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Power-down failed: bring the device back up so the
		 * interface remains usable, then report the error.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
17562
/* tg3_resume() - dev_pm_ops .resume callback.
 *
 * Reattaches the netdev and restarts the hardware that tg3_suspend()
 * halted.  A full PHY reset is skipped when the link was deliberately
 * kept alive across the power-down (KEEP_LINK_ON_PWRDN).
 */
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* The interface was down at suspend time; nothing to restore. */
	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
17595 #endif /* CONFIG_PM_SLEEP */
17596
/* Suspend/resume hooks; compiled out when CONFIG_PM_SLEEP is unset. */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17598
17599 /**
17600  * tg3_io_error_detected - called when PCI error is detected
17601  * @pdev: Pointer to PCI device
17602  * @state: The current pci connection state
17603  *
17604  * This function is called after a PCI bus error affecting
17605  * this device has been detected.
17606  */
17607 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17608                                               pci_channel_state_t state)
17609 {
17610         struct net_device *netdev = pci_get_drvdata(pdev);
17611         struct tg3 *tp = netdev_priv(netdev);
17612         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17613
17614         netdev_info(netdev, "PCI I/O error detected\n");
17615
17616         rtnl_lock();
17617
17618         if (!netif_running(netdev))
17619                 goto done;
17620
17621         tg3_phy_stop(tp);
17622
17623         tg3_netif_stop(tp);
17624
17625         tg3_timer_stop(tp);
17626
17627         /* Want to make sure that the reset task doesn't run */
17628         tg3_reset_task_cancel(tp);
17629
17630         netif_device_detach(netdev);
17631
17632         /* Clean up software state, even if MMIO is blocked */
17633         tg3_full_lock(tp, 0);
17634         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17635         tg3_full_unlock(tp);
17636
17637 done:
17638         if (state == pci_channel_io_perm_failure)
17639                 err = PCI_ERS_RESULT_DISCONNECT;
17640         else
17641                 pci_disable_device(pdev);
17642
17643         rtnl_unlock();
17644
17645         return err;
17646 }
17647
17648 /**
17649  * tg3_io_slot_reset - called after the pci bus has been reset.
17650  * @pdev: Pointer to PCI device
17651  *
17652  * Restart the card from scratch, as if from a cold-boot.
17653  * At this point, the card has exprienced a hard reset,
17654  * followed by fixups by BIOS, and has its config space
17655  * set up identically to what it was at cold boot.
17656  */
17657 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17658 {
17659         struct net_device *netdev = pci_get_drvdata(pdev);
17660         struct tg3 *tp = netdev_priv(netdev);
17661         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17662         int err;
17663
17664         rtnl_lock();
17665
17666         if (pci_enable_device(pdev)) {
17667                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17668                 goto done;
17669         }
17670
17671         pci_set_master(pdev);
17672         pci_restore_state(pdev);
17673         pci_save_state(pdev);
17674
17675         if (!netif_running(netdev)) {
17676                 rc = PCI_ERS_RESULT_RECOVERED;
17677                 goto done;
17678         }
17679
17680         err = tg3_power_up(tp);
17681         if (err)
17682                 goto done;
17683
17684         rc = PCI_ERS_RESULT_RECOVERED;
17685
17686 done:
17687         rtnl_unlock();
17688
17689         return rc;
17690 }
17691
17692 /**
17693  * tg3_io_resume - called when traffic can start flowing again.
17694  * @pdev: Pointer to PCI device
17695  *
17696  * This callback is called when the error recovery driver tells
17697  * us that its OK to resume normal operation.
17698  */
17699 static void tg3_io_resume(struct pci_dev *pdev)
17700 {
17701         struct net_device *netdev = pci_get_drvdata(pdev);
17702         struct tg3 *tp = netdev_priv(netdev);
17703         int err;
17704
17705         rtnl_lock();
17706
17707         if (!netif_running(netdev))
17708                 goto done;
17709
17710         tg3_full_lock(tp, 0);
17711         tg3_flag_set(tp, INIT_COMPLETE);
17712         err = tg3_restart_hw(tp, true);
17713         if (err) {
17714                 tg3_full_unlock(tp);
17715                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17716                 goto done;
17717         }
17718
17719         netif_device_attach(netdev);
17720
17721         tg3_timer_start(tp);
17722
17723         tg3_netif_start(tp);
17724
17725         tg3_full_unlock(tp);
17726
17727         tg3_phy_start(tp);
17728
17729 done:
17730         rtnl_unlock();
17731 }
17732
/* PCI error-recovery (AER/EEH) callbacks: quiesce on error, reinit
 * after slot reset, restart traffic on resume.
 */
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
17738
/* PCI driver glue tying the probe/remove, PM and AER callbacks to the
 * device ID table.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
};
17747
/* Module entry point: register the PCI driver with the core. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
17752
/* Module exit point: unregister the PCI driver (triggers remove on all
 * bound devices).
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
17757
/* Hook the init/exit functions into the module loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);