2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
8 * See MAINTAINERS file for support contact information.
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
23 #include <linux/tcp.h>
24 #include <linux/interrupt.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/firmware.h>
28 #include <linux/pci-aspm.h>
29 #include <linux/prefetch.h>
30 #include <linux/ipv6.h>
31 #include <net/ip6_checksum.h>
36 #define RTL8169_VERSION "2.3LK-NAPI"
37 #define MODULENAME "r8169"
38 #define PFX MODULENAME ": "
/* Firmware image paths (under /lib/firmware) for chip variants whose
 * PHY/MCU needs a runtime patch; each is also declared below with
 * MODULE_FIRMWARE() so userspace tooling can pre-load them. */
40 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
41 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
42 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
43 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
44 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
45 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
46 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
47 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
48 #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
49 #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
50 #define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw"
51 #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
52 #define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw"
53 #define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw"
54 #define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw"
55 #define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw"
56 #define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw"
57 #define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw"
58 #define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
61 #define assert(expr) \
63 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
64 #expr,__FILE__,__func__,__LINE__); \
66 #define dprintk(fmt, args...) \
67 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
69 #define assert(expr) do {} while (0)
70 #define dprintk(fmt, args...) do {} while (0)
71 #endif /* RTL8169_DEBUG */
73 #define R8169_MSG_DEFAULT \
74 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
76 #define TX_SLOTS_AVAIL(tp) \
77 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
79 /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
80 #define TX_FRAGS_READY_FOR(tp,nr_frags) \
81 (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
83 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
84 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
85 static const int multicast_filter_limit = 32;
87 #define MAX_READ_REQUEST_SHIFT 12
88 #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
89 #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
91 #define R8169_REGS_SIZE 256
92 #define R8169_NAPI_WEIGHT 64
93 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
94 #define NUM_RX_DESC 256U /* Number of Rx descriptor registers */
95 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
96 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
98 #define RTL8169_TX_TIMEOUT (6*HZ)
99 #define RTL8169_PHY_TIMEOUT (10*HZ)
101 /* write/read MMIO register */
/* NOTE: these macros expect a local variable `ioaddr` (the ioremap'ed
 * MMIO base, tp->mmio_addr) to be in scope at the call site. */
102 #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
103 #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
104 #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
105 #define RTL_R8(reg) readb (ioaddr + (reg))
106 #define RTL_R16(reg) readw (ioaddr + (reg))
107 #define RTL_R32(reg) readl (ioaddr + (reg))
110 RTL_GIGA_MAC_VER_01 = 0,
158 RTL_GIGA_MAC_NONE = 0xff,
161 enum rtl_tx_desc_version {
/* Per-family jumbo frame payload limits: N KiB of buffer minus the
 * Ethernet header and 2 spare bytes (presumably alignment/CRC slack —
 * NOTE(review): confirm intent of the "- 2"). JUMBO_1K is the plain
 * 1500-byte ETH_DATA_LEN, i.e. no jumbo support. */
166 #define JUMBO_1K ETH_DATA_LEN
167 #define JUMBO_4K (4*1024 - ETH_HLEN - 2)
168 #define JUMBO_6K (6*1024 - ETH_HLEN - 2)
169 #define JUMBO_7K (7*1024 - ETH_HLEN - 2)
170 #define JUMBO_9K (9*1024 - ETH_HLEN - 2)
172 #define _R(NAME,TD,FW,SZ,B) { \
180 static const struct {
182 enum rtl_tx_desc_version txd_version;
186 } rtl_chip_infos[] = {
188 [RTL_GIGA_MAC_VER_01] =
189 _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
190 [RTL_GIGA_MAC_VER_02] =
191 _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
192 [RTL_GIGA_MAC_VER_03] =
193 _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
194 [RTL_GIGA_MAC_VER_04] =
195 _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
196 [RTL_GIGA_MAC_VER_05] =
197 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
198 [RTL_GIGA_MAC_VER_06] =
199 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
201 [RTL_GIGA_MAC_VER_07] =
202 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
203 [RTL_GIGA_MAC_VER_08] =
204 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
205 [RTL_GIGA_MAC_VER_09] =
206 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
207 [RTL_GIGA_MAC_VER_10] =
208 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
209 [RTL_GIGA_MAC_VER_11] =
210 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
211 [RTL_GIGA_MAC_VER_12] =
212 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
213 [RTL_GIGA_MAC_VER_13] =
214 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
215 [RTL_GIGA_MAC_VER_14] =
216 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
217 [RTL_GIGA_MAC_VER_15] =
218 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
219 [RTL_GIGA_MAC_VER_16] =
220 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
221 [RTL_GIGA_MAC_VER_17] =
222 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
223 [RTL_GIGA_MAC_VER_18] =
224 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
225 [RTL_GIGA_MAC_VER_19] =
226 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
227 [RTL_GIGA_MAC_VER_20] =
228 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
229 [RTL_GIGA_MAC_VER_21] =
230 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
231 [RTL_GIGA_MAC_VER_22] =
232 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
233 [RTL_GIGA_MAC_VER_23] =
234 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
235 [RTL_GIGA_MAC_VER_24] =
236 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
237 [RTL_GIGA_MAC_VER_25] =
238 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
240 [RTL_GIGA_MAC_VER_26] =
241 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
243 [RTL_GIGA_MAC_VER_27] =
244 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
245 [RTL_GIGA_MAC_VER_28] =
246 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
247 [RTL_GIGA_MAC_VER_29] =
248 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
250 [RTL_GIGA_MAC_VER_30] =
251 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
253 [RTL_GIGA_MAC_VER_31] =
254 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
255 [RTL_GIGA_MAC_VER_32] =
256 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
258 [RTL_GIGA_MAC_VER_33] =
259 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
261 [RTL_GIGA_MAC_VER_34] =
262 _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
264 [RTL_GIGA_MAC_VER_35] =
265 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
267 [RTL_GIGA_MAC_VER_36] =
268 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
270 [RTL_GIGA_MAC_VER_37] =
271 _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
273 [RTL_GIGA_MAC_VER_38] =
274 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
276 [RTL_GIGA_MAC_VER_39] =
277 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
279 [RTL_GIGA_MAC_VER_40] =
280 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_2,
282 [RTL_GIGA_MAC_VER_41] =
283 _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
284 [RTL_GIGA_MAC_VER_42] =
285 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_3,
287 [RTL_GIGA_MAC_VER_43] =
288 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2,
290 [RTL_GIGA_MAC_VER_44] =
291 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2,
293 [RTL_GIGA_MAC_VER_45] =
294 _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_1,
296 [RTL_GIGA_MAC_VER_46] =
297 _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_2,
299 [RTL_GIGA_MAC_VER_47] =
300 _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_1,
302 [RTL_GIGA_MAC_VER_48] =
303 _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_2,
314 static const struct pci_device_id rtl8169_pci_tbl[] = {
315 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
316 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
317 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
318 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
319 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
320 { PCI_VENDOR_ID_DLINK, 0x4300,
321 PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
322 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
323 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
324 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
325 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
326 { PCI_VENDOR_ID_LINKSYS, 0x1032,
327 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
329 PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
333 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
335 static int rx_buf_sz = 16383;
342 MAC0 = 0, /* Ethernet hardware address. */
344 MAR0 = 8, /* Multicast filter. */
345 CounterAddrLow = 0x10,
346 CounterAddrHigh = 0x14,
347 TxDescStartAddrLow = 0x20,
348 TxDescStartAddrHigh = 0x24,
349 TxHDescStartAddrLow = 0x28,
350 TxHDescStartAddrHigh = 0x2c,
359 #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
360 #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
363 #define RX128_INT_EN (1 << 15) /* 8111c and later */
364 #define RX_MULTI_EN (1 << 14) /* 8111c only */
365 #define RXCFG_FIFO_SHIFT 13
366 /* No threshold before first PCI xfer */
367 #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
368 #define RX_EARLY_OFF (1 << 11)
369 #define RXCFG_DMA_SHIFT 8
370 /* Unlimited maximum PCI burst. */
371 #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
378 #define PME_SIGNAL (1 << 5) /* 8168c and later */
389 RxDescAddrLow = 0xe4,
390 RxDescAddrHigh = 0xe8,
391 EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
393 #define NoEarlyTx 0x3f /* Max value : no early transmit. */
395 MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
397 #define TxPacketMax (8064 >> 7)
398 #define EarlySize 0x27
401 FuncEventMask = 0xf4,
402 FuncPresetState = 0xf8,
403 FuncForceEvent = 0xfc,
406 enum rtl8110_registers {
412 enum rtl8168_8101_registers {
415 #define CSIAR_FLAG 0x80000000
416 #define CSIAR_WRITE_CMD 0x80000000
417 #define CSIAR_BYTE_ENABLE 0x0f
418 #define CSIAR_BYTE_ENABLE_SHIFT 12
419 #define CSIAR_ADDR_MASK 0x0fff
420 #define CSIAR_FUNC_CARD 0x00000000
421 #define CSIAR_FUNC_SDIO 0x00010000
422 #define CSIAR_FUNC_NIC 0x00020000
423 #define CSIAR_FUNC_NIC2 0x00010000
426 #define EPHYAR_FLAG 0x80000000
427 #define EPHYAR_WRITE_CMD 0x80000000
428 #define EPHYAR_REG_MASK 0x1f
429 #define EPHYAR_REG_SHIFT 16
430 #define EPHYAR_DATA_MASK 0xffff
432 #define PFM_EN (1 << 6)
433 #define TX_10M_PS_EN (1 << 7)
435 #define FIX_NAK_1 (1 << 4)
436 #define FIX_NAK_2 (1 << 3)
439 #define NOW_IS_OOB (1 << 7)
440 #define TX_EMPTY (1 << 5)
441 #define RX_EMPTY (1 << 4)
442 #define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
443 #define EN_NDP (1 << 3)
444 #define EN_OOB_RESET (1 << 2)
445 #define LINK_LIST_RDY (1 << 1)
447 #define EFUSEAR_FLAG 0x80000000
448 #define EFUSEAR_WRITE_CMD 0x80000000
449 #define EFUSEAR_READ_CMD 0x00000000
450 #define EFUSEAR_REG_MASK 0x03ff
451 #define EFUSEAR_REG_SHIFT 8
452 #define EFUSEAR_DATA_MASK 0xff
454 #define PFM_D3COLD_EN (1 << 6)
457 enum rtl8168_registers {
462 #define ERIAR_FLAG 0x80000000
463 #define ERIAR_WRITE_CMD 0x80000000
464 #define ERIAR_READ_CMD 0x00000000
465 #define ERIAR_ADDR_BYTE_ALIGN 4
466 #define ERIAR_TYPE_SHIFT 16
467 #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
468 #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
469 #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
470 #define ERIAR_MASK_SHIFT 12
471 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
472 #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
473 #define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT)
474 #define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
475 #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
476 EPHY_RXER_NUM = 0x7c,
477 OCPDR = 0xb0, /* OCP GPHY access */
478 #define OCPDR_WRITE_CMD 0x80000000
479 #define OCPDR_READ_CMD 0x00000000
480 #define OCPDR_REG_MASK 0x7f
481 #define OCPDR_GPHY_REG_SHIFT 16
482 #define OCPDR_DATA_MASK 0xffff
484 #define OCPAR_FLAG 0x80000000
485 #define OCPAR_GPHY_WRITE_CMD 0x8000f060
486 #define OCPAR_GPHY_READ_CMD 0x0000f060
488 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
489 MISC = 0xf0, /* 8168e only. */
490 #define TXPLA_RST (1 << 29)
491 #define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
492 #define PWM_EN (1 << 22)
493 #define RXDV_GATED_EN (1 << 19)
494 #define EARLY_TALLY_EN (1 << 16)
497 enum rtl_register_content {
498 /* InterruptStatusBits */
502 TxDescUnavail = 0x0080,
526 /* TXPoll register p.5 */
527 HPQ = 0x80, /* Poll cmd on the high prio queue */
528 NPQ = 0x40, /* Poll cmd on the low prio queue */
529 FSWInt = 0x01, /* Forced software interrupt */
533 Cfg9346_Unlock = 0xc0,
538 AcceptBroadcast = 0x08,
539 AcceptMulticast = 0x04,
541 AcceptAllPhys = 0x01,
542 #define RX_CONFIG_ACCEPT_MASK 0x3f
545 TxInterFrameGapShift = 24,
546 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
548 /* Config1 register p.24 */
551 Speed_down = (1 << 4),
555 PMEnable = (1 << 0), /* Power Management Enable */
557 /* Config2 register p. 25 */
558 ClkReqEn = (1 << 7), /* Clock Request Enable */
559 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
560 PCI_Clock_66MHz = 0x01,
561 PCI_Clock_33MHz = 0x00,
563 /* Config3 register p.25 */
564 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
565 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
566 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
567 Rdy_to_L23 = (1 << 1), /* L23 Enable */
568 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
570 /* Config4 register */
571 Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */
573 /* Config5 register p.27 */
574 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
575 MWF = (1 << 5), /* Accept Multicast wakeup frame */
576 UWF = (1 << 4), /* Accept Unicast wakeup frame */
578 LanWake = (1 << 1), /* LanWake enable/disable */
579 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
580 ASPM_en = (1 << 0), /* ASPM enable */
583 TBIReset = 0x80000000,
584 TBILoopback = 0x40000000,
585 TBINwEnable = 0x20000000,
586 TBINwRestart = 0x10000000,
587 TBILinkOk = 0x02000000,
588 TBINwComplete = 0x01000000,
591 EnableBist = (1 << 15), // 8168 8101
592 Mac_dbgo_oe = (1 << 14), // 8168 8101
593 Normal_mode = (1 << 13), // unused
594 Force_half_dup = (1 << 12), // 8168 8101
595 Force_rxflow_en = (1 << 11), // 8168 8101
596 Force_txflow_en = (1 << 10), // 8168 8101
597 Cxpl_dbg_sel = (1 << 9), // 8168 8101
598 ASF = (1 << 8), // 8168 8101
599 PktCntrDisable = (1 << 7), // 8168 8101
600 Mac_dbgo_sel = 0x001c, // 8168
605 INTT_0 = 0x0000, // 8168
606 INTT_1 = 0x0001, // 8168
607 INTT_2 = 0x0002, // 8168
608 INTT_3 = 0x0003, // 8168
610 /* rtl8169_PHYstatus */
621 TBILinkOK = 0x02000000,
623 /* DumpCounterCommand */
626 /* magic enable v2 */
627 MagicPacket_v2 = (1 << 16), /* Wake up when receives a Magic Packet */
631 /* First doubleword. */
632 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
633 RingEnd = (1 << 30), /* End of descriptor ring */
634 FirstFrag = (1 << 29), /* First segment of a packet */
635 LastFrag = (1 << 28), /* Final segment of a packet */
639 enum rtl_tx_desc_bit {
640 /* First doubleword. */
641 TD_LSO = (1 << 27), /* Large Send Offload */
642 #define TD_MSS_MAX 0x07ffu /* MSS value */
644 /* Second doubleword. */
645 TxVlanTag = (1 << 17), /* Add VLAN tag */
648 /* 8169, 8168b and 810x except 8102e. */
649 enum rtl_tx_desc_bit_0 {
650 /* First doubleword. */
651 #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
652 TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
653 TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
654 TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
657 /* 8102e, 8168c and beyond. */
658 enum rtl_tx_desc_bit_1 {
659 /* First doubleword. */
660 TD1_GTSENV4 = (1 << 26), /* Giant Send for IPv4 */
661 TD1_GTSENV6 = (1 << 25), /* Giant Send for IPv6 */
662 #define GTTCPHO_SHIFT 18
663 #define GTTCPHO_MAX 0x7fU
665 /* Second doubleword. */
666 #define TCPHO_SHIFT 18
667 #define TCPHO_MAX 0x3ffU
668 #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
669 TD1_IPv6_CS = (1 << 28), /* Calculate IPv6 checksum */
670 TD1_IPv4_CS = (1 << 29), /* Calculate IPv4 checksum */
671 TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
672 TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
675 enum rtl_rx_desc_bit {
677 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
678 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
680 #define RxProtoUDP (PID1)
681 #define RxProtoTCP (PID0)
682 #define RxProtoIP (PID1 | PID0)
683 #define RxProtoMask RxProtoIP
685 IPFail = (1 << 16), /* IP checksum failed */
686 UDPFail = (1 << 15), /* UDP/IP checksum failed */
687 TCPFail = (1 << 14), /* TCP/IP checksum failed */
688 RxVlanTag = (1 << 16), /* VLAN tag available */
691 #define RsvdMask 0x3fffc000
708 u8 __pad[sizeof(void *) - sizeof(u32)];
712 RTL_FEATURE_WOL = (1 << 0),
713 RTL_FEATURE_MSI = (1 << 1),
714 RTL_FEATURE_GMII = (1 << 2),
717 struct rtl8169_counters {
724 __le32 tx_one_collision;
725 __le32 tx_multi_collision;
734 RTL_FLAG_TASK_ENABLED,
735 RTL_FLAG_TASK_SLOW_PENDING,
736 RTL_FLAG_TASK_RESET_PENDING,
737 RTL_FLAG_TASK_PHY_PENDING,
741 struct rtl8169_stats {
744 struct u64_stats_sync syncp;
747 struct rtl8169_private {
748 void __iomem *mmio_addr; /* memory map physical address */
749 struct pci_dev *pci_dev;
750 struct net_device *dev;
751 struct napi_struct napi;
755 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
756 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
758 struct rtl8169_stats rx_stats;
759 struct rtl8169_stats tx_stats;
760 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
761 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
762 dma_addr_t TxPhyAddr;
763 dma_addr_t RxPhyAddr;
764 void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
765 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
766 struct timer_list timer;
772 void (*write)(struct rtl8169_private *, int, int);
773 int (*read)(struct rtl8169_private *, int);
776 struct pll_power_ops {
777 void (*down)(struct rtl8169_private *);
778 void (*up)(struct rtl8169_private *);
782 void (*enable)(struct rtl8169_private *);
783 void (*disable)(struct rtl8169_private *);
787 void (*write)(struct rtl8169_private *, int, int);
788 u32 (*read)(struct rtl8169_private *, int);
791 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
792 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
793 void (*phy_reset_enable)(struct rtl8169_private *tp);
794 void (*hw_start)(struct net_device *);
795 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
796 unsigned int (*link_ok)(void __iomem *);
797 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
798 bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *);
801 DECLARE_BITMAP(flags, RTL_FLAG_MAX);
803 struct work_struct work;
808 struct mii_if_info mii;
809 struct rtl8169_counters counters;
814 const struct firmware *fw;
816 #define RTL_VER_SIZE 32
818 char version[RTL_VER_SIZE];
820 struct rtl_fw_phy_action {
825 #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
830 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
831 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
832 module_param(use_dac, int, 0);
833 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
834 module_param_named(debug, debug.msg_enable, int, 0);
835 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
836 MODULE_LICENSE("GPL");
837 MODULE_VERSION(RTL8169_VERSION);
838 MODULE_FIRMWARE(FIRMWARE_8168D_1);
839 MODULE_FIRMWARE(FIRMWARE_8168D_2);
840 MODULE_FIRMWARE(FIRMWARE_8168E_1);
841 MODULE_FIRMWARE(FIRMWARE_8168E_2);
842 MODULE_FIRMWARE(FIRMWARE_8168E_3);
843 MODULE_FIRMWARE(FIRMWARE_8105E_1);
844 MODULE_FIRMWARE(FIRMWARE_8168F_1);
845 MODULE_FIRMWARE(FIRMWARE_8168F_2);
846 MODULE_FIRMWARE(FIRMWARE_8402_1);
847 MODULE_FIRMWARE(FIRMWARE_8411_1);
848 MODULE_FIRMWARE(FIRMWARE_8411_2);
849 MODULE_FIRMWARE(FIRMWARE_8106E_1);
850 MODULE_FIRMWARE(FIRMWARE_8106E_2);
851 MODULE_FIRMWARE(FIRMWARE_8168G_2);
852 MODULE_FIRMWARE(FIRMWARE_8168G_3);
853 MODULE_FIRMWARE(FIRMWARE_8168H_1);
854 MODULE_FIRMWARE(FIRMWARE_8168H_2);
/* Serialize the deferred work handler (tp->wk) against the ndo/ethtool
 * entry points via the driver-private mutex. */
856 static void rtl_lock_work(struct rtl8169_private *tp)
858 	mutex_lock(&tp->wk.mutex);
861 static void rtl_unlock_work(struct rtl8169_private *tp)
863 	mutex_unlock(&tp->wk.mutex);
/* Set the PCIe Max_Read_Request_Size field in Device Control; `force`
 * must already be encoded for the PCI_EXP_DEVCTL_READRQ bit field. */
866 static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
868 	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
869 					   PCI_EXP_DEVCTL_READRQ, force);
/* Fragment of struct rtl_cond: a named, pollable hardware condition
 * (the `msg` member used by rtl_loop_wait is not visible in this
 * listing but is referenced below as c->msg). */
873 	bool (*check)(struct rtl8169_private *);
/* Busy-wait helper passed as the `delay` callback below (body not
 * shown here; presumably wraps udelay(d) — TODO confirm). */
877 static void rtl_udelay(unsigned int d)
/* Poll c->check() up to n times, calling delay(d) between attempts.
 * Returns true as soon as the condition equals `high`; on timeout it
 * logs the condition name and the stuck level and returns false. */
882 static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
883 		      void (*delay)(unsigned int), unsigned int d, int n,
888 	for (i = 0; i < n; i++) {
890 		if (c->check(tp) == high)
893 	netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
894 		  c->msg, !high, n, d);
/* Convenience wrappers: udelay-based spins (d in microseconds) and
 * msleep-based waits (d in milliseconds), each in a wait-for-high and
 * wait-for-low flavor. */
898 static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
899 				      const struct rtl_cond *c,
900 				      unsigned int d, int n)
902 	return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
905 static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
906 				     const struct rtl_cond *c,
907 				     unsigned int d, int n)
909 	return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
912 static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
913 				      const struct rtl_cond *c,
914 				      unsigned int d, int n)
916 	return rtl_loop_wait(tp, c, msleep, d, n, true);
919 static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
920 				     const struct rtl_cond *c,
921 				     unsigned int d, int n)
923 	return rtl_loop_wait(tp, c, msleep, d, n, false);
/* Declare a rtl_cond named `name`: forward-declares name##_check,
 * defines the static descriptor binding it (the .msg initializer line
 * is missing from this listing), then opens the check function body. */
926 #define DECLARE_RTL_COND(name)				\
927 static bool name ## _check(struct rtl8169_private *);	\
929 static const struct rtl_cond name = {			\
930 	.check = name ## _check,			\
934 static bool name ## _check(struct rtl8169_private *tp)
/* OCPAR busy flag: set while an OCP access is in flight. */
936 DECLARE_RTL_COND(rtl_ocpar_cond)
938 	void __iomem *ioaddr = tp->mmio_addr;
940 	return RTL_R32(OCPAR) & OCPAR_FLAG;
/* Read an OCP register: program byte-enable mask (4 bits at bit 12)
 * and 12-bit address into OCPAR, wait for completion, then return
 * OCPDR (success path not visible; ~0 on timeout is the convention
 * used by the sibling readers below — TODO confirm). */
943 static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
945 	void __iomem *ioaddr = tp->mmio_addr;
947 	RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
949 	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
/* Write an OCP register: data first, then address + write flag, then
 * wait for the busy flag to clear. Completion failure is ignored. */
953 static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
955 	void __iomem *ioaddr = tp->mmio_addr;
957 	RTL_W32(OCPDR, data);
958 	RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
960 	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
/* ERIAR busy flag, polled by the ERI accessors further below. */
963 DECLARE_RTL_COND(rtl_eriar_cond)
965 	void __iomem *ioaddr = tp->mmio_addr;
967 	return RTL_R32(ERIAR) & ERIAR_FLAG;
/* Notify the on-board OOB/DASH firmware of `cmd` via an ERI write to
 * 0x10e8 (the ERIDR data write is missing from this listing), then
 * ring the firmware doorbell through OCP reg 0x30. */
970 static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
972 	void __iomem *ioaddr = tp->mmio_addr;
975 	RTL_W32(ERIAR, 0x800010e8);
978 	if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
981 	ocp_write(tp, 0x1, 0x30, 0x00000001);
/* Commands understood by the OOB/DASH firmware. */
984 #define OOB_CMD_RESET		0x00
985 #define OOB_CMD_DRIVER_START	0x05
986 #define OOB_CMD_DRIVER_STOP	0x06
/* The OOB status register lives at a different OCP offset on VER_31. */
988 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
990 	return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
/* Firmware "driver present" handshake bit (0x800) in the OOB status
 * register. */
993 DECLARE_RTL_COND(rtl_ocp_read_cond)
997 	reg = rtl8168_get_ocp_reg(tp);
999 	return ocp_read(tp, 0x0f, reg) & 0x00000800;
/* Tell the firmware the driver is taking over / releasing the NIC,
 * then wait (up to 10 x 10 ms) for it to acknowledge. */
1002 static void rtl8168_driver_start(struct rtl8169_private *tp)
1004 	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
1006 	rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
1009 static void rtl8168_driver_stop(struct rtl8169_private *tp)
1011 	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
1013 	rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
/* Returns 1 when the DASH management feature is enabled (bit 15 of the
 * OOB status register), 0 otherwise. */
1016 static int r8168dp_check_dash(struct rtl8169_private *tp)
1018 	u16 reg = rtl8168_get_ocp_reg(tp);
1020 	return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
/* Sanity-check an OCP register address: must fit in 16 bits and be
 * even (bit 0 clear). Logs and (per the visible control flow) reports
 * failure so callers can bail out. */
1023 static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
1025 	if (reg & 0xffff0001) {
1026 		netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
/* GPHY_OCP busy flag (shares the OCPAR_FLAG bit position). */
1032 DECLARE_RTL_COND(rtl_ocp_gphy_cond)
1034 	void __iomem *ioaddr = tp->mmio_addr;
1036 	return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
/* PHY OCP write: 16-bit register address packed at bit 15, data in the
 * low half; waits for the busy flag to clear. */
1039 static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1041 	void __iomem *ioaddr = tp->mmio_addr;
1043 	if (rtl_ocp_reg_failure(tp, reg))
1046 	RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
1048 	rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
/* PHY OCP read: returns the low 16 bits of GPHY_OCP on success, ~0 on
 * timeout or bad address. */
1051 static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
1053 	void __iomem *ioaddr = tp->mmio_addr;
1055 	if (rtl_ocp_reg_failure(tp, reg))
1058 	RTL_W32(GPHY_OCP, reg << 15);
1060 	return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
1061 		(RTL_R32(GPHY_OCP) & 0xffff) : ~0;
/* MAC OCP accessors go through OCPDR directly; note there is no
 * completion poll here, unlike the PHY OCP path above. */
1064 static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1066 	void __iomem *ioaddr = tp->mmio_addr;
1068 	if (rtl_ocp_reg_failure(tp, reg))
1071 	RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
1074 static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
1076 	void __iomem *ioaddr = tp->mmio_addr;
1078 	if (rtl_ocp_reg_failure(tp, reg))
1081 	RTL_W32(OCPDR, reg << 15);
1083 	return RTL_R32(OCPDR);
/* OCP address of the standard MII register window on 8168g-class
 * chips. */
1086 #define OCP_STD_PHY_BASE	0xa400
/* MDIO ops for 8168g+: MII registers are mapped into PHY OCP space at
 * tp->ocp_base + reg*2. A page-select write (the guarding condition is
 * missing from this listing — presumably reg 0x1f; TODO confirm)
 * switches ocp_base: value<<4, or back to the standard window for
 * value 0. */
1088 static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
1091 		tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
1095 	if (tp->ocp_base != OCP_STD_PHY_BASE)
1098 	r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
1101 static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
1103 	if (tp->ocp_base != OCP_STD_PHY_BASE)
1106 	return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
/* MAC MCU variant: same paging scheme, but registers are byte-indexed
 * (no *2) and go through the MAC OCP accessors. */
1109 static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value)
1112 		tp->ocp_base = value << 4;
1116 	r8168_mac_ocp_write(tp, tp->ocp_base + reg, value);
1119 static int mac_mcu_read(struct rtl8169_private *tp, int reg)
1121 	return r8168_mac_ocp_read(tp, tp->ocp_base + reg);
/* PHYAR busy/valid flag (bit 31). */
1124 DECLARE_RTL_COND(rtl_phyar_cond)
1126 	void __iomem *ioaddr = tp->mmio_addr;
1128 	return RTL_R32(PHYAR) & 0x80000000;
/* Classic 8169 MDIO write: bit 31 = write command, 5-bit register at
 * bit 16, 16-bit value; poll until the command completes. */
1131 static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
1133 	void __iomem *ioaddr = tp->mmio_addr;
1135 	RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
1137 	rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
1139 	 * According to hardware specs a 20us delay is required after write
1140 	 * complete indication, but before sending next command.
/* Classic 8169 MDIO read: bit 31 clear = read command; on completion
 * the value is in the low 16 bits of PHYAR, ~0 on timeout. */
1145 static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
1147 	void __iomem *ioaddr = tp->mmio_addr;
1150 	RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);
1152 	value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
1153 		RTL_R32(PHYAR) & 0xffff : ~0;
1156 	 * According to hardware specs a 20us delay is required after read
1157 	 * complete indication, but before sending next command.
/* 8168dp (DASH) MDIO: stage register/data in OCPDR, kick the access
 * with an OCPAR GPHY command, clear EPHY_RXER_NUM, then wait for the
 * OCPAR busy flag to drop. */
1164 static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
1166 	void __iomem *ioaddr = tp->mmio_addr;
1168 	RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
1169 	RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
1170 	RTL_W32(EPHY_RXER_NUM, 0);
1172 	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
1175 static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
1177 	r8168dp_1_mdio_access(tp, reg,
1178 			      OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
/* Read is two-phase: issue the read command, then a second GPHY-read
 * kick, and fetch the result from OCPDR (~0 on timeout). */
1181 static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
1183 	void __iomem *ioaddr = tp->mmio_addr;
1185 	r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
1188 	RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
1189 	RTL_W32(EPHY_RXER_NUM, 0);
1191 	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
1192 		RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
/* Bit in register 0xd0 that gates host MDIO access on later 8168dp
 * revisions: cleared to start an access window, set to close it. */
1195 #define R8168DP_1_MDIO_ACCESS_BIT	0x00020000
1197 static void r8168dp_2_mdio_start(void __iomem *ioaddr)
1199 	RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
1202 static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
1204 	RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
/* Wrap the plain 8169 MDIO accessors in an open/close of the access
 * window above. */
1207 static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
1209 	void __iomem *ioaddr = tp->mmio_addr;
1211 	r8168dp_2_mdio_start(ioaddr);
1213 	r8169_mdio_write(tp, reg, value);
1215 	r8168dp_2_mdio_stop(ioaddr);
1218 static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
1220 	void __iomem *ioaddr = tp->mmio_addr;
1223 	r8168dp_2_mdio_start(ioaddr);
1225 	value = r8169_mdio_read(tp, reg);
1227 	r8168dp_2_mdio_stop(ioaddr);
/* PHY register access dispatched through the per-chip mdio_ops hooks
 * selected at probe time. */
1232 static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
1234 	tp->mdio_ops.write(tp, location, val);
1237 static int rtl_readphy(struct rtl8169_private *tp, int location)
1239 	return tp->mdio_ops.read(tp, location);
/* OR `value` into a PHY register (read-modify-write, set-only). */
1242 static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
1244 	rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
/* Set bits `p` and clear bits `m` in one RMW; note the clear mask is
 * applied last, so bits in both p and m end up cleared. */
1247 static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
1251 	val = rtl_readphy(tp, reg_addr);
1252 	rtl_writephy(tp, reg_addr, (val | p) & ~m);
/* mii_if_info callbacks: the single internal PHY is used regardless of
 * phy_id, which is ignored. */
1255 static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
1258 	struct rtl8169_private *tp = netdev_priv(dev);
1260 	rtl_writephy(tp, location, val);
1263 static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
1265 	struct rtl8169_private *tp = netdev_priv(dev);
1267 	return rtl_readphy(tp, location);
/* EPHYAR busy flag. */
1270 DECLARE_RTL_COND(rtl_ephyar_cond)
1272 	void __iomem *ioaddr = tp->mmio_addr;
1274 	return RTL_R32(EPHYAR) & EPHYAR_FLAG;
/* PCIe PHY (EPHY) write: 5-bit register at bit 16, 16-bit data; poll
 * for completion, ignoring timeout. */
1277 static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
1279 	void __iomem *ioaddr = tp->mmio_addr;
1281 	RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
1282 		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1284 	rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
/* EPHY read: returns the 16-bit data field, ~0 on timeout. */
1289 static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
1291 	void __iomem *ioaddr = tp->mmio_addr;
1293 	RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1295 	return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
1296 		RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
/* ERI (extended register interface) write: addr must be dword-aligned
 * and mask non-zero (enforced by BUG_ON); mask/type select byte lanes
 * and target block (ERIAR_EXGMAC etc.). */
1299 static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1302 	void __iomem *ioaddr = tp->mmio_addr;
1304 	BUG_ON((addr & 3) || (mask == 0));
1305 	RTL_W32(ERIDR, val);
1306 	RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
1308 	rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
/* ERI read: always reads all four byte lanes; ~0 on timeout. */
1311 static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
1313 	void __iomem *ioaddr = tp->mmio_addr;
1315 	RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
1317 	return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
1318 		RTL_R32(ERIDR) : ~0;
/* Read-modify-write: clear bits `m`, then set bits `p`. */
1321 static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1326 	val = rtl_eri_read(tp, addr, type);
1327 	rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
/* Apply a table of ERI writes to the EXGMAC block (the loop header is
 * missing from this listing; r is presumably advanced over len
 * entries — TODO confirm). */
1336 static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1337 				   const struct exgmac_reg *r, int len)
1340 		rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
/* EFUSEAR busy flag. */
1345 DECLARE_RTL_COND(rtl_efusear_cond)
1347 	void __iomem *ioaddr = tp->mmio_addr;
1349 	return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
/* Read one byte from the 8168d eFuse: 10-bit address at bit 8; returns
 * the low data byte, ~0 on timeout (note ~0 does not fit in the u8
 * return — callers see 0xff; NOTE(review): inherited quirk). */
1352 static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
1354 	void __iomem *ioaddr = tp->mmio_addr;
1356 	RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
1358 	return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
1359 		RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
/* Snapshot the pending interrupt event bits (IntrStatus). */
1362 static u16 rtl_get_events(struct rtl8169_private *tp)
1364 void __iomem *ioaddr = tp->mmio_addr;
1366 return RTL_R16(IntrStatus);
/* Acknowledge (clear) the interrupt events named in @bits. */
1369 static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
1371 void __iomem *ioaddr = tp->mmio_addr;
1373 RTL_W16(IntrStatus, bits);
/* Mask all chip interrupt sources. */
1377 static void rtl_irq_disable(struct rtl8169_private *tp)
1379 void __iomem *ioaddr = tp->mmio_addr;
1381 RTL_W16(IntrMask, 0);
/* Unmask exactly the interrupt sources named in @bits. */
1385 static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
1387 void __iomem *ioaddr = tp->mmio_addr;
1389 RTL_W16(IntrMask, bits);
1392 #define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1393 #define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1394 #define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
/* Enable the NAPI rx/tx events plus this chip's slow-event set. */
1396 static void rtl_irq_enable_all(struct rtl8169_private *tp)
1398 rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
/* Disable all interrupts and acknowledge anything already pending. */
1401 static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1403 void __iomem *ioaddr = tp->mmio_addr;
1405 rtl_irq_disable(tp);
1406 rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
/* TBI flavour: non-zero while the TBI reset bit is still set. */
1410 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
1412 void __iomem *ioaddr = tp->mmio_addr;
1414 return RTL_R32(TBICSR) & TBIReset;
/* MII flavour: non-zero while the PHY's BMCR reset bit is still set. */
1417 static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
1419 return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
/* TBI flavour of the link-up test. */
1422 static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
1424 return RTL_R32(TBICSR) & TBILinkOk;
/* MII flavour of the link-up test (PHYstatus register). */
1427 static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
1429 return RTL_R8(PHYstatus) & LinkStatus;
/* Kick off a TBI reset by setting the TBIReset bit. */
1432 static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
1434 void __iomem *ioaddr = tp->mmio_addr;
1436 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
/* Kick off a PHY reset via BMCR_RESET (value masked to 16 bits). */
1439 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1443 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1444 rtl_writephy(tp, MII_BMCR, val & 0xffff);
/* Per-chip ERI fixups applied on a link state change. The register
 * values written depend on the negotiated speed read from PHYstatus;
 * only runs when the interface is up. */
1447 static void rtl_link_chg_patch(struct rtl8169_private *tp)
1449 void __iomem *ioaddr = tp->mmio_addr;
1450 struct net_device *dev = tp->dev;
1452 if (!netif_running(dev))
1455 if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1456 tp->mac_version == RTL_GIGA_MAC_VER_38) {
1457 if (RTL_R8(PHYstatus) & _1000bpsF) {
1458 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1460 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1462 } else if (RTL_R8(PHYstatus) & _100bps) {
1463 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1465 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1468 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1470 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1473 /* Reset packet filter */
1474 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
1476 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
1478 } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1479 tp->mac_version == RTL_GIGA_MAC_VER_36) {
1480 if (RTL_R8(PHYstatus) & _1000bpsF) {
1481 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1483 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1486 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1488 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1491 } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1492 if (RTL_R8(PHYstatus) & _10bps) {
1493 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
1495 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
1498 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
/* Propagate the hardware link state to the net stack. On link-up, apply
 * the chip patch and resume a runtime-suspended device; on link-down,
 * schedule a delayed runtime suspend. @pm gates the PM calls. */
1504 static void __rtl8169_check_link_status(struct net_device *dev,
1505 struct rtl8169_private *tp,
1506 void __iomem *ioaddr, bool pm)
1508 if (tp->link_ok(ioaddr)) {
1509 rtl_link_chg_patch(tp);
1510 /* This is to cancel a scheduled suspend if there's one. */
1512 pm_request_resume(&tp->pci_dev->dev);
1513 netif_carrier_on(dev);
1514 if (net_ratelimit())
1515 netif_info(tp, ifup, dev, "link up\n");
1517 netif_carrier_off(dev);
1518 netif_info(tp, ifdown, dev, "link down\n");
1520 pm_schedule_suspend(&tp->pci_dev->dev, 5000);
/* Non-PM wrapper around __rtl8169_check_link_status(). */
1524 static void rtl8169_check_link_status(struct net_device *dev,
1525 struct rtl8169_private *tp,
1526 void __iomem *ioaddr)
1528 __rtl8169_check_link_status(dev, tp, ioaddr, false);
1531 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
/* Build a WAKE_* bitmap from the chip's Config1/3/5 registers. For
 * VER_45/46 the magic-packet flag lives in ERI 0xdc (MagicPacket_v2)
 * instead of Config3. Returns 0 when PME is disabled. */
1533 static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
1535 void __iomem *ioaddr = tp->mmio_addr;
1539 options = RTL_R8(Config1);
1540 if (!(options & PMEnable))
1543 options = RTL_R8(Config3);
1544 if (options & LinkUp)
1545 wolopts |= WAKE_PHY;
1546 switch (tp->mac_version) {
1547 case RTL_GIGA_MAC_VER_45:
1548 case RTL_GIGA_MAC_VER_46:
1549 if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
1550 wolopts |= WAKE_MAGIC;
1553 if (options & MagicPacket)
1554 wolopts |= WAKE_MAGIC;
1558 options = RTL_R8(Config5)
1560 wolopts |= WAKE_UCAST;
1562 wolopts |= WAKE_BCAST;
1564 wolopts |= WAKE_MCAST;
/* ethtool .get_wol: report supported and currently enabled wake-up
 * options (reads hardware under the rtl work lock). */
1569 static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1571 struct rtl8169_private *tp = netdev_priv(dev);
1575 wol->supported = WAKE_ANY;
1576 wol->wolopts = __rtl8169_get_wol(tp);
1578 rtl_unlock_work(tp);
/* Program the WAKE_* options in @wolopts into the chip's config
 * registers via the cfg[] table (unlocked with Cfg9346). VER_45/46 skip
 * the final Config3/MagicPacket table entry and handle WAKE_MAGIC
 * separately; older chips also toggle PMEnable, newer ones PME_SIGNAL. */
1581 static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1583 void __iomem *ioaddr = tp->mmio_addr;
1584 unsigned int i, tmp;
1585 static const struct {
1590 { WAKE_PHY, Config3, LinkUp },
1591 { WAKE_UCAST, Config5, UWF },
1592 { WAKE_BCAST, Config5, BWF },
1593 { WAKE_MCAST, Config5, MWF },
1594 { WAKE_ANY, Config5, LanWake },
1595 { WAKE_MAGIC, Config3, MagicPacket }
1599 RTL_W8(Cfg9346, Cfg9346_Unlock);
1601 switch (tp->mac_version) {
1602 case RTL_GIGA_MAC_VER_45:
1603 case RTL_GIGA_MAC_VER_46:
1604 tmp = ARRAY_SIZE(cfg) - 1;
1605 if (wolopts & WAKE_MAGIC)
1621 tmp = ARRAY_SIZE(cfg);
1625 for (i = 0; i < tmp; i++) {
1626 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
1627 if (wolopts & cfg[i].opt)
1628 options |= cfg[i].mask;
1629 RTL_W8(cfg[i].reg, options);
1632 switch (tp->mac_version) {
1633 case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
1634 options = RTL_R8(Config1) & ~PMEnable;
1636 options |= PMEnable;
1637 RTL_W8(Config1, options);
1640 options = RTL_R8(Config2) & ~PME_SIGNAL;
1642 options |= PME_SIGNAL;
1643 RTL_W8(Config2, options);
1647 RTL_W8(Cfg9346, Cfg9346_Lock);
/* ethtool .set_wol: track WoL in tp->features, program the hardware,
 * and tell the PM core whether the device may wake the system. */
1650 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1652 struct rtl8169_private *tp = netdev_priv(dev);
1657 tp->features |= RTL_FEATURE_WOL;
1659 tp->features &= ~RTL_FEATURE_WOL;
1660 __rtl8169_set_wol(tp, wol->wolopts);
1662 rtl_unlock_work(tp);
1664 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
/* Firmware file name for this chip, from the rtl_chip_infos table. */
1669 static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
1671 return rtl_chip_infos[tp->mac_version].fw_name;
/* ethtool .get_drvinfo: driver name/version, PCI bus id, and the loaded
 * firmware version when a firmware blob is present. */
1674 static void rtl8169_get_drvinfo(struct net_device *dev,
1675 struct ethtool_drvinfo *info)
1677 struct rtl8169_private *tp = netdev_priv(dev);
1678 struct rtl_fw *rtl_fw = tp->rtl_fw;
1680 strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1681 strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
1682 strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
1683 BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
1684 if (!IS_ERR_OR_NULL(rtl_fw))
1685 strlcpy(info->fw_version, rtl_fw->version,
1686 sizeof(info->fw_version));
/* ethtool .get_regs_len: size of the register dump. */
1689 static int rtl8169_get_regs_len(struct net_device *dev)
1691 return R8169_REGS_SIZE;
/* Speed setter for TBI (fibre) chips: only forced 1000/full or
 * autonegotiation are meaningful; anything else is refused with a
 * warning. @ignored keeps the signature shared with the xmii variant. */
1694 static int rtl8169_set_speed_tbi(struct net_device *dev,
1695 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1697 struct rtl8169_private *tp = netdev_priv(dev);
1698 void __iomem *ioaddr = tp->mmio_addr;
1702 reg = RTL_R32(TBICSR);
1703 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1704 (duplex == DUPLEX_FULL)) {
1705 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1706 } else if (autoneg == AUTONEG_ENABLE)
1707 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
1709 netif_warn(tp, link, dev,
1710 "incorrect speed setting refused in TBI mode\n");
/* Speed setter for MII/GMII chips. With autoneg, translate the
 * ADVERTISED_* bits in @adv into MII_ADVERTISE/MII_CTRL1000 (gigabit
 * only when the PHY supports GMII) and restart negotiation; otherwise
 * force speed/duplex through BMCR. VER_02/03 need extra PHY writes for
 * forced 100 Mbps. */
1717 static int rtl8169_set_speed_xmii(struct net_device *dev,
1718 u8 autoneg, u16 speed, u8 duplex, u32 adv)
1720 struct rtl8169_private *tp = netdev_priv(dev);
1721 int giga_ctrl, bmcr;
1724 rtl_writephy(tp, 0x1f, 0x0000);
1726 if (autoneg == AUTONEG_ENABLE) {
1729 auto_nego = rtl_readphy(tp, MII_ADVERTISE);
1730 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
1731 ADVERTISE_100HALF | ADVERTISE_100FULL);
1733 if (adv & ADVERTISED_10baseT_Half)
1734 auto_nego |= ADVERTISE_10HALF;
1735 if (adv & ADVERTISED_10baseT_Full)
1736 auto_nego |= ADVERTISE_10FULL;
1737 if (adv & ADVERTISED_100baseT_Half)
1738 auto_nego |= ADVERTISE_100HALF;
1739 if (adv & ADVERTISED_100baseT_Full)
1740 auto_nego |= ADVERTISE_100FULL;
1742 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1744 giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
1745 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1747 /* The 8100e/8101e/8102e do Fast Ethernet only. */
1748 if (tp->mii.supports_gmii) {
1749 if (adv & ADVERTISED_1000baseT_Half)
1750 giga_ctrl |= ADVERTISE_1000HALF;
1751 if (adv & ADVERTISED_1000baseT_Full)
1752 giga_ctrl |= ADVERTISE_1000FULL;
1753 } else if (adv & (ADVERTISED_1000baseT_Half |
1754 ADVERTISED_1000baseT_Full)) {
1755 netif_info(tp, link, dev,
1756 "PHY does not support 1000Mbps\n");
1760 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
1762 rtl_writephy(tp, MII_ADVERTISE, auto_nego);
1763 rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
1767 if (speed == SPEED_10)
1769 else if (speed == SPEED_100)
1770 bmcr = BMCR_SPEED100;
1774 if (duplex == DUPLEX_FULL)
1775 bmcr |= BMCR_FULLDPLX;
1778 rtl_writephy(tp, MII_BMCR, bmcr);
1780 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
1781 tp->mac_version == RTL_GIGA_MAC_VER_03) {
1782 if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
1783 rtl_writephy(tp, 0x17, 0x2138);
1784 rtl_writephy(tp, 0x0e, 0x0260);
1786 rtl_writephy(tp, 0x17, 0x2108);
1787 rtl_writephy(tp, 0x0e, 0x0000);
/* Dispatch to the chip-specific speed setter (tp->set_speed). When
 * gigabit autoneg is requested on a running interface, arm the PHY
 * watchdog timer. */
1796 static int rtl8169_set_speed(struct net_device *dev,
1797 u8 autoneg, u16 speed, u8 duplex, u32 advertising)
1799 struct rtl8169_private *tp = netdev_priv(dev);
1802 ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
1806 if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
1807 (advertising & ADVERTISED_1000baseT_Full)) {
1808 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
/* ethtool .set_settings: stop the PHY timer, then apply the requested
 * autoneg/speed/duplex/advertising under the rtl work lock. */
1814 static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1816 struct rtl8169_private *tp = netdev_priv(dev);
1819 del_timer_sync(&tp->timer);
1822 ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
1823 cmd->duplex, cmd->advertising);
1824 rtl_unlock_work(tp);
/* netdev .ndo_fix_features: drop TSO above the TD_MSS_MAX MTU and drop
 * IP checksum offload on jumbo MTUs the chip can't checksum. */
1829 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1830 netdev_features_t features)
1832 struct rtl8169_private *tp = netdev_priv(dev);
1834 if (dev->mtu > TD_MSS_MAX)
1835 features &= ~NETIF_F_ALL_TSO;
1837 if (dev->mtu > JUMBO_1K &&
1838 !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1839 features &= ~NETIF_F_IP_CSUM;
/* Push feature changes (RX checksum, VLAN tag stripping, RXALL) into
 * CPlusCmd/RxConfig; bails out early if none of those bits changed. */
1844 static void __rtl8169_set_features(struct net_device *dev,
1845 netdev_features_t features)
1847 struct rtl8169_private *tp = netdev_priv(dev);
1848 netdev_features_t changed = features ^ dev->features;
1849 void __iomem *ioaddr = tp->mmio_addr;
1851 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM |
1852 NETIF_F_HW_VLAN_CTAG_RX)))
1855 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) {
1856 if (features & NETIF_F_RXCSUM)
1857 tp->cp_cmd |= RxChkSum;
1859 tp->cp_cmd &= ~RxChkSum;
1861 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
1862 tp->cp_cmd |= RxVlan;
1864 tp->cp_cmd &= ~RxVlan;
1866 RTL_W16(CPlusCmd, tp->cp_cmd);
1869 if (changed & NETIF_F_RXALL) {
1870 int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1871 if (features & NETIF_F_RXALL)
1872 tmp |= (AcceptErr | AcceptRunt);
1873 RTL_W32(RxConfig, tmp);
/* netdev .ndo_set_features: locked wrapper for __rtl8169_set_features. */
1877 static int rtl8169_set_features(struct net_device *dev,
1878 netdev_features_t features)
1880 struct rtl8169_private *tp = netdev_priv(dev);
1883 __rtl8169_set_features(dev, features);
1884 rtl_unlock_work(tp);
/* Build the TX descriptor VLAN field: TxVlanTag plus the byte-swapped
 * tag when the skb carries one, else 0. */
1890 static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
1892 return (vlan_tx_tag_present(skb)) ?
1893 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
/* If the RX descriptor carries a VLAN tag, attach it (byte-swapped) to
 * the skb for hardware-accelerated VLAN handling. */
1896 static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1898 u32 opts2 = le32_to_cpu(desc->opts2);
1900 if (opts2 & RxVlanTag)
1901 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
/* get_settings for TBI (fibre) chips: always 1000/full, autoneg state
 * derived from the TBINwEnable bit. */
1904 static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
1906 struct rtl8169_private *tp = netdev_priv(dev);
1907 void __iomem *ioaddr = tp->mmio_addr;
1911 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
1912 cmd->port = PORT_FIBRE;
1913 cmd->transceiver = XCVR_INTERNAL;
1915 status = RTL_R32(TBICSR);
1916 cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
1917 cmd->autoneg = !!(status & TBINwEnable);
1919 ethtool_cmd_speed_set(cmd, SPEED_1000);
1920 cmd->duplex = DUPLEX_FULL; /* Always set */
/* get_settings for MII chips: defer to the generic mii helper. */
1925 static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
1927 struct rtl8169_private *tp = netdev_priv(dev);
1929 return mii_ethtool_gset(&tp->mii, cmd);
/* ethtool .get_settings: locked dispatch to tp->get_settings. */
1932 static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1934 struct rtl8169_private *tp = netdev_priv(dev);
1938 rc = tp->get_settings(dev, cmd);
1939 rtl_unlock_work(tp);
/* ethtool .get_regs: copy R8169_REGS_SIZE bytes of MMIO register space
 * out to the caller, 4 bytes at a time, under the rtl work lock. */
1944 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1947 struct rtl8169_private *tp = netdev_priv(dev);
1948 u32 __iomem *data = tp->mmio_addr;
1953 for (i = 0; i < R8169_REGS_SIZE; i += 4)
1954 memcpy_fromio(dw++, data++, 4);
1955 rtl_unlock_work(tp);
/* ethtool .get_msglevel accessor. */
1958 static u32 rtl8169_get_msglevel(struct net_device *dev)
1960 struct rtl8169_private *tp = netdev_priv(dev);
1962 return tp->msg_enable;
/* ethtool .set_msglevel accessor. */
1965 static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1967 struct rtl8169_private *tp = netdev_priv(dev);
1969 tp->msg_enable = value;
/* Names for the ethtool statistics, in the same order as the values
 * filled in by rtl8169_get_ethtool_stats(). */
1972 static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1979 "tx_single_collisions",
1980 "tx_multi_collisions",
/* ethtool .get_sset_count: number of statistic strings. */
1988 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1992 return ARRAY_SIZE(rtl8169_gstrings);
/* Poll condition: true while the tally-counter dump is in progress. */
1998 DECLARE_RTL_COND(rtl_counters_cond)
2000 void __iomem *ioaddr = tp->mmio_addr;
2002 return RTL_R32(CounterAddrLow) & CounterDump;
/* Dump the hardware tally counters into tp->counters via a DMA-coherent
 * scratch buffer. Skipped when the receiver is disabled (some chips
 * can't dump then); silently keeps the old values on alloc/poll failure. */
2005 static void rtl8169_update_counters(struct net_device *dev)
2007 struct rtl8169_private *tp = netdev_priv(dev);
2008 void __iomem *ioaddr = tp->mmio_addr;
2009 struct device *d = &tp->pci_dev->dev;
2010 struct rtl8169_counters *counters;
2015 * Some chips are unable to dump tally counters when the receiver
2018 if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
2021 counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
2025 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
2026 cmd = (u64)paddr & DMA_BIT_MASK(32);
2027 RTL_W32(CounterAddrLow, cmd);
2028 RTL_W32(CounterAddrLow, cmd | CounterDump);
2030 if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
2031 memcpy(&tp->counters, counters, sizeof(*counters));
2033 RTL_W32(CounterAddrLow, 0);
2034 RTL_W32(CounterAddrHigh, 0);
2036 dma_free_coherent(d, sizeof(*counters), counters, paddr);
/* ethtool .get_ethtool_stats: refresh the tally counters, then copy
 * them out in rtl8169_gstrings order (mixed 64/32/16-bit LE fields). */
2039 static void rtl8169_get_ethtool_stats(struct net_device *dev,
2040 struct ethtool_stats *stats, u64 *data)
2042 struct rtl8169_private *tp = netdev_priv(dev);
2046 rtl8169_update_counters(dev);
2048 data[0] = le64_to_cpu(tp->counters.tx_packets);
2049 data[1] = le64_to_cpu(tp->counters.rx_packets);
2050 data[2] = le64_to_cpu(tp->counters.tx_errors);
2051 data[3] = le32_to_cpu(tp->counters.rx_errors);
2052 data[4] = le16_to_cpu(tp->counters.rx_missed);
2053 data[5] = le16_to_cpu(tp->counters.align_errors);
2054 data[6] = le32_to_cpu(tp->counters.tx_one_collision);
2055 data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
2056 data[8] = le64_to_cpu(tp->counters.rx_unicast);
2057 data[9] = le64_to_cpu(tp->counters.rx_broadcast);
2058 data[10] = le32_to_cpu(tp->counters.rx_multicast);
2059 data[11] = le16_to_cpu(tp->counters.tx_aborted);
2060 data[12] = le16_to_cpu(tp->counters.tx_underun);
/* ethtool .get_strings: copy out the statistic name table. */
2063 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2067 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
/* ethtool operations table for this driver. */
2072 static const struct ethtool_ops rtl8169_ethtool_ops = {
2073 .get_drvinfo = rtl8169_get_drvinfo,
2074 .get_regs_len = rtl8169_get_regs_len,
2075 .get_link = ethtool_op_get_link,
2076 .get_settings = rtl8169_get_settings,
2077 .set_settings = rtl8169_set_settings,
2078 .get_msglevel = rtl8169_get_msglevel,
2079 .set_msglevel = rtl8169_set_msglevel,
2080 .get_regs = rtl8169_get_regs,
2081 .get_wol = rtl8169_get_wol,
2082 .set_wol = rtl8169_set_wol,
2083 .get_strings = rtl8169_get_strings,
2084 .get_sset_count = rtl8169_get_sset_count,
2085 .get_ethtool_stats = rtl8169_get_ethtool_stats,
2086 .get_ts_info = ethtool_op_get_ts_info,
/* Identify the chip by matching TxConfig against the mac_info mask/val
 * table (first match wins; the all-zero sentinel matches anything and
 * yields RTL_GIGA_MAC_NONE -> @default_version). Some versions are then
 * remapped to their Fast-Ethernet-only siblings when the PHY lacks GMII. */
2089 static void rtl8169_get_mac_version(struct rtl8169_private *tp,
2090 struct net_device *dev, u8 default_version)
2092 void __iomem *ioaddr = tp->mmio_addr;
2094 * The driver currently handles the 8168Bf and the 8168Be identically
2095 * but they can be identified more specifically through the test below
2098 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
2100 * Same thing for the 8101Eb and the 8101Ec:
2102 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
2104 static const struct rtl_mac_info {
2110 { 0x7cf00000, 0x54100000, RTL_GIGA_MAC_VER_46 },
2111 { 0x7cf00000, 0x54000000, RTL_GIGA_MAC_VER_45 },
2114 { 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 },
2115 { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 },
2116 { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
2117 { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
2120 { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
2121 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
2122 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
2125 { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
2126 { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
2127 { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
2128 { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
2131 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
2132 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
2133 { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
2135 /* 8168DP family. */
2136 { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
2137 { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
2138 { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
2141 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
2142 { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
2143 { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
2144 { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
2145 { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
2146 { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
2147 { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 },
2148 { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 },
2149 { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 },
2152 { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
2153 { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 },
2154 { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
2155 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
2158 { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 },
2159 { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
2160 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
2161 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
2162 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
2163 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
2164 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
2165 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
2166 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
2167 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
2168 { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
2169 { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
2170 { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
2171 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
2172 { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
2173 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
2174 { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
2175 { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
2176 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
2177 /* FIXME: where did these entries come from ? -- FR */
2178 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
2179 { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
2182 { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
2183 { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
2184 { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
2185 { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
2186 { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
2187 { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
2190 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
2192 const struct rtl_mac_info *p = mac_info;
2195 reg = RTL_R32(TxConfig);
2196 while ((reg & p->mask) != p->val)
2198 tp->mac_version = p->mac_version;
2200 if (tp->mac_version == RTL_GIGA_MAC_NONE) {
2201 netif_notice(tp, probe, dev,
2202 "unknown MAC, using family default\n");
2203 tp->mac_version = default_version;
2204 } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
2205 tp->mac_version = tp->mii.supports_gmii ?
2206 RTL_GIGA_MAC_VER_42 :
2207 RTL_GIGA_MAC_VER_43;
2208 } else if (tp->mac_version == RTL_GIGA_MAC_VER_45) {
2209 tp->mac_version = tp->mii.supports_gmii ?
2210 RTL_GIGA_MAC_VER_45 :
2211 RTL_GIGA_MAC_VER_47;
2212 } else if (tp->mac_version == RTL_GIGA_MAC_VER_46) {
2213 tp->mac_version = tp->mii.supports_gmii ?
2214 RTL_GIGA_MAC_VER_46 :
2215 RTL_GIGA_MAC_VER_48;
/* Debug helper: print the detected mac_version. */
2219 static void rtl8169_print_mac_version(struct rtl8169_private *tp)
2221 dprintk("mac_version = 0x%02x\n", tp->mac_version);
/* Apply a table of @len {reg, val} PHY writes. */
2229 static void rtl_writephy_batch(struct rtl8169_private *tp,
2230 const struct phy_reg *regs, int len)
2233 rtl_writephy(tp, regs->reg, regs->val);
2238 #define PHY_READ 0x00000000
2239 #define PHY_DATA_OR 0x10000000
2240 #define PHY_DATA_AND 0x20000000
2241 #define PHY_BJMPN 0x30000000
2242 #define PHY_MDIO_CHG 0x40000000
2243 #define PHY_CLEAR_READCOUNT 0x70000000
2244 #define PHY_WRITE 0x80000000
2245 #define PHY_READCOUNT_EQ_SKIP 0x90000000
2246 #define PHY_COMP_EQ_SKIPN 0xa0000000
2247 #define PHY_COMP_NEQ_SKIPN 0xb0000000
2248 #define PHY_WRITE_PREVIOUS 0xc0000000
2249 #define PHY_SKIPN 0xd0000000
2250 #define PHY_DELAY_MS 0xe0000000
2254 char version[RTL_VER_SIZE];
2260 #define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
/* Validate the loaded firmware blob and fill rtl_fw->phy_action/version.
 * Two formats: a headered one (fw_info with checksum, start offset and
 * opcode count) and a raw one (whole file is opcodes; version is taken
 * from the expected firmware name). Returns false on any size/format
 * inconsistency. */
2262 static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2264 const struct firmware *fw = rtl_fw->fw;
2265 struct fw_info *fw_info = (struct fw_info *)fw->data;
2266 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2267 char *version = rtl_fw->version;
2270 if (fw->size < FW_OPCODE_SIZE)
2273 if (!fw_info->magic) {
2274 size_t i, size, start;
2277 if (fw->size < sizeof(*fw_info))
2280 for (i = 0; i < fw->size; i++)
2281 checksum += fw->data[i];
2285 start = le32_to_cpu(fw_info->fw_start);
2286 if (start > fw->size)
2289 size = le32_to_cpu(fw_info->fw_len);
2290 if (size > (fw->size - start) / FW_OPCODE_SIZE)
2293 memcpy(version, fw_info->version, RTL_VER_SIZE);
2295 pa->code = (__le32 *)(fw->data + start);
2298 if (fw->size % FW_OPCODE_SIZE)
2301 strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
2303 pa->code = (__le32 *)fw->data;
2304 pa->size = fw->size / FW_OPCODE_SIZE;
2306 version[RTL_VER_SIZE - 1] = 0;
/* Sanity-check every PHY-firmware opcode: jump/skip targets must stay
 * within the action array and opcodes must be known. Returns false (with
 * an error log) on the first out-of-range or invalid action. */
2313 static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
2314 struct rtl_fw_phy_action *pa)
2319 for (index = 0; index < pa->size; index++) {
2320 u32 action = le32_to_cpu(pa->code[index]);
2321 u32 regno = (action & 0x0fff0000) >> 16;
2323 switch(action & 0xf0000000) {
2328 case PHY_CLEAR_READCOUNT:
2330 case PHY_WRITE_PREVIOUS:
2335 if (regno > index) {
2336 netif_err(tp, ifup, tp->dev,
2337 "Out of range of firmware\n");
2341 case PHY_READCOUNT_EQ_SKIP:
2342 if (index + 2 >= pa->size) {
2343 netif_err(tp, ifup, tp->dev,
2344 "Out of range of firmware\n");
2348 case PHY_COMP_EQ_SKIPN:
2349 case PHY_COMP_NEQ_SKIPN:
2351 if (index + 1 + regno >= pa->size) {
2352 netif_err(tp, ifup, tp->dev,
2353 "Out of range of firmware\n");
2359 netif_err(tp, ifup, tp->dev,
2360 "Invalid action 0x%08x\n", action);
/* Full firmware validation: format check then per-opcode check.
 * (Note: the existing "invalid firwmare" log message has a typo; left
 * untouched here as doc-only edits must not change runtime strings.) */
2369 static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2371 struct net_device *dev = tp->dev;
2374 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2375 netif_err(tp, ifup, dev, "invalid firwmare\n");
2379 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
/* Interpret the firmware's PHY opcode stream: reads/writes, masked
 * updates, conditional skips, and PHY_MDIO_CHG which temporarily swaps
 * the mdio ops for the MAC-MCU accessors. The original mdio ops are
 * saved first and restored at the end. */
2385 static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2387 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2388 struct mdio_ops org, *ops = &tp->mdio_ops;
2392 predata = count = 0;
2393 org.write = ops->write;
2394 org.read = ops->read;
2396 for (index = 0; index < pa->size; ) {
2397 u32 action = le32_to_cpu(pa->code[index]);
2398 u32 data = action & 0x0000ffff;
2399 u32 regno = (action & 0x0fff0000) >> 16;
2404 switch(action & 0xf0000000) {
2406 predata = rtl_readphy(tp, regno);
2423 ops->write = org.write;
2424 ops->read = org.read;
2425 } else if (data == 1) {
2426 ops->write = mac_mcu_write;
2427 ops->read = mac_mcu_read;
2432 case PHY_CLEAR_READCOUNT:
2437 rtl_writephy(tp, regno, data);
2440 case PHY_READCOUNT_EQ_SKIP:
2441 index += (count == data) ? 2 : 1;
2443 case PHY_COMP_EQ_SKIPN:
2444 if (predata == data)
2448 case PHY_COMP_NEQ_SKIPN:
2449 if (predata != data)
2453 case PHY_WRITE_PREVIOUS:
2454 rtl_writephy(tp, regno, predata);
2470 ops->write = org.write;
2471 ops->read = org.read;
/* Release the firmware blob (if any) and mark the state unknown. */
2474 static void rtl_release_firmware(struct rtl8169_private *tp)
2476 if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2477 release_firmware(tp->rtl_fw->fw);
2480 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
/* Run the PHY firmware opcode stream if a valid blob is loaded. */
2483 static void rtl_apply_firmware(struct rtl8169_private *tp)
2485 struct rtl_fw *rtl_fw = tp->rtl_fw;
2487 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2488 if (!IS_ERR_OR_NULL(rtl_fw))
2489 rtl_phy_write_fw(tp, rtl_fw);
/* Apply firmware only after confirming PHY register @reg reads @val;
 * warn (and skip? — continuation lines not visible here) otherwise. */
2492 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2494 if (rtl_readphy(tp, reg) != val)
2495 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2497 rtl_apply_firmware(tp);
/* 8169s PHY init: apply the vendor register table. */
2500 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
2502 static const struct phy_reg phy_reg_init[] = {
2564 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* 8169sb PHY init: apply the vendor register table. */
2567 static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
2569 static const struct phy_reg phy_reg_init[] = {
2575 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* Extra PHY tweak applied only on a specific Gigabyte board
 * (subsystem vendor/device 0x1458:0xe000). */
2578 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2580 struct pci_dev *pdev = tp->pci_dev;
2582 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2583 (pdev->subsystem_device != 0xe000))
2586 rtl_writephy(tp, 0x1f, 0x0001);
2587 rtl_writephy(tp, 0x10, 0xf01b);
2588 rtl_writephy(tp, 0x1f, 0x0000);
/* 8169scd PHY init: vendor table plus the Gigabyte board quirk. */
2591 static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
2593 static const struct phy_reg phy_reg_init[] = {
2633 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2635 rtl8169scd_hw_phy_config_quirk(tp);
/* 8169sce PHY init: apply the vendor register table. */
2638 static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
2640 static const struct phy_reg phy_reg_init[] = {
2688 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* 8168bb PHY init: set bit 0 of reg 0x16 on page 1, then the table. */
2691 static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
2693 static const struct phy_reg phy_reg_init[] = {
2698 rtl_writephy(tp, 0x1f, 0x0001);
2699 rtl_patchphy(tp, 0x16, 1 << 0);
2701 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* 8168be/bf PHY init: apply the vendor register table. */
2704 static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
2706 static const struct phy_reg phy_reg_init[] = {
2712 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* 8168cp (variant 1) PHY init: apply the vendor register table. */
2715 static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
2717 static const struct phy_reg phy_reg_init[] = {
2725 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* 8168cp (variant 2) PHY init: set bit 5 of regs 0x14/0x0d on page 0,
 * then apply the vendor register table. */
2728 static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
2730 static const struct phy_reg phy_reg_init[] = {
2736 rtl_writephy(tp, 0x1f, 0x0000);
2737 rtl_patchphy(tp, 0x14, 1 << 5);
2738 rtl_patchphy(tp, 0x0d, 1 << 5);
2740 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* 8168c (variant 1) PHY init: vendor table, then bit-5 patches on
 * regs 0x14/0x0d, finishing on page 0. */
2743 static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
2745 static const struct phy_reg phy_reg_init[] = {
2765 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2767 rtl_patchphy(tp, 0x14, 1 << 5);
2768 rtl_patchphy(tp, 0x0d, 1 << 5);
2769 rtl_writephy(tp, 0x1f, 0x0000);
/* 8168c (variant 2) PHY init: vendor table, then bit patches on regs
 * 0x16/0x14/0x0d, finishing on page 0. */
2772 static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
2774 static const struct phy_reg phy_reg_init[] = {
2792 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2794 rtl_patchphy(tp, 0x16, 1 << 0);
2795 rtl_patchphy(tp, 0x14, 1 << 5);
2796 rtl_patchphy(tp, 0x0d, 1 << 5);
2797 rtl_writephy(tp, 0x1f, 0x0000);
/* 8168c (variant 3) PHY init: vendor table, then bit patches on regs
 * 0x16/0x14/0x0d, finishing on page 0. */
2800 static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
2802 static const struct phy_reg phy_reg_init[] = {
2814 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2816 rtl_patchphy(tp, 0x16, 1 << 0);
2817 rtl_patchphy(tp, 0x14, 1 << 5);
2818 rtl_patchphy(tp, 0x0d, 1 << 5);
2819 rtl_writephy(tp, 0x1f, 0x0000);
/* 8168c variant 4 uses the same PHY init as variant 3. */
2822 static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
2824 rtl8168c_3_hw_phy_config(tp);
/* 8168d (variant 1) PHY init: base table, regulator fine-tuning, then a
 * branch on e-fuse byte 0x01 == 0xb1 which additionally calibrates
 * register 0x0d toward 0x006c; ends with RSET/PLL tuning and a
 * conditional firmware load keyed on MII_EXPANSION == 0xbf00. */
2827 static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2829 static const struct phy_reg phy_reg_init_0[] = {
2830 /* Channel Estimation */
2851 * Enhance line driver power
2860 * Can not link to 1Gbps with bad cable
2861 * Decrease SNR threshold form 21.07dB to 19.04dB
2870 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2874 * Fine Tune Switching regulator parameter
2876 rtl_writephy(tp, 0x1f, 0x0002);
2877 rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
2878 rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
2880 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2881 static const struct phy_reg phy_reg_init[] = {
2891 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2893 val = rtl_readphy(tp, 0x0d);
2895 if ((val & 0x00ff) != 0x006c) {
2896 static const u32 set[] = {
2897 0x0065, 0x0066, 0x0067, 0x0068,
2898 0x0069, 0x006a, 0x006b, 0x006c
2902 rtl_writephy(tp, 0x1f, 0x0002);
2905 for (i = 0; i < ARRAY_SIZE(set); i++)
2906 rtl_writephy(tp, 0x0d, val | set[i]);
2909 static const struct phy_reg phy_reg_init[] = {
2917 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2920 /* RSET couple improve */
2921 rtl_writephy(tp, 0x1f, 0x0002);
2922 rtl_patchphy(tp, 0x0d, 0x0300);
2923 rtl_patchphy(tp, 0x0f, 0x0010);
2925 /* Fine tune PLL performance */
2926 rtl_writephy(tp, 0x1f, 0x0002);
2927 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2928 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2930 rtl_writephy(tp, 0x1f, 0x0005);
2931 rtl_writephy(tp, 0x05, 0x001b);
2933 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
2935 rtl_writephy(tp, 0x1f, 0x0000);
/* 8168d (variant 2) PHY init: same structure as variant 1 but without
 * the regulator/RSET steps; firmware condition is MII_EXPANSION ==
 * 0xb300. */
2938 static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2940 static const struct phy_reg phy_reg_init_0[] = {
2941 /* Channel Estimation */
2962 * Enhance line driver power
2971 * Can not link to 1Gbps with bad cable
2972 * Decrease SNR threshold form 21.07dB to 19.04dB
2981 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2983 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2984 static const struct phy_reg phy_reg_init[] = {
2995 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2997 val = rtl_readphy(tp, 0x0d);
2998 if ((val & 0x00ff) != 0x006c) {
2999 static const u32 set[] = {
3000 0x0065, 0x0066, 0x0067, 0x0068,
3001 0x0069, 0x006a, 0x006b, 0x006c
3005 rtl_writephy(tp, 0x1f, 0x0002);
3008 for (i = 0; i < ARRAY_SIZE(set); i++)
3009 rtl_writephy(tp, 0x0d, val | set[i]);
3012 static const struct phy_reg phy_reg_init[] = {
3020 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3023 /* Fine tune PLL performance */
3024 rtl_writephy(tp, 0x1f, 0x0002);
3025 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
3026 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
3028 /* Switching regulator Slew rate */
3029 rtl_writephy(tp, 0x1f, 0x0002);
3030 rtl_patchphy(tp, 0x0f, 0x0017);
3032 rtl_writephy(tp, 0x1f, 0x0005);
3033 rtl_writephy(tp, 0x05, 0x001b);
3035 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
3037 rtl_writephy(tp, 0x1f, 0x0000);
/* PHY setup for RTL8168D rev 3 (MAC_VER_27): single batch of vendor register writes. */
3040 static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
3042 static const struct phy_reg phy_reg_init[] = {
3098 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* PHY setup for RTL8168D rev 4 (MAC_VER_28): register batch plus setting bit 5 of reg 0x0d. */
3101 static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
3103 static const struct phy_reg phy_reg_init[] = {
3113 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3114 rtl_patchphy(tp, 0x0d, 1 << 5);	/* read-modify-write: OR in bit 5 */
/*
 * PHY setup for RTL8168E rev 1 (MAC_VER_32/33): firmware load, register
 * batch, then targeted page-indexed tweaks.  rtl_w1w0_phy(tp, reg, set, clr)
 * sets/clears bits in one read-modify-write; 0x1f writes select the page.
 */
3117 static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
3119 static const struct phy_reg phy_reg_init[] = {
3120 /* Enable Delay cap */
3126 /* Channel estimation fine tune */
3135 /* Update PFM & 10M TX idle timer */
3147 rtl_apply_firmware(tp);
3149 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3151 /* DCO enable for 10M IDLE Power */
3152 rtl_writephy(tp, 0x1f, 0x0007);
3153 rtl_writephy(tp, 0x1e, 0x0023);
3154 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
3155 rtl_writephy(tp, 0x1f, 0x0000);
3157 /* For impedance matching */
3158 rtl_writephy(tp, 0x1f, 0x0002);
3159 rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
3160 rtl_writephy(tp, 0x1f, 0x0000);
3162 /* PHY auto speed down */
3163 rtl_writephy(tp, 0x1f, 0x0007);
3164 rtl_writephy(tp, 0x1e, 0x002d);
3165 rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
3166 rtl_writephy(tp, 0x1f, 0x0000);
3167 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3169 rtl_writephy(tp, 0x1f, 0x0005);
3170 rtl_writephy(tp, 0x05, 0x8b86);
3171 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3172 rtl_writephy(tp, 0x1f, 0x0000);
3174 rtl_writephy(tp, 0x1f, 0x0005);
3175 rtl_writephy(tp, 0x05, 0x8b85);
3176 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3177 rtl_writephy(tp, 0x1f, 0x0007);
3178 rtl_writephy(tp, 0x1e, 0x0020);
3179 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
3180 rtl_writephy(tp, 0x1f, 0x0006);
3181 rtl_writephy(tp, 0x00, 0x5a00);
3182 rtl_writephy(tp, 0x1f, 0x0000);
/* NOTE(review): regs 0x0d/0x0e look like Clause-22 MMD access (device 7,
 * reg 0x3c written to 0 — presumably clearing EEE advertisement); confirm. */
3183 rtl_writephy(tp, 0x0d, 0x0007);
3184 rtl_writephy(tp, 0x0e, 0x003c);
3185 rtl_writephy(tp, 0x0d, 0x4007);
3186 rtl_writephy(tp, 0x0e, 0x0000);
3187 rtl_writephy(tp, 0x0d, 0x0000);
/*
 * Mirror the 6-byte MAC address into the ExGMAC registers.  The address is
 * packed little-endian into three 16-bit words w[0..2], then written to
 * 0xe0/0xe4 and duplicated (shifted layout) at 0xf0/0xf4 in one batch.
 */
3190 static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
3193 addr[0] | (addr[1] << 8),
3194 addr[2] | (addr[3] << 8),
3195 addr[4] | (addr[5] << 8)
3197 const struct exgmac_reg e[] = {
3198 { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) },
3199 { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] },
3200 { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 },
3201 { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) }
3204 rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
/*
 * PHY setup for RTL8168E rev 2 (MAC_VER_34): firmware + register batch,
 * 4-corner tuning, auto speed down, EEE-related writes, and finally the
 * GigaMAC MAC-address workaround for broken BIOSes.
 */
3207 static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3209 static const struct phy_reg phy_reg_init[] = {
3210 /* Enable Delay cap */
3219 /* Channel estimation fine tune */
3236 rtl_apply_firmware(tp);
3238 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3240 /* For 4-corner performance improve */
3241 rtl_writephy(tp, 0x1f, 0x0005);
3242 rtl_writephy(tp, 0x05, 0x8b80);
3243 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
3244 rtl_writephy(tp, 0x1f, 0x0000);
3246 /* PHY auto speed down */
/* NOTE(review): 0x1f written twice (0x0004 then 0x0007) — appears to be an
 * extended-page access idiom on this PHY; keep the sequence intact. */
3247 rtl_writephy(tp, 0x1f, 0x0004);
3248 rtl_writephy(tp, 0x1f, 0x0007);
3249 rtl_writephy(tp, 0x1e, 0x002d);
3250 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3251 rtl_writephy(tp, 0x1f, 0x0002);
3252 rtl_writephy(tp, 0x1f, 0x0000);
3253 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3255 /* improve 10M EEE waveform */
3256 rtl_writephy(tp, 0x1f, 0x0005);
3257 rtl_writephy(tp, 0x05, 0x8b86);
3258 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3259 rtl_writephy(tp, 0x1f, 0x0000);
3261 /* Improve 2-pair detection performance */
3262 rtl_writephy(tp, 0x1f, 0x0005);
3263 rtl_writephy(tp, 0x05, 0x8b85);
3264 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3265 rtl_writephy(tp, 0x1f, 0x0000);
3268 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);	/* clear low two bits of ERI 0x1b0 */
3269 rtl_writephy(tp, 0x1f, 0x0005);
3270 rtl_writephy(tp, 0x05, 0x8b85);
3271 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3272 rtl_writephy(tp, 0x1f, 0x0004);
3273 rtl_writephy(tp, 0x1f, 0x0007);
3274 rtl_writephy(tp, 0x1e, 0x0020);
3275 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3276 rtl_writephy(tp, 0x1f, 0x0002);
3277 rtl_writephy(tp, 0x1f, 0x0000);
/* NOTE(review): MMD-style 0x0d/0x0e sequence (device 7 / reg 0x3c ← 0),
 * presumably disabling EEE advertisement — confirm against PHY docs. */
3278 rtl_writephy(tp, 0x0d, 0x0007);
3279 rtl_writephy(tp, 0x0e, 0x003c);
3280 rtl_writephy(tp, 0x0d, 0x4007);
3281 rtl_writephy(tp, 0x0e, 0x0000);
3282 rtl_writephy(tp, 0x0d, 0x0000);
3285 rtl_writephy(tp, 0x1f, 0x0003);
3286 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3287 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3288 rtl_writephy(tp, 0x1f, 0x0000);
3290 /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
3291 rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
/*
 * Common PHY tuning shared by all RTL8168F variants (and 8411): 4-corner
 * performance, auto speed down, and 10M EEE waveform tweaks.  Called from
 * the per-variant _1/_2 config functions after their firmware load.
 */
3294 static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
3296 /* For 4-corner performance improve */
3297 rtl_writephy(tp, 0x1f, 0x0005);
3298 rtl_writephy(tp, 0x05, 0x8b80);
3299 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
3300 rtl_writephy(tp, 0x1f, 0x0000);
3302 /* PHY auto speed down */
3303 rtl_writephy(tp, 0x1f, 0x0007);
3304 rtl_writephy(tp, 0x1e, 0x002d);
3305 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3306 rtl_writephy(tp, 0x1f, 0x0000);
3307 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3309 /* Improve 10M EEE waveform */
3310 rtl_writephy(tp, 0x1f, 0x0005);
3311 rtl_writephy(tp, 0x05, 0x8b86);
3312 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3313 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * PHY setup for RTL8168F rev 1 (MAC_VER_35): firmware, register batch,
 * shared 8168F tuning, then a 2-pair detection tweak.
 */
3316 static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3318 static const struct phy_reg phy_reg_init[] = {
3319 /* Channel estimation fine tune */
3324 /* Modify green table for giga & fnet */
3341 /* Modify green table for 10M */
3347 /* Disable hiimpedance detection (RTCT) */
3353 rtl_apply_firmware(tp);
3355 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3357 rtl8168f_hw_phy_config(tp);	/* common 8168F tuning */
3359 /* Improve 2-pair detection performance */
3360 rtl_writephy(tp, 0x1f, 0x0005);
3361 rtl_writephy(tp, 0x05, 0x8b85);
3362 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3363 rtl_writephy(tp, 0x1f, 0x0000);
/* PHY setup for RTL8168F rev 2 (MAC_VER_36): firmware load plus common 8168F tuning only. */
3366 static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3368 rtl_apply_firmware(tp);
3370 rtl8168f_hw_phy_config(tp);
/*
 * PHY setup for RTL8411 (MAC_VER_38): firmware, common 8168F tuning,
 * green-table adjustments, uc same-seed fix, and EEE-related writes.
 * Note the register batch is applied *after* the 2-pair tweak here,
 * unlike rtl8168f_1_hw_phy_config — sequence is deliberate.
 */
3373 static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3375 static const struct phy_reg phy_reg_init[] = {
3376 /* Channel estimation fine tune */
3381 /* Modify green table for giga & fnet */
3398 /* Modify green table for 10M */
3404 /* Disable hiimpedance detection (RTCT) */
3411 rtl_apply_firmware(tp);
3413 rtl8168f_hw_phy_config(tp);
3415 /* Improve 2-pair detection performance */
3416 rtl_writephy(tp, 0x1f, 0x0005);
3417 rtl_writephy(tp, 0x05, 0x8b85);
3418 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3419 rtl_writephy(tp, 0x1f, 0x0000);
3421 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3423 /* Modify green table for giga */
3424 rtl_writephy(tp, 0x1f, 0x0005);
3425 rtl_writephy(tp, 0x05, 0x8b54);
3426 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3427 rtl_writephy(tp, 0x05, 0x8b5d);
3428 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3429 rtl_writephy(tp, 0x05, 0x8a7c);
3430 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3431 rtl_writephy(tp, 0x05, 0x8a7f);
3432 rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
3433 rtl_writephy(tp, 0x05, 0x8a82);
3434 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3435 rtl_writephy(tp, 0x05, 0x8a85);
3436 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3437 rtl_writephy(tp, 0x05, 0x8a88);
3438 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3439 rtl_writephy(tp, 0x1f, 0x0000);
3441 /* uc same-seed solution */
3442 rtl_writephy(tp, 0x1f, 0x0005);
3443 rtl_writephy(tp, 0x05, 0x8b85);
3444 rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
3445 rtl_writephy(tp, 0x1f, 0x0000);
3448 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
3449 rtl_writephy(tp, 0x1f, 0x0005);
3450 rtl_writephy(tp, 0x05, 0x8b85);
3451 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3452 rtl_writephy(tp, 0x1f, 0x0004);
3453 rtl_writephy(tp, 0x1f, 0x0007);
3454 rtl_writephy(tp, 0x1e, 0x0020);
3455 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3456 rtl_writephy(tp, 0x1f, 0x0000);
/* NOTE(review): MMD-style 0x0d/0x0e sequence (device 7 / reg 0x3c ← 0) —
 * likely clearing EEE advertisement; confirm. */
3457 rtl_writephy(tp, 0x0d, 0x0007);
3458 rtl_writephy(tp, 0x0e, 0x003c);
3459 rtl_writephy(tp, 0x0d, 0x4007);
3460 rtl_writephy(tp, 0x0e, 0x0000);
3461 rtl_writephy(tp, 0x0d, 0x0000);
3464 rtl_writephy(tp, 0x1f, 0x0003);
3465 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3466 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3467 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * PHY setup for RTL8168G rev 1 (MAC_VER_40): firmware, two read-dependent
 * bit toggles, auto speed down, EEE auto-fallback, LPF tune, SWR
 * efficiency tweaks, and ALDPS disable.  Pages here use the newer
 * 0x0axx/0x0bxx/0x0cxx page numbering of the G-series PHY.
 */
3470 static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3472 rtl_apply_firmware(tp);
3474 rtl_writephy(tp, 0x1f, 0x0a46);
3475 if (rtl_readphy(tp, 0x10) & 0x0100) {	/* strap/status bit decides polarity of the 0x12 toggle */
3476 rtl_writephy(tp, 0x1f, 0x0bcc);
3477 rtl_w1w0_phy(tp, 0x12, 0x0000, 0x8000);
3479 rtl_writephy(tp, 0x1f, 0x0bcc);
3480 rtl_w1w0_phy(tp, 0x12, 0x8000, 0x0000);
3483 rtl_writephy(tp, 0x1f, 0x0a46);
3484 if (rtl_readphy(tp, 0x13) & 0x0100) {	/* same pattern for reg 0x15 bit 1 */
3485 rtl_writephy(tp, 0x1f, 0x0c41);
3486 rtl_w1w0_phy(tp, 0x15, 0x0002, 0x0000);
3488 rtl_writephy(tp, 0x1f, 0x0c41);
3489 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0002);
3492 /* Enable PHY auto speed down */
3493 rtl_writephy(tp, 0x1f, 0x0a44);
3494 rtl_w1w0_phy(tp, 0x11, 0x000c, 0x0000);
3496 rtl_writephy(tp, 0x1f, 0x0bcc);
3497 rtl_w1w0_phy(tp, 0x14, 0x0100, 0x0000);
3498 rtl_writephy(tp, 0x1f, 0x0a44);
3499 rtl_w1w0_phy(tp, 0x11, 0x00c0, 0x0000);
3500 rtl_writephy(tp, 0x1f, 0x0a43);
3501 rtl_writephy(tp, 0x13, 0x8084);	/* reg 0x13 = indirect address, 0x14 = data on page 0x0a43 */
3502 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x6000);
3503 rtl_w1w0_phy(tp, 0x10, 0x1003, 0x0000);
3505 /* EEE auto-fallback function */
3506 rtl_writephy(tp, 0x1f, 0x0a4b);
3507 rtl_w1w0_phy(tp, 0x11, 0x0004, 0x0000);
3509 /* Enable UC LPF tune function */
3510 rtl_writephy(tp, 0x1f, 0x0a43);
3511 rtl_writephy(tp, 0x13, 0x8012);
3512 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3514 rtl_writephy(tp, 0x1f, 0x0c42);
3515 rtl_w1w0_phy(tp, 0x11, 0x4000, 0x2000);
3517 /* Improve SWR Efficiency */
3518 rtl_writephy(tp, 0x1f, 0x0bcd);
3519 rtl_writephy(tp, 0x14, 0x5065);
3520 rtl_writephy(tp, 0x14, 0xd065);
3521 rtl_writephy(tp, 0x1f, 0x0bc8);
3522 rtl_writephy(tp, 0x11, 0x5655);
3523 rtl_writephy(tp, 0x1f, 0x0bcd);
3524 rtl_writephy(tp, 0x14, 0x1065);
3525 rtl_writephy(tp, 0x14, 0x9065);
3526 rtl_writephy(tp, 0x14, 0x1065);
3528 /* Check ALDPS bit, disable it if enabled */
3529 rtl_writephy(tp, 0x1f, 0x0a43);
3530 if (rtl_readphy(tp, 0x10) & 0x0004)
3531 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
3533 rtl_writephy(tp, 0x1f, 0x0000);
/* PHY setup for RTL8168G rev 2 family (MAC_VER_42/43/44): firmware load only. */
3536 static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
3538 rtl_apply_firmware(tp);
/*
 * PHY setup for RTL8168H rev 1 (MAC_VER_45/47): firmware, channel
 * estimation adjustments for giga master/slave/fnet, dout_tapbin
 * computation from two PHY reads, R-tune/PGA-retune, GPHY 10M, SAR ADC,
 * PFM-mode disable, and ALDPS disable.
 */
3541 static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
3546 rtl_apply_firmware(tp);
3548 /* CHN EST parameters adjust - giga master */
3549 rtl_writephy(tp, 0x1f, 0x0a43);
3550 rtl_writephy(tp, 0x13, 0x809b);
3551 rtl_w1w0_phy(tp, 0x14, 0x8000, 0xf800);
3552 rtl_writephy(tp, 0x13, 0x80a2);
3553 rtl_w1w0_phy(tp, 0x14, 0x8000, 0xff00);
3554 rtl_writephy(tp, 0x13, 0x80a4);
3555 rtl_w1w0_phy(tp, 0x14, 0x8500, 0xff00);
3556 rtl_writephy(tp, 0x13, 0x809c);
3557 rtl_w1w0_phy(tp, 0x14, 0xbd00, 0xff00);
3558 rtl_writephy(tp, 0x1f, 0x0000);
3560 /* CHN EST parameters adjust - giga slave */
3561 rtl_writephy(tp, 0x1f, 0x0a43);
3562 rtl_writephy(tp, 0x13, 0x80ad);
3563 rtl_w1w0_phy(tp, 0x14, 0x7000, 0xf800);
3564 rtl_writephy(tp, 0x13, 0x80b4);
3565 rtl_w1w0_phy(tp, 0x14, 0x5000, 0xff00);
3566 rtl_writephy(tp, 0x13, 0x80ac);
3567 rtl_w1w0_phy(tp, 0x14, 0x4000, 0xff00);
3568 rtl_writephy(tp, 0x1f, 0x0000);
3570 /* CHN EST parameters adjust - fnet */
3571 rtl_writephy(tp, 0x1f, 0x0a43);
3572 rtl_writephy(tp, 0x13, 0x808e);
3573 rtl_w1w0_phy(tp, 0x14, 0x1200, 0xff00);
3574 rtl_writephy(tp, 0x13, 0x8090);
3575 rtl_w1w0_phy(tp, 0x14, 0xe500, 0xff00);
3576 rtl_writephy(tp, 0x13, 0x8092);
3577 rtl_w1w0_phy(tp, 0x14, 0x9f00, 0xff00);
3578 rtl_writephy(tp, 0x1f, 0x0000);
3580 /* enable R-tune & PGA-retune function */
3582 rtl_writephy(tp, 0x1f, 0x0a46);
3583 data = rtl_readphy(tp, 0x13);
3586 dout_tapbin |= data;
3587 data = rtl_readphy(tp, 0x12);
3590 dout_tapbin |= data;
3591 dout_tapbin = ~(dout_tapbin^0x08);	/* invert-xor transform of the tap-bin nibble */
3593 dout_tapbin &= 0xf000;	/* only the top nibble is programmed back */
3594 rtl_writephy(tp, 0x1f, 0x0a43);
3595 rtl_writephy(tp, 0x13, 0x827a);
3596 rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000);
3597 rtl_writephy(tp, 0x13, 0x827b);
3598 rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000);
3599 rtl_writephy(tp, 0x13, 0x827c);
3600 rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000);
3601 rtl_writephy(tp, 0x13, 0x827d);
3602 rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000);
3604 rtl_writephy(tp, 0x1f, 0x0a43);
3605 rtl_writephy(tp, 0x13, 0x0811);
3606 rtl_w1w0_phy(tp, 0x14, 0x0800, 0x0000);
3607 rtl_writephy(tp, 0x1f, 0x0a42);
3608 rtl_w1w0_phy(tp, 0x16, 0x0002, 0x0000);
3609 rtl_writephy(tp, 0x1f, 0x0000);
3611 /* enable GPHY 10M */
3612 rtl_writephy(tp, 0x1f, 0x0a44);
3613 rtl_w1w0_phy(tp, 0x11, 0x0800, 0x0000);
3614 rtl_writephy(tp, 0x1f, 0x0000);
3616 /* SAR ADC performance */
3617 rtl_writephy(tp, 0x1f, 0x0bca);
3618 rtl_w1w0_phy(tp, 0x17, 0x4000, 0x3000);
3619 rtl_writephy(tp, 0x1f, 0x0000);
3621 rtl_writephy(tp, 0x1f, 0x0a43);
3622 rtl_writephy(tp, 0x13, 0x803f);	/* clear bits 13:12 at eight indirect addresses 0x803f..0x806f */
3623 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
3624 rtl_writephy(tp, 0x13, 0x8047);
3625 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
3626 rtl_writephy(tp, 0x13, 0x804f);
3627 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
3628 rtl_writephy(tp, 0x13, 0x8057);
3629 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
3630 rtl_writephy(tp, 0x13, 0x805f);
3631 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
3632 rtl_writephy(tp, 0x13, 0x8067);
3633 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
3634 rtl_writephy(tp, 0x13, 0x806f);
3635 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
3636 rtl_writephy(tp, 0x1f, 0x0000);
3638 /* disable phy pfm mode */
3639 rtl_writephy(tp, 0x1f, 0x0a44);
3640 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x0080);
3641 rtl_writephy(tp, 0x1f, 0x0000);
3643 /* Check ALDPS bit, disable it if enabled */
3644 rtl_writephy(tp, 0x1f, 0x0a43);
3645 if (rtl_readphy(tp, 0x10) & 0x0004)
3646 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
3648 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * PHY setup for RTL8168H rev 2 (MAC_VER_46/48): firmware, CHN EST update,
 * R-tune enable, GPHY 10M, ioffset calibration read from MAC OCP space,
 * TX LPF rlen programming, PFM-mode and ALDPS disable.
 */
3651 static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
3653 u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0;
3657 rtl_apply_firmware(tp);
3659 /* CHIN EST parameter update */
3660 rtl_writephy(tp, 0x1f, 0x0a43);
3661 rtl_writephy(tp, 0x13, 0x808a);
3662 rtl_w1w0_phy(tp, 0x14, 0x000a, 0x003f);
3663 rtl_writephy(tp, 0x1f, 0x0000);
3665 /* enable R-tune & PGA-retune function */
3666 rtl_writephy(tp, 0x1f, 0x0a43);
3667 rtl_writephy(tp, 0x13, 0x0811);
3668 rtl_w1w0_phy(tp, 0x14, 0x0800, 0x0000);
3669 rtl_writephy(tp, 0x1f, 0x0a42);
3670 rtl_w1w0_phy(tp, 0x16, 0x0002, 0x0000);
3671 rtl_writephy(tp, 0x1f, 0x0000);
3673 /* enable GPHY 10M */
3674 rtl_writephy(tp, 0x1f, 0x0a44);
3675 rtl_w1w0_phy(tp, 0x11, 0x0800, 0x0000);
3676 rtl_writephy(tp, 0x1f, 0x0000);
/* Extract four 4-bit ioffset calibration nibbles from OCP 0xdd02/0xdd00. */
3678 r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
3679 data = r8168_mac_ocp_read(tp, 0xdd02);
3680 ioffset_p3 = ((data & 0x80)>>7);
3683 data = r8168_mac_ocp_read(tp, 0xdd00);
3684 ioffset_p3 |= ((data & (0xe000))>>13);
3685 ioffset_p2 = ((data & (0x1e00))>>9);
3686 ioffset_p1 = ((data & (0x01e0))>>5);
3687 ioffset_p0 = ((data & 0x0010)>>4);
3689 ioffset_p0 |= (data & (0x07));
3690 data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0);
/* NOTE(review): the p0 comparison is "==" while the others are "!=" —
 * looks inconsistent but matches the vendor reference driver; do not
 * "fix" without hardware confirmation. */
3692 if ((ioffset_p3 != 0x0F) || (ioffset_p2 != 0x0F) ||
3693 (ioffset_p1 != 0x0F) || (ioffset_p0 == 0x0F)) {
3694 rtl_writephy(tp, 0x1f, 0x0bcf);
3695 rtl_writephy(tp, 0x16, data);
3696 rtl_writephy(tp, 0x1f, 0x0000);
3699 /* Modify rlen (TX LPF corner frequency) level */
3700 rtl_writephy(tp, 0x1f, 0x0bcd);
3701 data = rtl_readphy(tp, 0x16);
3706 data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12);	/* replicate rlen into all four nibbles */
3707 rtl_writephy(tp, 0x17, data);
3708 rtl_writephy(tp, 0x1f, 0x0bcd);
3709 rtl_writephy(tp, 0x1f, 0x0000);
3711 /* disable phy pfm mode */
3712 rtl_writephy(tp, 0x1f, 0x0a44);
3713 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x0080);
3714 rtl_writephy(tp, 0x1f, 0x0000);
3716 /* Check ALDPS bit, disable it if enabled */
3717 rtl_writephy(tp, 0x1f, 0x0a43);
3718 if (rtl_readphy(tp, 0x10) & 0x0004)
3719 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
3721 rtl_writephy(tp, 0x1f, 0x0000);
/* PHY setup for RTL8102E (MAC_VER_07/08/09): three bit patches on page 0, then a register batch. */
3724 static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
3726 static const struct phy_reg phy_reg_init[] = {
3733 rtl_writephy(tp, 0x1f, 0x0000);
3734 rtl_patchphy(tp, 0x11, 1 << 12);
3735 rtl_patchphy(tp, 0x19, 1 << 13);
3736 rtl_patchphy(tp, 0x10, 1 << 15);
3738 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* PHY setup for RTL8105E (MAC_VER_29/30): ALDPS must be off before the ram code (firmware) loads. */
3741 static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3743 static const struct phy_reg phy_reg_init[] = {
3757 /* Disable ALDPS before ram code */
3758 rtl_writephy(tp, 0x1f, 0x0000);
3759 rtl_writephy(tp, 0x18, 0x0310);
3762 rtl_apply_firmware(tp);
3764 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* PHY setup for RTL8402 (MAC_VER_37): ALDPS off, firmware, ERI write, then EEE-related PHY writes. */
3767 static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3769 /* Disable ALDPS before setting firmware */
3770 rtl_writephy(tp, 0x1f, 0x0000);
3771 rtl_writephy(tp, 0x18, 0x0310);
3774 rtl_apply_firmware(tp);
3777 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3778 rtl_writephy(tp, 0x1f, 0x0004);
3779 rtl_writephy(tp, 0x10, 0x401f);
3780 rtl_writephy(tp, 0x19, 0x7030);
3781 rtl_writephy(tp, 0x1f, 0x0000);
/* PHY setup for RTL8106E (MAC_VER_39): ALDPS off, firmware, register batch bracketed by two ERI writes. */
3784 static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
3786 static const struct phy_reg phy_reg_init[] = {
3793 /* Disable ALDPS before ram code */
3794 rtl_writephy(tp, 0x1f, 0x0000);
3795 rtl_writephy(tp, 0x18, 0x0310);
3798 rtl_apply_firmware(tp);
3800 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3801 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3803 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
/*
 * Dispatch to the per-chip PHY configuration routine based on the detected
 * mac_version.  Versions with no case (e.g. VER_01, VER_31, VER_41) get no
 * PHY configuration here.
 */
3806 static void rtl_hw_phy_config(struct net_device *dev)
3808 struct rtl8169_private *tp = netdev_priv(dev);
3810 rtl8169_print_mac_version(tp);
3812 switch (tp->mac_version) {
3813 case RTL_GIGA_MAC_VER_01:
3815 case RTL_GIGA_MAC_VER_02:
3816 case RTL_GIGA_MAC_VER_03:
3817 rtl8169s_hw_phy_config(tp);
3819 case RTL_GIGA_MAC_VER_04:
3820 rtl8169sb_hw_phy_config(tp);
3822 case RTL_GIGA_MAC_VER_05:
3823 rtl8169scd_hw_phy_config(tp);
3825 case RTL_GIGA_MAC_VER_06:
3826 rtl8169sce_hw_phy_config(tp);
3828 case RTL_GIGA_MAC_VER_07:
3829 case RTL_GIGA_MAC_VER_08:
3830 case RTL_GIGA_MAC_VER_09:
3831 rtl8102e_hw_phy_config(tp);
3833 case RTL_GIGA_MAC_VER_11:
3834 rtl8168bb_hw_phy_config(tp);
3836 case RTL_GIGA_MAC_VER_12:
3837 rtl8168bef_hw_phy_config(tp);
3839 case RTL_GIGA_MAC_VER_17:
3840 rtl8168bef_hw_phy_config(tp);
3842 case RTL_GIGA_MAC_VER_18:
3843 rtl8168cp_1_hw_phy_config(tp);
3845 case RTL_GIGA_MAC_VER_19:
3846 rtl8168c_1_hw_phy_config(tp);
3848 case RTL_GIGA_MAC_VER_20:
3849 rtl8168c_2_hw_phy_config(tp);
3851 case RTL_GIGA_MAC_VER_21:
3852 rtl8168c_3_hw_phy_config(tp);
3854 case RTL_GIGA_MAC_VER_22:
3855 rtl8168c_4_hw_phy_config(tp);
3857 case RTL_GIGA_MAC_VER_23:
3858 case RTL_GIGA_MAC_VER_24:
3859 rtl8168cp_2_hw_phy_config(tp);
3861 case RTL_GIGA_MAC_VER_25:
3862 rtl8168d_1_hw_phy_config(tp);
3864 case RTL_GIGA_MAC_VER_26:
3865 rtl8168d_2_hw_phy_config(tp);
3867 case RTL_GIGA_MAC_VER_27:
3868 rtl8168d_3_hw_phy_config(tp);
3870 case RTL_GIGA_MAC_VER_28:
3871 rtl8168d_4_hw_phy_config(tp);
3873 case RTL_GIGA_MAC_VER_29:
3874 case RTL_GIGA_MAC_VER_30:
3875 rtl8105e_hw_phy_config(tp);
3877 case RTL_GIGA_MAC_VER_31:
3880 case RTL_GIGA_MAC_VER_32:
3881 case RTL_GIGA_MAC_VER_33:
3882 rtl8168e_1_hw_phy_config(tp);
3884 case RTL_GIGA_MAC_VER_34:
3885 rtl8168e_2_hw_phy_config(tp);
3887 case RTL_GIGA_MAC_VER_35:
3888 rtl8168f_1_hw_phy_config(tp);
3890 case RTL_GIGA_MAC_VER_36:
3891 rtl8168f_2_hw_phy_config(tp);
3894 case RTL_GIGA_MAC_VER_37:
3895 rtl8402_hw_phy_config(tp);
3898 case RTL_GIGA_MAC_VER_38:
3899 rtl8411_hw_phy_config(tp);
3902 case RTL_GIGA_MAC_VER_39:
3903 rtl8106e_hw_phy_config(tp);
3906 case RTL_GIGA_MAC_VER_40:
3907 rtl8168g_1_hw_phy_config(tp);
3909 case RTL_GIGA_MAC_VER_42:
3910 case RTL_GIGA_MAC_VER_43:
3911 case RTL_GIGA_MAC_VER_44:
3912 rtl8168g_2_hw_phy_config(tp);
3914 case RTL_GIGA_MAC_VER_45:
3915 case RTL_GIGA_MAC_VER_47:
3916 rtl8168h_1_hw_phy_config(tp);
3918 case RTL_GIGA_MAC_VER_46:
3919 case RTL_GIGA_MAC_VER_48:
3920 rtl8168h_2_hw_phy_config(tp);
3923 case RTL_GIGA_MAC_VER_41:
/*
 * Periodic PHY supervision: while a PHY reset is pending, or the link is
 * not yet up, keep re-arming the timer and retriggering the reset until
 * link comes up.  Runs from the driver's work/timer context.
 */
3929 static void rtl_phy_work(struct rtl8169_private *tp)
3931 struct timer_list *timer = &tp->timer;
3932 void __iomem *ioaddr = tp->mmio_addr;
3933 unsigned long timeout = RTL8169_PHY_TIMEOUT;
3935 assert(tp->mac_version > RTL_GIGA_MAC_VER_01);	/* VER_01 (TBI) never schedules this work */
3937 if (tp->phy_reset_pending(tp)) {
3939 * A busy loop could burn quite a few cycles on nowadays CPU.
3940 * Let's delay the execution of the timer for a few ticks.
3946 if (tp->link_ok(ioaddr))
3949 netif_dbg(tp, link, tp->dev, "PHY reset until link up\n");
3951 tp->phy_reset_enable(tp);
3954 mod_timer(timer, jiffies + timeout);	/* re-arm for the next check */
/* Mark a task flag and queue the work item, but only if the flag was not already set. */
3957 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3959 if (!test_and_set_bit(flag, tp->wk.flags))
3960 schedule_work(&tp->wk.work);
/* Timer callback: defer PHY supervision to process context via the work queue. */
3963 static void rtl8169_phy_timer(unsigned long __opaque)
3965 struct net_device *dev = (struct net_device *)__opaque;
3966 struct rtl8169_private *tp = netdev_priv(dev);
3968 rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
/* Undo probe-time PCI setup: release regions, clear MWI, disable the device. */
3971 static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
3972 void __iomem *ioaddr)
3975 pci_release_regions(pdev);
3976 pci_clear_mwi(pdev);
3977 pci_disable_device(pdev);
/* Polling condition for rtl_msleep_loop_wait_low(): true while PHY reset is still pending. */
3981 DECLARE_RTL_COND(rtl_phy_reset_cond)
3983 return tp->phy_reset_pending(tp);
/* Trigger a PHY reset and wait (up to 100 x 1ms polls) for it to complete. */
3986 static void rtl8169_phy_reset(struct net_device *dev,
3987 struct rtl8169_private *tp)
3989 tp->phy_reset_enable(tp);
3990 rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
/* TBI (fiber) mode only exists on MAC_VER_01 and is indicated by PHYstatus.TBI_Enable. */
3993 static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3995 void __iomem *ioaddr = tp->mmio_addr;
3997 return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3998 (RTL_R8(PHYstatus) & TBI_Enable);
/*
 * Bring up the PHY: chip-specific config, legacy PCI latency/cache-line
 * quirks for old (<= VER_06) parts, reset, then autonegotiation advertising
 * everything the PHY supports (gigabit only when supports_gmii).
 */
4001 static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
4003 void __iomem *ioaddr = tp->mmio_addr;
4005 rtl_hw_phy_config(dev);
4007 if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
4008 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
4012 pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
4014 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
4015 pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
4017 if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
4018 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
4020 dprintk("Set PHY Reg 0x0bh = 0x00h\n");
4021 rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
4024 rtl8169_phy_reset(dev, tp);
4026 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
4027 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4028 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
4029 (tp->mii.supports_gmii ?
4030 ADVERTISED_1000baseT_Half |
4031 ADVERTISED_1000baseT_Full : 0));
4033 if (rtl_tbi_enabled(tp))
4034 netif_info(tp, link, dev, "TBI auto-negotiating\n");
/*
 * Program the unicast MAC address into MAC0/MAC4 (Cfg9346 unlock required),
 * with the additional ExGMAC mirror on MAC_VER_34.  Runs under the driver
 * work lock (released via rtl_unlock_work at the end).
 */
4037 static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
4039 void __iomem *ioaddr = tp->mmio_addr;
4043 RTL_W8(Cfg9346, Cfg9346_Unlock);
4045 RTL_W32(MAC4, addr[4] | addr[5] << 8);	/* high 2 bytes first */
4048 RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
4051 if (tp->mac_version == RTL_GIGA_MAC_VER_34)
4052 rtl_rar_exgmac_set(tp, addr);	/* broken-BIOS GigaMAC workaround */
4054 RTL_W8(Cfg9346, Cfg9346_Lock);
4056 rtl_unlock_work(tp);
/* ndo_set_mac_address: validate, copy into dev->dev_addr, program the hardware. */
4059 static int rtl_set_mac_address(struct net_device *dev, void *p)
4061 struct rtl8169_private *tp = netdev_priv(dev);
4062 struct sockaddr *addr = p;
4064 if (!is_valid_ether_addr(addr->sa_data))
4065 return -EADDRNOTAVAIL;
4067 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4069 rtl_rar_set(tp, dev->dev_addr);
/* ndo_do_ioctl: forward MII ioctls to the chip-specific handler; -ENODEV when the interface is down. */
4074 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4076 struct rtl8169_private *tp = netdev_priv(dev);
4077 struct mii_ioctl_data *data = if_mii(ifr);
4079 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
/* MII ioctl backend for copper PHYs: report fixed PHY id 32, read/write MDIO regs 0-31. */
4082 static int rtl_xmii_ioctl(struct rtl8169_private *tp,
4083 struct mii_ioctl_data *data, int cmd)
4087 data->phy_id = 32; /* Internal PHY */
4091 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
4095 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
/* MII ioctl backend for TBI (fiber) mode: no MII registers to expose. */
4101 static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
/* Tear down MSI if it was enabled at probe time, clearing the feature flag. */
4106 static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
4108 if (tp->features & RTL_FEATURE_MSI) {
4109 pci_disable_msi(pdev);
4110 tp->features &= ~RTL_FEATURE_MSI;
/*
 * Select the MDIO accessor pair for this chip generation: 8168DP rev 1 and
 * rev 2/31 use their own indirect schemes, the G/H series (VER_40+) uses
 * the r8168g accessors, everything else the classic r8169 ones.
 */
4114 static void rtl_init_mdio_ops(struct rtl8169_private *tp)
4116 struct mdio_ops *ops = &tp->mdio_ops;
4118 switch (tp->mac_version) {
4119 case RTL_GIGA_MAC_VER_27:
4120 ops->write = r8168dp_1_mdio_write;
4121 ops->read = r8168dp_1_mdio_read;
4123 case RTL_GIGA_MAC_VER_28:
4124 case RTL_GIGA_MAC_VER_31:
4125 ops->write = r8168dp_2_mdio_write;
4126 ops->read = r8168dp_2_mdio_read;
4128 case RTL_GIGA_MAC_VER_40:
4129 case RTL_GIGA_MAC_VER_41:
4130 case RTL_GIGA_MAC_VER_42:
4131 case RTL_GIGA_MAC_VER_43:
4132 case RTL_GIGA_MAC_VER_44:
4133 case RTL_GIGA_MAC_VER_45:
4134 case RTL_GIGA_MAC_VER_46:
4135 case RTL_GIGA_MAC_VER_47:
4136 case RTL_GIGA_MAC_VER_48:
4137 ops->write = r8168g_mdio_write;
4138 ops->read = r8168g_mdio_read;
4141 ops->write = r8169_mdio_write;
4142 ops->read = r8169_mdio_read;
/*
 * Renegotiate at the lowest speed the link partner advertises (for WoL
 * power saving): 10M if the partner supports it, else 10/100, else the
 * full set including gigabit when the PHY supports GMII.
 */
4147 static void rtl_speed_down(struct rtl8169_private *tp)
4152 rtl_writephy(tp, 0x1f, 0x0000);
4153 lpa = rtl_readphy(tp, MII_LPA);	/* link partner ability */
4155 if (lpa & (LPA_10HALF | LPA_10FULL))
4156 adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
4157 else if (lpa & (LPA_100HALF | LPA_100FULL))
4158 adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4159 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
4161 adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4162 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
4163 (tp->mii.supports_gmii ?
4164 ADVERTISED_1000baseT_Half |
4165 ADVERTISED_1000baseT_Full : 0);
4167 rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
/*
 * WoL suspend quirk: on the listed chip generations, keep the receiver
 * accepting broadcast/multicast/unicast-to-us so wake packets are seen
 * while suspended.
 */
4171 static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
4173 void __iomem *ioaddr = tp->mmio_addr;
4175 switch (tp->mac_version) {
4176 case RTL_GIGA_MAC_VER_25:
4177 case RTL_GIGA_MAC_VER_26:
4178 case RTL_GIGA_MAC_VER_29:
4179 case RTL_GIGA_MAC_VER_30:
4180 case RTL_GIGA_MAC_VER_32:
4181 case RTL_GIGA_MAC_VER_33:
4182 case RTL_GIGA_MAC_VER_34:
4183 case RTL_GIGA_MAC_VER_37:
4184 case RTL_GIGA_MAC_VER_38:
4185 case RTL_GIGA_MAC_VER_39:
4186 case RTL_GIGA_MAC_VER_40:
4187 case RTL_GIGA_MAC_VER_41:
4188 case RTL_GIGA_MAC_VER_42:
4189 case RTL_GIGA_MAC_VER_43:
4190 case RTL_GIGA_MAC_VER_44:
4191 case RTL_GIGA_MAC_VER_45:
4192 case RTL_GIGA_MAC_VER_46:
4193 case RTL_GIGA_MAC_VER_47:
4194 case RTL_GIGA_MAC_VER_48:
4195 RTL_W32(RxConfig, RTL_R32(RxConfig) |
4196 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
/* If any wake source is armed, apply the WoL suspend quirk; returns whether WoL handled power down. */
4203 static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
4205 if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
4209 rtl_wol_suspend_quirk(tp);
/* Power down the 810x-family PHY via BMCR.PDOWN on page 0. */
4214 static void r810x_phy_power_down(struct rtl8169_private *tp)
4216 rtl_writephy(tp, 0x1f, 0x0000);
4217 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
/* Power up the 810x-family PHY: clearing PDOWN by writing BMCR with only ANENABLE set. */
4220 static void r810x_phy_power_up(struct rtl8169_private *tp)
4222 rtl_writephy(tp, 0x1f, 0x0000);
4223 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
/*
 * PLL power down for 810x-family chips.  Skipped entirely when WoL takes
 * over; otherwise the PHY is powered down and, on chips other than the
 * listed early versions, the PMCH power bit is cleared.
 */
4226 static void r810x_pll_power_down(struct rtl8169_private *tp)
4228 void __iomem *ioaddr = tp->mmio_addr;
4230 if (rtl_wol_pll_power_down(tp))
4233 r810x_phy_power_down(tp);
4235 switch (tp->mac_version) {
4236 case RTL_GIGA_MAC_VER_07:
4237 case RTL_GIGA_MAC_VER_08:
4238 case RTL_GIGA_MAC_VER_09:
4239 case RTL_GIGA_MAC_VER_10:
4240 case RTL_GIGA_MAC_VER_13:
4241 case RTL_GIGA_MAC_VER_16:
4244 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);	/* clear PLL power bit */
/*
 * PLL power up for 810x-family chips: PHY first, then set the PMCH power
 * bit(s) — VER_47/48 need 0xC0, other (non-early) versions 0x80, the early
 * versions listed need nothing.
 */
4249 static void r810x_pll_power_up(struct rtl8169_private *tp)
4251 void __iomem *ioaddr = tp->mmio_addr;
4253 r810x_phy_power_up(tp);
4255 switch (tp->mac_version) {
4256 case RTL_GIGA_MAC_VER_07:
4257 case RTL_GIGA_MAC_VER_08:
4258 case RTL_GIGA_MAC_VER_09:
4259 case RTL_GIGA_MAC_VER_10:
4260 case RTL_GIGA_MAC_VER_13:
4261 case RTL_GIGA_MAC_VER_16:
4263 case RTL_GIGA_MAC_VER_47:
4264 case RTL_GIGA_MAC_VER_48:
4265 RTL_W8(PMCH, RTL_R8(PMCH) | 0xC0);
4268 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
/*
 * Power up the 8168-family PHY.  The listed mid generations additionally
 * need PHY reg 0x0e cleared before BMCR is restored to autoneg-enabled.
 */
4273 static void r8168_phy_power_up(struct rtl8169_private *tp)
4275 rtl_writephy(tp, 0x1f, 0x0000);
4276 switch (tp->mac_version) {
4277 case RTL_GIGA_MAC_VER_11:
4278 case RTL_GIGA_MAC_VER_12:
4279 case RTL_GIGA_MAC_VER_17:
4280 case RTL_GIGA_MAC_VER_18:
4281 case RTL_GIGA_MAC_VER_19:
4282 case RTL_GIGA_MAC_VER_20:
4283 case RTL_GIGA_MAC_VER_21:
4284 case RTL_GIGA_MAC_VER_22:
4285 case RTL_GIGA_MAC_VER_23:
4286 case RTL_GIGA_MAC_VER_24:
4287 case RTL_GIGA_MAC_VER_25:
4288 case RTL_GIGA_MAC_VER_26:
4289 case RTL_GIGA_MAC_VER_27:
4290 case RTL_GIGA_MAC_VER_28:
4291 case RTL_GIGA_MAC_VER_31:
4292 rtl_writephy(tp, 0x0e, 0x0000);
4297 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
/*
 * Power down the 8168-family PHY.  VER_32/33/40/41 keep ANENABLE alongside
 * PDOWN; the mid generations write 0x0200 to reg 0x0e first; everything
 * else just sets BMCR.PDOWN.
 */
4300 static void r8168_phy_power_down(struct rtl8169_private *tp)
4302 rtl_writephy(tp, 0x1f, 0x0000);
4303 switch (tp->mac_version) {
4304 case RTL_GIGA_MAC_VER_32:
4305 case RTL_GIGA_MAC_VER_33:
4306 case RTL_GIGA_MAC_VER_40:
4307 case RTL_GIGA_MAC_VER_41:
4308 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
4311 case RTL_GIGA_MAC_VER_11:
4312 case RTL_GIGA_MAC_VER_12:
4313 case RTL_GIGA_MAC_VER_17:
4314 case RTL_GIGA_MAC_VER_18:
4315 case RTL_GIGA_MAC_VER_19:
4316 case RTL_GIGA_MAC_VER_20:
4317 case RTL_GIGA_MAC_VER_21:
4318 case RTL_GIGA_MAC_VER_22:
4319 case RTL_GIGA_MAC_VER_23:
4320 case RTL_GIGA_MAC_VER_24:
4321 case RTL_GIGA_MAC_VER_25:
4322 case RTL_GIGA_MAC_VER_26:
4323 case RTL_GIGA_MAC_VER_27:
4324 case RTL_GIGA_MAC_VER_28:
4325 case RTL_GIGA_MAC_VER_31:
4326 rtl_writephy(tp, 0x0e, 0x0200);
4328 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
/*
 * PLL power down for 8168-family chips.  Bails out early when a DASH
 * management agent is active (8168DP) or when ASF is enabled (VER_23/24);
 * otherwise an EPHY quirk for VER_32/33, WoL takeover check, PHY power
 * down, then chip-specific PLL gating (PMCH bit or ERI 0x1a8 clear).
 */
4333 static void r8168_pll_power_down(struct rtl8169_private *tp)
4335 void __iomem *ioaddr = tp->mmio_addr;
4337 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4338 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4339 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
4340 r8168dp_check_dash(tp)) {
4344 if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
4345 tp->mac_version == RTL_GIGA_MAC_VER_24) &&
4346 (RTL_R16(CPlusCmd) & ASF)) {
4350 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
4351 tp->mac_version == RTL_GIGA_MAC_VER_33)
4352 rtl_ephy_write(tp, 0x19, 0xff64);
4354 if (rtl_wol_pll_power_down(tp))
4357 r8168_phy_power_down(tp);
4359 switch (tp->mac_version) {
4360 case RTL_GIGA_MAC_VER_25:
4361 case RTL_GIGA_MAC_VER_26:
4362 case RTL_GIGA_MAC_VER_27:
4363 case RTL_GIGA_MAC_VER_28:
4364 case RTL_GIGA_MAC_VER_31:
4365 case RTL_GIGA_MAC_VER_32:
4366 case RTL_GIGA_MAC_VER_33:
4367 case RTL_GIGA_MAC_VER_45:
4368 case RTL_GIGA_MAC_VER_46:
4369 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
4371 case RTL_GIGA_MAC_VER_40:
4372 case RTL_GIGA_MAC_VER_41:
4373 rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000,
4374 0xfc000000, ERIAR_EXGMAC);	/* clear the top six ERI 0x1a8 bits */
/*
 * PLL power up for 8168-family chips: restore the chip-specific PLL gating
 * (PMCH bit, 0xC0 on VER_45/46, or ERI 0x1a8 set on VER_40/41), then power
 * the PHY back up.
 */
4379 static void r8168_pll_power_up(struct rtl8169_private *tp)
4381 void __iomem *ioaddr = tp->mmio_addr;
4383 switch (tp->mac_version) {
4384 case RTL_GIGA_MAC_VER_25:
4385 case RTL_GIGA_MAC_VER_26:
4386 case RTL_GIGA_MAC_VER_27:
4387 case RTL_GIGA_MAC_VER_28:
4388 case RTL_GIGA_MAC_VER_31:
4389 case RTL_GIGA_MAC_VER_32:
4390 case RTL_GIGA_MAC_VER_33:
4391 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
4393 case RTL_GIGA_MAC_VER_45:
4394 case RTL_GIGA_MAC_VER_46:
4395 RTL_W8(PMCH, RTL_R8(PMCH) | 0xC0);
4397 case RTL_GIGA_MAC_VER_40:
4398 case RTL_GIGA_MAC_VER_41:
4399 rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000,
4400 0x00000000, ERIAR_EXGMAC);
4404 r8168_phy_power_up(tp);
/* Invoke a pll_power_ops callback if one is installed (body elided in this view). */
4407 static void rtl_generic_op(struct rtl8169_private *tp,
4408 void (*op)(struct rtl8169_private *))
/* Wrapper: run the chip's PLL power-down op through rtl_generic_op. */
4414 static void rtl_pll_power_down(struct rtl8169_private *tp)
4416 rtl_generic_op(tp, tp->pll_power_ops.down);
/* Wrapper: run the chip's PLL power-up op through rtl_generic_op. */
4419 static void rtl_pll_power_up(struct rtl8169_private *tp)
4421 rtl_generic_op(tp, tp->pll_power_ops.up);
/*
 * rtl_init_pll_power_ops - select PLL power up/down callbacks for the
 * detected chip.  810x-class chips get the r810x_* pair, 8168-class the
 * r8168_* pair; versions not listed leave the hooks unset so the generic
 * dispatchers do nothing.
 */
4424 static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
4426 struct pll_power_ops *ops = &tp->pll_power_ops;
4428 switch (tp->mac_version) {
/* 8101/810x family */
4429 case RTL_GIGA_MAC_VER_07:
4430 case RTL_GIGA_MAC_VER_08:
4431 case RTL_GIGA_MAC_VER_09:
4432 case RTL_GIGA_MAC_VER_10:
4433 case RTL_GIGA_MAC_VER_16:
4434 case RTL_GIGA_MAC_VER_29:
4435 case RTL_GIGA_MAC_VER_30:
4436 case RTL_GIGA_MAC_VER_37:
4437 case RTL_GIGA_MAC_VER_39:
4438 case RTL_GIGA_MAC_VER_43:
4439 case RTL_GIGA_MAC_VER_47:
4440 case RTL_GIGA_MAC_VER_48:
4441 ops->down = r810x_pll_power_down;
4442 ops->up = r810x_pll_power_up;
/* 8168 family */
4445 case RTL_GIGA_MAC_VER_11:
4446 case RTL_GIGA_MAC_VER_12:
4447 case RTL_GIGA_MAC_VER_17:
4448 case RTL_GIGA_MAC_VER_18:
4449 case RTL_GIGA_MAC_VER_19:
4450 case RTL_GIGA_MAC_VER_20:
4451 case RTL_GIGA_MAC_VER_21:
4452 case RTL_GIGA_MAC_VER_22:
4453 case RTL_GIGA_MAC_VER_23:
4454 case RTL_GIGA_MAC_VER_24:
4455 case RTL_GIGA_MAC_VER_25:
4456 case RTL_GIGA_MAC_VER_26:
4457 case RTL_GIGA_MAC_VER_27:
4458 case RTL_GIGA_MAC_VER_28:
4459 case RTL_GIGA_MAC_VER_31:
4460 case RTL_GIGA_MAC_VER_32:
4461 case RTL_GIGA_MAC_VER_33:
4462 case RTL_GIGA_MAC_VER_34:
4463 case RTL_GIGA_MAC_VER_35:
4464 case RTL_GIGA_MAC_VER_36:
4465 case RTL_GIGA_MAC_VER_38:
4466 case RTL_GIGA_MAC_VER_40:
4467 case RTL_GIGA_MAC_VER_41:
4468 case RTL_GIGA_MAC_VER_42:
4469 case RTL_GIGA_MAC_VER_44:
4470 case RTL_GIGA_MAC_VER_45:
4471 case RTL_GIGA_MAC_VER_46:
4472 ops->down = r8168_pll_power_down;
4473 ops->up = r8168_pll_power_up;
/*
 * rtl_init_rxcfg - program the chip-generation-specific baseline RxConfig
 * value (FIFO threshold / interrupt coalescing / DMA burst / early-rx bits).
 * The final write (orig line 4529) is the default branch for versions not
 * listed above it.
 */
4483 static void rtl_init_rxcfg(struct rtl8169_private *tp)
4485 void __iomem *ioaddr = tp->mmio_addr;
4487 switch (tp->mac_version) {
/* original 8169/early 8168 generations */
4488 case RTL_GIGA_MAC_VER_01:
4489 case RTL_GIGA_MAC_VER_02:
4490 case RTL_GIGA_MAC_VER_03:
4491 case RTL_GIGA_MAC_VER_04:
4492 case RTL_GIGA_MAC_VER_05:
4493 case RTL_GIGA_MAC_VER_06:
4494 case RTL_GIGA_MAC_VER_10:
4495 case RTL_GIGA_MAC_VER_11:
4496 case RTL_GIGA_MAC_VER_12:
4497 case RTL_GIGA_MAC_VER_13:
4498 case RTL_GIGA_MAC_VER_14:
4499 case RTL_GIGA_MAC_VER_15:
4500 case RTL_GIGA_MAC_VER_16:
4501 case RTL_GIGA_MAC_VER_17:
4502 RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
4504 case RTL_GIGA_MAC_VER_18:
4505 case RTL_GIGA_MAC_VER_19:
4506 case RTL_GIGA_MAC_VER_20:
4507 case RTL_GIGA_MAC_VER_21:
4508 case RTL_GIGA_MAC_VER_22:
4509 case RTL_GIGA_MAC_VER_23:
4510 case RTL_GIGA_MAC_VER_24:
4511 case RTL_GIGA_MAC_VER_34:
4512 case RTL_GIGA_MAC_VER_35:
4513 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
4515 case RTL_GIGA_MAC_VER_40:
4516 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
4518 case RTL_GIGA_MAC_VER_41:
4519 case RTL_GIGA_MAC_VER_42:
4520 case RTL_GIGA_MAC_VER_43:
4521 case RTL_GIGA_MAC_VER_44:
4522 case RTL_GIGA_MAC_VER_45:
4523 case RTL_GIGA_MAC_VER_46:
4524 case RTL_GIGA_MAC_VER_47:
4525 case RTL_GIGA_MAC_VER_48:
4526 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
/* default for all remaining versions */
4529 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
/* Reset the software TX/RX ring cursors; called before (re)starting DMA. */
4534 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
4536 tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
/*
 * Jumbo-frame enable/disable.  The two generic entry points unlock the
 * config registers (Cfg9346), run the per-chip hook from jumbo_ops, then
 * re-lock.  The r8168*/r810x helpers below toggle the Jumbo_En bits in
 * Config3/Config4 and relax/restore the PCIe max-read-request tweak
 * (0x2 = smaller request size while jumbo is on, 0x5 = normal).
 */
4539 static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
4541 void __iomem *ioaddr = tp->mmio_addr;
4543 RTL_W8(Cfg9346, Cfg9346_Unlock);
4544 rtl_generic_op(tp, tp->jumbo_ops.enable);
4545 RTL_W8(Cfg9346, Cfg9346_Lock);
4548 static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
4550 void __iomem *ioaddr = tp->mmio_addr;
4552 RTL_W8(Cfg9346, Cfg9346_Unlock);
4553 rtl_generic_op(tp, tp->jumbo_ops.disable);
4554 RTL_W8(Cfg9346, Cfg9346_Lock);
/* 8168c family: both Jumbo_En bits plus the PCIe read-request tweak */
4557 static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
4559 void __iomem *ioaddr = tp->mmio_addr;
4561 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4562 RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
4563 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
4566 static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
4568 void __iomem *ioaddr = tp->mmio_addr;
4570 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4571 RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
4572 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
/* 8168dp: only the Config3 bit is needed */
4575 static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
4577 void __iomem *ioaddr = tp->mmio_addr;
4579 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4582 static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
4584 void __iomem *ioaddr = tp->mmio_addr;
4586 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
/* 8168e: also raise/restore MaxTxPacketSize and Config4 bit 0 */
4589 static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
4591 void __iomem *ioaddr = tp->mmio_addr;
4593 RTL_W8(MaxTxPacketSize, 0x3f);
4594 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4595 RTL_W8(Config4, RTL_R8(Config4) | 0x01);
4596 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
4599 static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
4601 void __iomem *ioaddr = tp->mmio_addr;
4603 RTL_W8(MaxTxPacketSize, 0x0c);
4604 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4605 RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
4606 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
/* 8168b rev 0: PCIe tweak only (keeps no-snoop enabled) */
4609 static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
4611 rtl_tx_performance_tweak(tp->pci_dev,
4612 (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
4615 static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
4617 rtl_tx_performance_tweak(tp->pci_dev,
4618 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
/* 8168b rev 1: rev-0 behaviour plus Config4 bit 0 */
4621 static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
4623 void __iomem *ioaddr = tp->mmio_addr;
4625 r8168b_0_hw_jumbo_enable(tp);
4627 RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
4630 static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
4632 void __iomem *ioaddr = tp->mmio_addr;
4634 r8168b_0_hw_jumbo_disable(tp);
4636 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
/*
 * rtl_init_jumbo_ops - install the jumbo-frame enable/disable hooks that
 * match the detected chip.  Chips with no jumbo support (8169, 810x, and
 * the newer 8168g+ versions listed near the end) leave the hooks NULL.
 */
4639 static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
4641 struct jumbo_ops *ops = &tp->jumbo_ops;
4643 switch (tp->mac_version) {
4644 case RTL_GIGA_MAC_VER_11:
4645 ops->disable = r8168b_0_hw_jumbo_disable;
4646 ops->enable = r8168b_0_hw_jumbo_enable;
4648 case RTL_GIGA_MAC_VER_12:
4649 case RTL_GIGA_MAC_VER_17:
4650 ops->disable = r8168b_1_hw_jumbo_disable;
4651 ops->enable = r8168b_1_hw_jumbo_enable;
4653 case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
4654 case RTL_GIGA_MAC_VER_19:
4655 case RTL_GIGA_MAC_VER_20:
4656 case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
4657 case RTL_GIGA_MAC_VER_22:
4658 case RTL_GIGA_MAC_VER_23:
4659 case RTL_GIGA_MAC_VER_24:
4660 case RTL_GIGA_MAC_VER_25:
4661 case RTL_GIGA_MAC_VER_26:
4662 ops->disable = r8168c_hw_jumbo_disable;
4663 ops->enable = r8168c_hw_jumbo_enable;
4665 case RTL_GIGA_MAC_VER_27:
4666 case RTL_GIGA_MAC_VER_28:
4667 ops->disable = r8168dp_hw_jumbo_disable;
4668 ops->enable = r8168dp_hw_jumbo_enable;
4670 case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
4671 case RTL_GIGA_MAC_VER_32:
4672 case RTL_GIGA_MAC_VER_33:
4673 case RTL_GIGA_MAC_VER_34:
4674 ops->disable = r8168e_hw_jumbo_disable;
4675 ops->enable = r8168e_hw_jumbo_enable;
4679 * No action needed for jumbo frames with 8169.
4680 * No jumbo for 810x at all.
4682 case RTL_GIGA_MAC_VER_40:
4683 case RTL_GIGA_MAC_VER_41:
4684 case RTL_GIGA_MAC_VER_42:
4685 case RTL_GIGA_MAC_VER_43:
4686 case RTL_GIGA_MAC_VER_44:
4687 case RTL_GIGA_MAC_VER_45:
4688 case RTL_GIGA_MAC_VER_46:
4689 case RTL_GIGA_MAC_VER_47:
4690 case RTL_GIGA_MAC_VER_48:
4692 ops->disable = NULL;
/* Poll condition: reset still pending while ChipCmd.CmdReset reads back set. */
4698 DECLARE_RTL_COND(rtl_chipcmd_cond)
4700 void __iomem *ioaddr = tp->mmio_addr;
4702 return RTL_R8(ChipCmd) & CmdReset;
/*
 * rtl_hw_reset - issue a software reset and busy-wait (100 iterations of
 * 100us) for the chip to clear the CmdReset bit.
 */
4705 static void rtl_hw_reset(struct rtl8169_private *tp)
4707 void __iomem *ioaddr = tp->mmio_addr;
4709 RTL_W8(ChipCmd, CmdReset);
4711 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
/*
 * rtl_request_uncached_firmware - look up, load and validate the PHY/MCU
 * firmware patch for this chip, caching it in tp->rtl_fw on success.
 * Failure paths release the firmware and warn; the driver keeps working
 * without the patch.  (Several error-path lines were dropped by the
 * extraction; code kept byte-identical.)
 */
4714 static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
4716 struct rtl_fw *rtl_fw;
4720 name = rtl_lookup_firmware_name(tp);
4722 goto out_no_firmware;
4724 rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
4728 rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
4732 rc = rtl_check_firmware(tp, rtl_fw);
4734 goto err_release_firmware;
4736 tp->rtl_fw = rtl_fw;
4740 err_release_firmware:
4741 release_firmware(rtl_fw->fw);
4745 netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
/* Load firmware only once: tp->rtl_fw holds an ERR_PTR until first attempt. */
4752 static void rtl_request_firmware(struct rtl8169_private *tp)
4754 if (IS_ERR(tp->rtl_fw))
4755 rtl_request_uncached_firmware(tp);
/* Stop accepting any RX traffic by clearing the accept bits in RxConfig. */
4758 static void rtl_rx_close(struct rtl8169_private *tp)
4760 void __iomem *ioaddr = tp->mmio_addr;
4762 RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
/* Poll condition: a "notify pending queue" TX poll request is still set. */
4765 DECLARE_RTL_COND(rtl_npq_cond)
4767 void __iomem *ioaddr = tp->mmio_addr;
4769 return RTL_R8(TxPoll) & NPQ;
/* Poll condition: TX FIFO reports empty via TxConfig.TXCFG_EMPTY. */
4772 DECLARE_RTL_COND(rtl_txcfg_empty_cond)
4774 void __iomem *ioaddr = tp->mmio_addr;
4776 return RTL_R32(TxConfig) & TXCFG_EMPTY;
/*
 * rtl8169_hw_reset - quiesce the NIC: mask/ack interrupts, then use the
 * chip-appropriate method to wait for in-flight TX DMA to drain before
 * the final reset (tail of the function is outside this extraction).
 */
4779 static void rtl8169_hw_reset(struct rtl8169_private *tp)
4781 void __iomem *ioaddr = tp->mmio_addr;
4783 /* Disable interrupts */
4784 rtl8169_irq_mask_and_ack(tp);
/* 8168dp family: wait for the NPQ bit to clear */
4788 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4789 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4790 tp->mac_version == RTL_GIGA_MAC_VER_31) {
4791 rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
/* newer chips: assert StopReq and wait for the TX FIFO to drain */
4792 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
4793 tp->mac_version == RTL_GIGA_MAC_VER_35 ||
4794 tp->mac_version == RTL_GIGA_MAC_VER_36 ||
4795 tp->mac_version == RTL_GIGA_MAC_VER_37 ||
4796 tp->mac_version == RTL_GIGA_MAC_VER_38 ||
4797 tp->mac_version == RTL_GIGA_MAC_VER_40 ||
4798 tp->mac_version == RTL_GIGA_MAC_VER_41 ||
4799 tp->mac_version == RTL_GIGA_MAC_VER_42 ||
4800 tp->mac_version == RTL_GIGA_MAC_VER_43 ||
4801 tp->mac_version == RTL_GIGA_MAC_VER_44 ||
4802 tp->mac_version == RTL_GIGA_MAC_VER_45 ||
4803 tp->mac_version == RTL_GIGA_MAC_VER_46 ||
4804 tp->mac_version == RTL_GIGA_MAC_VER_47 ||
4805 tp->mac_version == RTL_GIGA_MAC_VER_48) {
4806 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4807 rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
/* default (else branch): StopReq plus a fixed delay, no FIFO poll */
4809 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
/* Program TxConfig with the standard DMA burst size and inter-frame gap. */
4816 static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
4818 void __iomem *ioaddr = tp->mmio_addr;
4820 /* Set DMA burst size and Interframe Gap Time */
4821 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
4822 (InterFrameGap << TxInterFrameGapShift));
/*
 * rtl_hw_start - common "bring the hardware up" entry point; runs the
 * per-chip hw_start (call dropped by the extraction) and re-enables IRQs.
 */
4825 static void rtl_hw_start(struct net_device *dev)
4827 struct rtl8169_private *tp = netdev_priv(dev);
4831 rtl_irq_enable_all(tp);
/*
 * Publish the TX/RX descriptor ring DMA addresses to the chip as two
 * 32-bit halves.  High half first — see the comment below about iop3xx.
 */
4834 static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
4835 void __iomem *ioaddr)
4838 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
4839 * register to be written before TxDescAddrLow to work.
4840 * Switching from MMIO to I/O access fixes the issue as well.
4842 RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
4843 RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
4844 RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
4845 RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
/* Read CPlusCmd and write it straight back (read-modify-write flush). */
4848 static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
4852 cmd = RTL_R16(CPlusCmd);
4853 RTL_W16(CPlusCmd, cmd);
/* Set the RX size filter one byte above the buffer size (see comment). */
4857 static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
4859 /* Low hurts. Let's disable the filtering. */
4860 RTL_W16(RxMaxSize, rx_buf_sz + 1);
/*
 * rtl8169_set_magic_reg - write an undocumented tuning value to reg 0x7c
 * for 8110SCd/SCe, selected by mac_version and the sampled PCI clock speed.
 */
4863 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
4865 static const struct rtl_cfg2_info {
4870 { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
4871 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
4872 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
4873 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
4875 const struct rtl_cfg2_info *p = cfg2_info;
4879 clk = RTL_R8(Config2) & PCI_Clock_66MHz;
4880 for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
4881 if ((p->mac_version == mac_version) && (p->clk == clk)) {
4882 RTL_W32(0x7c, p->val);
/*
 * rtl_set_rx_mode - translate the netdev RX flags into the chip's accept
 * bits and 64-bit multicast hash filter (MAR0/MAR4).  Promiscuous and
 * over-limit/allmulti cases open the filter completely; otherwise each
 * multicast address sets one bit selected by the top 6 CRC bits.
 */
4888 static void rtl_set_rx_mode(struct net_device *dev)
4890 struct rtl8169_private *tp = netdev_priv(dev);
4891 void __iomem *ioaddr = tp->mmio_addr;
4892 u32 mc_filter[2]; /* Multicast hash filter */
4896 if (dev->flags & IFF_PROMISC) {
4897 /* Unconditionally log net taps. */
4898 netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
4900 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
4902 mc_filter[1] = mc_filter[0] = 0xffffffff;
4903 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
4904 (dev->flags & IFF_ALLMULTI)) {
4905 /* Too many to filter perfectly -- accept all multicasts. */
4906 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
4907 mc_filter[1] = mc_filter[0] = 0xffffffff;
4909 struct netdev_hw_addr *ha;
4911 rx_mode = AcceptBroadcast | AcceptMyPhys;
4912 mc_filter[1] = mc_filter[0] = 0;
4913 netdev_for_each_mc_addr(ha, dev) {
/* top 6 bits of the CRC pick one of 64 hash-filter bits */
4914 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
4915 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
4916 rx_mode |= AcceptMulticast;
4920 if (dev->features & NETIF_F_RXALL)
4921 rx_mode |= (AcceptErr | AcceptRunt);
4923 tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
/* chips after VER_06 use the opposite word/byte order for the hash */
4925 if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
4926 u32 data = mc_filter[0];
4928 mc_filter[0] = swab32(mc_filter[1]);
4929 mc_filter[1] = swab32(data);
/* VER_35 hash filtering is unreliable: force accept-all-multicast */
4932 if (tp->mac_version == RTL_GIGA_MAC_VER_35)
4933 mc_filter[1] = mc_filter[0] = 0xffffffff;
4935 RTL_W32(MAR0 + 4, mc_filter[1]);
4936 RTL_W32(MAR0 + 0, mc_filter[0]);
4938 RTL_W32(RxConfig, tmp);
/*
 * rtl_hw_start_8169 - hardware init for the original 8169 family
 * (VER_01..VER_06-ish): unlock config, order the TX/RX-enable vs. config
 * writes per chip revision, program CPlusCmd quirks, interrupt mitigation,
 * descriptor addresses, and the RX filter.
 */
4941 static void rtl_hw_start_8169(struct net_device *dev)
4943 struct rtl8169_private *tp = netdev_priv(dev);
4944 void __iomem *ioaddr = tp->mmio_addr;
4945 struct pci_dev *pdev = tp->pci_dev;
4947 if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
4948 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
4949 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4952 RTL_W8(Cfg9346, Cfg9346_Unlock);
/* early revisions must enable TX/RX before the config registers are set */
4953 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4954 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4955 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4956 tp->mac_version == RTL_GIGA_MAC_VER_04)
4957 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4961 RTL_W8(EarlyTxThres, NoEarlyTx);
4963 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
4965 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4966 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4967 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4968 tp->mac_version == RTL_GIGA_MAC_VER_04)
4969 rtl_set_rx_tx_config_registers(tp);
4971 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
4973 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4974 tp->mac_version == RTL_GIGA_MAC_VER_03) {
4975 dprintk("Set MAC Reg C+CR Offset 0xE0. "
4976 "Bit-3 and bit-14 MUST be 1\n");
4977 tp->cp_cmd |= (1 << 14);
4980 RTL_W16(CPlusCmd, tp->cp_cmd);
4982 rtl8169_set_magic_reg(ioaddr, tp->mac_version);
4985 * Undocumented corner. Supposedly:
4986 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
4988 RTL_W16(IntrMitigate, 0x0000);
4990 rtl_set_rx_tx_desc_registers(tp, ioaddr);
/* later revisions enable TX/RX only after configuration */
4992 if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
4993 tp->mac_version != RTL_GIGA_MAC_VER_02 &&
4994 tp->mac_version != RTL_GIGA_MAC_VER_03 &&
4995 tp->mac_version != RTL_GIGA_MAC_VER_04) {
4996 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4997 rtl_set_rx_tx_config_registers(tp);
5000 RTL_W8(Cfg9346, Cfg9346_Lock);
5002 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
5005 RTL_W32(RxMissed, 0);
5007 rtl_set_rx_mode(dev);
5009 /* no early-rx interrupts */
5010 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
/* Generic CSI dispatchers: no-op / all-ones when the chip has no CSI ops. */
5013 static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
5015 if (tp->csi_ops.write)
5016 tp->csi_ops.write(tp, addr, value);
5019 static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
5021 return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
/* Read-modify-write the upper byte of CSI config-space dword 0x070c. */
5024 static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
5028 csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
5029 rtl_csi_write(tp, 0x070c, csi | bits);
5032 static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
5034 rtl_csi_access_enable(tp, 0x17000000);
5037 static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
5039 rtl_csi_access_enable(tp, 0x27000000);
/* Poll condition: CSI transaction still in flight while CSIAR_FLAG is set. */
5042 DECLARE_RTL_COND(rtl_csiar_cond)
5044 void __iomem *ioaddr = tp->mmio_addr;
5046 return RTL_R32(CSIAR) & CSIAR_FLAG;
/* Plain CSI accessors: write data, kick CSIAR, poll for completion. */
5049 static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
5051 void __iomem *ioaddr = tp->mmio_addr;
5053 RTL_W32(CSIDR, value);
5054 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
5055 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
5057 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
5060 static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
5062 void __iomem *ioaddr = tp->mmio_addr;
5064 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
5065 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
5067 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
5068 RTL_R32(CSIDR) : ~0;
/* 8402 variant: same protocol but targets PCI function CSIAR_FUNC_NIC. */
5071 static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
5073 void __iomem *ioaddr = tp->mmio_addr;
5075 RTL_W32(CSIDR, value);
5076 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
5077 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
5080 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
5083 static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
5085 void __iomem *ioaddr = tp->mmio_addr;
5087 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
5088 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
5090 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
5091 RTL_R32(CSIDR) : ~0;
/* 8411 variant: targets PCI function CSIAR_FUNC_NIC2. */
5094 static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value)
5096 void __iomem *ioaddr = tp->mmio_addr;
5098 RTL_W32(CSIDR, value);
5099 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
5100 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
5103 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
5106 static u32 r8411_csi_read(struct rtl8169_private *tp, int addr)
5108 void __iomem *ioaddr = tp->mmio_addr;
5110 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 |
5111 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
5113 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
5114 RTL_R32(CSIDR) : ~0;
/*
 * rtl_init_csi_ops - pick the CSI accessor pair for this chip.  Legacy
 * (pre-PCIe) versions get none (reads then return ~0); 8402-style chips
 * (VER_37/38) and 8411 (VER_44) need function-targeted variants; everything
 * else uses the plain r8169 accessors.
 */
5117 static void rtl_init_csi_ops(struct rtl8169_private *tp)
5119 struct csi_ops *ops = &tp->csi_ops;
5121 switch (tp->mac_version) {
/* legacy chips: no CSI interface, hooks stay NULL */
5122 case RTL_GIGA_MAC_VER_01:
5123 case RTL_GIGA_MAC_VER_02:
5124 case RTL_GIGA_MAC_VER_03:
5125 case RTL_GIGA_MAC_VER_04:
5126 case RTL_GIGA_MAC_VER_05:
5127 case RTL_GIGA_MAC_VER_06:
5128 case RTL_GIGA_MAC_VER_10:
5129 case RTL_GIGA_MAC_VER_11:
5130 case RTL_GIGA_MAC_VER_12:
5131 case RTL_GIGA_MAC_VER_13:
5132 case RTL_GIGA_MAC_VER_14:
5133 case RTL_GIGA_MAC_VER_15:
5134 case RTL_GIGA_MAC_VER_16:
5135 case RTL_GIGA_MAC_VER_17:
5140 case RTL_GIGA_MAC_VER_37:
5141 case RTL_GIGA_MAC_VER_38:
5142 ops->write = r8402_csi_write;
5143 ops->read = r8402_csi_read;
5146 case RTL_GIGA_MAC_VER_44:
5147 ops->write = r8411_csi_write;
5148 ops->read = r8411_csi_read;
/* default: plain accessors */
5152 ops->write = r8169_csi_write;
5153 ops->read = r8169_csi_read;
5159 unsigned int offset;
/*
 * rtl_ephy_init - apply a table of ePHY fixups: for each entry, clear the
 * bits in .mask and set the bits in .bits of register .offset.
 * (The loop header line was dropped by the extraction.)
 */
5164 static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
5170 w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
5171 rtl_ephy_write(tp, e->offset, w);
/* Clear/set the PCIe LNKCTL CLKREQ-enable bit for the device. */
5176 static void rtl_disable_clock_request(struct pci_dev *pdev)
5178 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
5179 PCI_EXP_LNKCTL_CLKREQ_EN);
5182 static void rtl_enable_clock_request(struct pci_dev *pdev)
5184 pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
5185 PCI_EXP_LNKCTL_CLKREQ_EN);
/*
 * Toggle the Rdy_to_L23 bit in Config3 to allow/forbid PCIe L2/L3 entry.
 * (The @enable branch that sets the bit was dropped by the extraction.)
 */
5188 static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
5190 void __iomem *ioaddr = tp->mmio_addr;
5193 data = RTL_R8(Config3);
5198 data &= ~Rdy_to_L23;
5200 RTL_W8(Config3, data);
5203 #define R8168_CPCMD_QUIRK_MASK (\
/*
 * Per-chip hardware init for the 8168b/c/cp/d/dp variants.  The common
 * pattern: enable CSI access, mask the CPlusCmd quirk bits, disable the
 * beacon, tune the PCIe max read request (skipped when running with a
 * jumbo MTU, since the jumbo path programs its own value), and apply any
 * chip-specific ePHY fixups.
 */
5214 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
5216 void __iomem *ioaddr = tp->mmio_addr;
5217 struct pci_dev *pdev = tp->pci_dev;
5219 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
5221 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
5223 if (tp->dev->mtu <= ETH_DATA_LEN) {
5224 rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
5225 PCI_EXP_DEVCTL_NOSNOOP_EN);
/* 8168b rev e/f: 8168bb init plus TX size and Config4 bit 0 reset */
5229 static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
5231 void __iomem *ioaddr = tp->mmio_addr;
5233 rtl_hw_start_8168bb(tp);
5235 RTL_W8(MaxTxPacketSize, TxPacketMax);
5237 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
/* shared tail for the 8168c/cp inits */
5240 static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
5242 void __iomem *ioaddr = tp->mmio_addr;
5243 struct pci_dev *pdev = tp->pci_dev;
5245 RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
5247 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
5249 if (tp->dev->mtu <= ETH_DATA_LEN)
5250 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5252 rtl_disable_clock_request(pdev);
5254 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
5257 static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
5259 static const struct ephy_info e_info_8168cp[] = {
5260 { 0x01, 0, 0x0001 },
5261 { 0x02, 0x0800, 0x1000 },
5262 { 0x03, 0, 0x0042 },
5263 { 0x06, 0x0080, 0x0000 },
5267 rtl_csi_access_enable_2(tp);
5269 rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
5271 __rtl_hw_start_8168cp(tp);
5274 static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
5276 void __iomem *ioaddr = tp->mmio_addr;
5277 struct pci_dev *pdev = tp->pci_dev;
5279 rtl_csi_access_enable_2(tp);
5281 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
5283 if (tp->dev->mtu <= ETH_DATA_LEN)
5284 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5286 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
5289 static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
5291 void __iomem *ioaddr = tp->mmio_addr;
5292 struct pci_dev *pdev = tp->pci_dev;
5294 rtl_csi_access_enable_2(tp);
5296 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
/* Magic (undocumented debug-register poke) */
5299 RTL_W8(DBG_REG, 0x20);
5301 RTL_W8(MaxTxPacketSize, TxPacketMax);
5303 if (tp->dev->mtu <= ETH_DATA_LEN)
5304 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5306 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
5309 static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
5311 void __iomem *ioaddr = tp->mmio_addr;
5312 static const struct ephy_info e_info_8168c_1[] = {
5313 { 0x02, 0x0800, 0x1000 },
5314 { 0x03, 0, 0x0002 },
5315 { 0x06, 0x0080, 0x0000 }
5318 rtl_csi_access_enable_2(tp);
5320 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
5322 rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
5324 __rtl_hw_start_8168cp(tp);
5327 static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
5329 static const struct ephy_info e_info_8168c_2[] = {
5330 { 0x01, 0, 0x0001 },
5331 { 0x03, 0x0400, 0x0220 }
5334 rtl_csi_access_enable_2(tp);
5336 rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
5338 __rtl_hw_start_8168cp(tp);
/* c_3 is identical to c_2 */
5341 static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
5343 rtl_hw_start_8168c_2(tp);
/* c_4: no ePHY fixups needed */
5346 static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
5348 rtl_csi_access_enable_2(tp);
5350 __rtl_hw_start_8168cp(tp);
5353 static void rtl_hw_start_8168d(struct rtl8169_private *tp)
5355 void __iomem *ioaddr = tp->mmio_addr;
5356 struct pci_dev *pdev = tp->pci_dev;
5358 rtl_csi_access_enable_2(tp);
5360 rtl_disable_clock_request(pdev);
5362 RTL_W8(MaxTxPacketSize, TxPacketMax);
5364 if (tp->dev->mtu <= ETH_DATA_LEN)
5365 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5367 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
5370 static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
5372 void __iomem *ioaddr = tp->mmio_addr;
5373 struct pci_dev *pdev = tp->pci_dev;
5375 rtl_csi_access_enable_1(tp);
5377 if (tp->dev->mtu <= ETH_DATA_LEN)
5378 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5380 RTL_W8(MaxTxPacketSize, TxPacketMax);
5382 rtl_disable_clock_request(pdev);
/*
 * rtl_hw_start_8168d_4 - chip init for the RTL8168d/4: enable CSI access,
 * apply the PCIe read-request tweak, program the ePHY fixups from
 * e_info_8168d_4, then re-enable PCIe clock requests.
 * Note this loop's mask semantics differ from rtl_ephy_init(): here the
 * value kept is (read & mask) | bits, not (read & ~mask) | bits.
 */
5385 static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
5387 void __iomem *ioaddr = tp->mmio_addr;
5388 struct pci_dev *pdev = tp->pci_dev;
5389 static const struct ephy_info e_info_8168d_4[] = {
5391 { 0x19, 0x20, 0x50 },
5396 rtl_csi_access_enable_1(tp);
5398 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5400 RTL_W8(MaxTxPacketSize, TxPacketMax);
5402 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
5403 const struct ephy_info *e = e_info_8168d_4 + i;
5406 w = rtl_ephy_read(tp, e->offset);
/*
 * BUG FIX: the computed value was written to ePHY register 0x03 for
 * every table entry instead of back to the register that was read,
 * so only reg 0x03 was ever updated.  Write to e->offset, matching
 * the read above (and the later upstream conversion to rtl_ephy_init,
 * whose changelog calls the hard-coded 0x03 write out as bogus).
 */
5407 rtl_ephy_write(tp, e->offset, (w & e->mask) | e->bits);
5410 rtl_enable_clock_request(pdev);
/*
 * rtl_hw_start_8168e_1 - init for the first 8168e revision: large ePHY
 * fixup table, PCIe tweak, TX FIFO pointer reset via the MISC TXPLA_RST
 * bit, and SPI disable.
 */
5413 static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
5415 void __iomem *ioaddr = tp->mmio_addr;
5416 struct pci_dev *pdev = tp->pci_dev;
5417 static const struct ephy_info e_info_8168e_1[] = {
5418 { 0x00, 0x0200, 0x0100 },
5419 { 0x00, 0x0000, 0x0004 },
5420 { 0x06, 0x0002, 0x0001 },
5421 { 0x06, 0x0000, 0x0030 },
5422 { 0x07, 0x0000, 0x2000 },
5423 { 0x00, 0x0000, 0x0020 },
5424 { 0x03, 0x5800, 0x2000 },
5425 { 0x03, 0x0000, 0x0001 },
5426 { 0x01, 0x0800, 0x1000 },
5427 { 0x07, 0x0000, 0x4000 },
5428 { 0x1e, 0x0000, 0x2000 },
5429 { 0x19, 0xffff, 0xfe6c },
5430 { 0x0a, 0x0000, 0x0040 }
5433 rtl_csi_access_enable_2(tp);
5435 rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
5437 if (tp->dev->mtu <= ETH_DATA_LEN)
5438 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5440 RTL_W8(MaxTxPacketSize, TxPacketMax);
5442 rtl_disable_clock_request(pdev);
5444 /* Reset tx FIFO pointer */
5445 RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
5446 RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
5448 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
/*
 * rtl_hw_start_8168e_2 - second 8168e revision: ERI-based FIFO/flow
 * configuration, early TX size, auto-FIFO mode, EEE LED tuning.
 */
5451 static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
5453 void __iomem *ioaddr = tp->mmio_addr;
5454 struct pci_dev *pdev = tp->pci_dev;
5455 static const struct ephy_info e_info_8168e_2[] = {
5456 { 0x09, 0x0000, 0x0080 },
5457 { 0x19, 0x0000, 0x0224 }
5460 rtl_csi_access_enable_1(tp);
5462 rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
5464 if (tp->dev->mtu <= ETH_DATA_LEN)
5465 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5467 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5468 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5469 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
5470 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5471 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
5472 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
5473 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5474 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
5476 RTL_W8(MaxTxPacketSize, EarlySize);
5478 rtl_disable_clock_request(pdev);
5480 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5481 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5483 /* Adjust EEE LED frequency */
5484 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5486 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5487 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
5488 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
/* Common init tail shared by the 8168f variants (f_1 and 8411). */
5491 static void rtl_hw_start_8168f(struct rtl8169_private *tp)
5493 void __iomem *ioaddr = tp->mmio_addr;
5494 struct pci_dev *pdev = tp->pci_dev;
5496 rtl_csi_access_enable_2(tp);
5498 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5500 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5501 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5502 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
5503 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5504 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5505 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5506 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5507 rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5508 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
5509 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
5511 RTL_W8(MaxTxPacketSize, EarlySize);
5513 rtl_disable_clock_request(pdev);
5515 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5516 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5517 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5518 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
5519 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
/* 8168f rev 1: common 8168f init plus its own ePHY table and EEE LED tune. */
5522 static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
5524 void __iomem *ioaddr = tp->mmio_addr;
5525 static const struct ephy_info e_info_8168f_1[] = {
5526 { 0x06, 0x00c0, 0x0020 },
5527 { 0x08, 0x0001, 0x0002 },
5528 { 0x09, 0x0000, 0x0080 },
5529 { 0x19, 0x0000, 0x0224 }
5532 rtl_hw_start_8168f(tp);
5534 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5536 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
5538 /* Adjust EEE LED frequency */
5539 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
/* RTL8411: 8168f init, L2/L3 entry disabled, its own ePHY table. */
5542 static void rtl_hw_start_8411(struct rtl8169_private *tp)
5544 static const struct ephy_info e_info_8168f_1[] = {
5545 { 0x06, 0x00c0, 0x0020 },
5546 { 0x0f, 0xffff, 0x5200 },
5547 { 0x1e, 0x0000, 0x4000 },
5548 { 0x19, 0x0000, 0x0224 }
5551 rtl_hw_start_8168f(tp);
5552 rtl_pcie_state_l2l3_enable(tp, false);
5554 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5556 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
/*
 * rtl_hw_start_8168g_1 - base init for the 8168g generation: auto-FIFO,
 * ERI FIFO thresholds, PCIe tweak, RX DV gating off, early TX size, EEE
 * LED frequency, and PCIe L2/L3 entry disabled.  Also the shared first
 * stage for 8168g_2 and 8411_2 below.
 */
5559 static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5561 void __iomem *ioaddr = tp->mmio_addr;
5562 struct pci_dev *pdev = tp->pci_dev;
5564 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5566 rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
5567 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
5568 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
5569 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5571 rtl_csi_access_enable_1(tp);
5573 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5575 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5576 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5577 rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC);
5579 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5580 RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
5581 RTL_W8(MaxTxPacketSize, EarlySize);
5583 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5584 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5586 /* Adjust EEE LED frequency */
5587 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5589 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
5590 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
5592 rtl_pcie_state_l2l3_enable(tp, false);
/* 8168g rev 2: g_1 init, then ePHY fixups with ASPM/ClkReq held off. */
5595 static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
5597 void __iomem *ioaddr = tp->mmio_addr;
5598 static const struct ephy_info e_info_8168g_2[] = {
5599 { 0x00, 0x0000, 0x0008 },
5600 { 0x0c, 0x3df0, 0x0200 },
5601 { 0x19, 0xffff, 0xfc00 },
5602 { 0x1e, 0xffff, 0x20eb }
5605 rtl_hw_start_8168g_1(tp);
5607 /* disable aspm and clock request before access ephy */
5608 RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
5609 RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
5610 rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2));
/* RTL8411 rev 2: same pattern as 8168g_2 with its own ePHY table. */
5613 static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
5615 void __iomem *ioaddr = tp->mmio_addr;
5616 static const struct ephy_info e_info_8411_2[] = {
5617 { 0x00, 0x0000, 0x0008 },
5618 { 0x0c, 0x3df0, 0x0200 },
5619 { 0x0f, 0xffff, 0x5200 },
5620 { 0x19, 0x0020, 0x0000 },
5621 { 0x1e, 0x0000, 0x2000 }
5624 rtl_hw_start_8168g_1(tp);
5626 /* disable aspm and clock request before access ephy */
5627 RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
5628 RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
5629 rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
/* 8168H/8107E init: EPHY fixups, FIFO/ERI tuning, EEE LED frequency,
 * power-saving feature disables and MAC-OCP calibration derived from
 * the PHY's rg_saw_cnt reading.
 */
5632 static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
5634 void __iomem *ioaddr = tp->mmio_addr;
5635 struct pci_dev *pdev = tp->pci_dev;
5638 static const struct ephy_info e_info_8168h_1[] = {
5639 { 0x1e, 0x0800, 0x0001 },
5640 { 0x1d, 0x0000, 0x0800 },
5641 { 0x05, 0xffff, 0x2089 },
5642 { 0x06, 0xffff, 0x5881 },
5643 { 0x04, 0xffff, 0x154a },
5644 { 0x01, 0xffff, 0x068b }
5647 /* disable aspm and clock request before access ephy */
5648 RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
5649 RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
5650 rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
5652 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5654 rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
5655 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
5656 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
5657 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5659 rtl_csi_access_enable_1(tp);
5661 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5663 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5664 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5666 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_1111, 0x0010, 0x00, ERIAR_EXGMAC);
5668 rtl_w1w0_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f00, 0x00, ERIAR_EXGMAC);
5670 rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);
5672 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5673 RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
5674 RTL_W8(MaxTxPacketSize, EarlySize);
5676 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5677 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5679 /* Adjust EEE LED frequency */
5680 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5682 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
/* Fix: this is a read-modify-write of MISC_1 and must be written back to
 * MISC_1; the previous code wrote the modified MISC_1 value into DLLPR,
 * clobbering DLLPR and leaving PFM_D3COLD_EN set in MISC_1.
 */
5683 RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN);
5685 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN);
5687 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
5689 rtl_pcie_state_l2l3_enable(tp, false);
/* Read the saw counter through PHY page 0x0c42, reg 0x13, then restore
 * the page selector.
 */
5691 rtl_writephy(tp, 0x1f, 0x0c42);
5692 rg_saw_cnt = rtl_readphy(tp, 0x13);
5693 rtl_writephy(tp, 0x1f, 0x0000);
5694 if (rg_saw_cnt > 0) {
5697 sw_cnt_1ms_ini = 16000000/rg_saw_cnt;
5698 sw_cnt_1ms_ini &= 0x0fff;
5699 data = r8168_mac_ocp_read(tp, 0xd412);
5701 data |= sw_cnt_1ms_ini;
5702 r8168_mac_ocp_write(tp, 0xd412, data);
5705 data = r8168_mac_ocp_read(tp, 0xe056);
5708 r8168_mac_ocp_write(tp, 0xe056, data);
5710 data = r8168_mac_ocp_read(tp, 0xe052);
5713 r8168_mac_ocp_write(tp, 0xe052, data);
5715 data = r8168_mac_ocp_read(tp, 0xe0d6);
5718 r8168_mac_ocp_write(tp, 0xe0d6, data);
5720 data = r8168_mac_ocp_read(tp, 0xd420);
5723 r8168_mac_ocp_write(tp, 0xd420, data);
5725 r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
5726 r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
5727 r8168_mac_ocp_write(tp, 0xc094, 0x0000);
5728 r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
/* Common 8168-family hardware start: unlock config registers, program
 * shared Tx/Rx settings, dispatch to the per-revision init routine by
 * mac_version, then relock and enable Tx/Rx.
 */
5731 static void rtl_hw_start_8168(struct net_device *dev)
5733 struct rtl8169_private *tp = netdev_priv(dev);
5734 void __iomem *ioaddr = tp->mmio_addr;
5736 RTL_W8(Cfg9346, Cfg9346_Unlock);
5738 RTL_W8(MaxTxPacketSize, TxPacketMax);
5740 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5742 tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
5744 RTL_W16(CPlusCmd, tp->cp_cmd);
5746 RTL_W16(IntrMitigate, 0x5151);
5748 /* Work around for RxFIFO overflow. */
5749 if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
5750 tp->event_slow |= RxFIFOOver | PCSTimeout;
5751 tp->event_slow &= ~RxOverflow;
5754 rtl_set_rx_tx_desc_registers(tp, ioaddr);
5756 rtl_set_rx_tx_config_registers(tp);
/* Per-revision bring-up; each case calls a rtl_hw_start_8168* helper. */
5760 switch (tp->mac_version) {
5761 case RTL_GIGA_MAC_VER_11:
5762 rtl_hw_start_8168bb(tp);
5765 case RTL_GIGA_MAC_VER_12:
5766 case RTL_GIGA_MAC_VER_17:
5767 rtl_hw_start_8168bef(tp);
5770 case RTL_GIGA_MAC_VER_18:
5771 rtl_hw_start_8168cp_1(tp);
5774 case RTL_GIGA_MAC_VER_19:
5775 rtl_hw_start_8168c_1(tp);
5778 case RTL_GIGA_MAC_VER_20:
5779 rtl_hw_start_8168c_2(tp);
5782 case RTL_GIGA_MAC_VER_21:
5783 rtl_hw_start_8168c_3(tp);
5786 case RTL_GIGA_MAC_VER_22:
5787 rtl_hw_start_8168c_4(tp);
5790 case RTL_GIGA_MAC_VER_23:
5791 rtl_hw_start_8168cp_2(tp);
5794 case RTL_GIGA_MAC_VER_24:
5795 rtl_hw_start_8168cp_3(tp);
5798 case RTL_GIGA_MAC_VER_25:
5799 case RTL_GIGA_MAC_VER_26:
5800 case RTL_GIGA_MAC_VER_27:
5801 rtl_hw_start_8168d(tp);
5804 case RTL_GIGA_MAC_VER_28:
5805 rtl_hw_start_8168d_4(tp);
5808 case RTL_GIGA_MAC_VER_31:
5809 rtl_hw_start_8168dp(tp);
5812 case RTL_GIGA_MAC_VER_32:
5813 case RTL_GIGA_MAC_VER_33:
5814 rtl_hw_start_8168e_1(tp);
5816 case RTL_GIGA_MAC_VER_34:
5817 rtl_hw_start_8168e_2(tp);
5820 case RTL_GIGA_MAC_VER_35:
5821 case RTL_GIGA_MAC_VER_36:
5822 rtl_hw_start_8168f_1(tp);
5825 case RTL_GIGA_MAC_VER_38:
5826 rtl_hw_start_8411(tp);
5829 case RTL_GIGA_MAC_VER_40:
5830 case RTL_GIGA_MAC_VER_41:
5831 rtl_hw_start_8168g_1(tp);
5833 case RTL_GIGA_MAC_VER_42:
5834 rtl_hw_start_8168g_2(tp);
5837 case RTL_GIGA_MAC_VER_44:
5838 rtl_hw_start_8411_2(tp);
5841 case RTL_GIGA_MAC_VER_45:
5842 case RTL_GIGA_MAC_VER_46:
5843 rtl_hw_start_8168h_1(tp);
/* Unknown mac_version: log it and fall through to the common epilogue. */
5847 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
5848 dev->name, tp->mac_version);
5852 RTL_W8(Cfg9346, Cfg9346_Lock);
5854 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5856 rtl_set_rx_mode(dev);
5858 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
5861 #define R810X_CPCMD_QUIRK_MASK (\
/* 8102E variant 1 init: CSI access, NAK fix via DBG_REG, PCIe read
 * request tuning, Config1/3 setup, LED quirk, then EPHY fixups.
 */
5872 static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
5874 void __iomem *ioaddr = tp->mmio_addr;
5875 struct pci_dev *pdev = tp->pci_dev;
5876 static const struct ephy_info e_info_8102e_1[] = {
5877 { 0x01, 0, 0x6e65 },
5878 { 0x02, 0, 0x091f },
5879 { 0x03, 0, 0xc2f9 },
5880 { 0x06, 0, 0xafb5 },
5881 { 0x07, 0, 0x0e00 },
5882 { 0x19, 0, 0xec80 },
5883 { 0x01, 0, 0x2e65 },
5888 rtl_csi_access_enable_2(tp);
5890 RTL_W8(DBG_REG, FIX_NAK_1);
5892 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5895 LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
5896 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
/* If both LED bits came up set, clear LEDS0 (hardware quirk). */
5898 cfg1 = RTL_R8(Config1);
5899 if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
5900 RTL_W8(Config1, cfg1 & ~LEDS0);
5902 rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
/* 8102E variant 2 init: CSI access, PCIe read request tuning, and
 * basic Config1/Config3 programming (no EPHY fixups here).
 */
5905 static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
5907 void __iomem *ioaddr = tp->mmio_addr;
5908 struct pci_dev *pdev = tp->pci_dev;
5910 rtl_csi_access_enable_2(tp);
5912 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5914 RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
5915 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
/* 8102E variant 3 init: variant-2 setup plus one extra EPHY write. */
5918 static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
5920 rtl_hw_start_8102e_2(tp);
5922 rtl_ephy_write(tp, 0x03, 0xc2f9);
/* 8105E variant 1 init: ASPM exit forcing, early tally counter off,
 * MCU/DLLPR power-feature bits, EPHY fixups, and L2/L3 PCIe state off.
 */
5925 static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
5927 void __iomem *ioaddr = tp->mmio_addr;
5928 static const struct ephy_info e_info_8105e_1[] = {
5929 { 0x07, 0, 0x4000 },
5930 { 0x19, 0, 0x0200 },
5931 { 0x19, 0, 0x0020 },
5932 { 0x1e, 0, 0x2000 },
5933 { 0x03, 0, 0x0001 },
5934 { 0x19, 0, 0x0100 },
5935 { 0x19, 0, 0x0004 },
5939 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5940 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5942 /* Disable Early Tally Counter */
5943 RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
5945 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5946 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5948 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
5950 rtl_pcie_state_l2l3_enable(tp, false);
/* 8105E variant 2 init: variant-1 setup plus setting bit 15 of EPHY
 * register 0x1e.
 */
5953 static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
5955 rtl_hw_start_8105e_1(tp);
5956 rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
/* 8402 init: CSI access, ASPM exit forcing, auto FIFO, OOB clear, EPHY
 * fixup, PCIe tuning and a series of ERI register writes.
 */
5959 static void rtl_hw_start_8402(struct rtl8169_private *tp)
5961 void __iomem *ioaddr = tp->mmio_addr;
5962 static const struct ephy_info e_info_8402[] = {
5963 { 0x19, 0xffff, 0xff64 },
5967 rtl_csi_access_enable_2(tp);
5969 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5970 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5972 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5973 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5975 rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
5977 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
5979 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
5980 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
5981 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5982 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5983 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5984 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5985 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
5987 rtl_pcie_state_l2l3_enable(tp, false);
/* 8106 init: ASPM exit forcing, MISC/MCU programming, PFM off, and
 * L2/L3 PCIe state off.
 */
5990 static void rtl_hw_start_8106(struct rtl8169_private *tp)
5992 void __iomem *ioaddr = tp->mmio_addr;
5994 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5995 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5997 RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
5998 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5999 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
6001 rtl_pcie_state_l2l3_enable(tp, false);
/* Common 8101-family hardware start: quirk handling, shared register
 * setup, dispatch by mac_version to the per-revision init routine,
 * then relock and enable Tx/Rx.
 */
6004 static void rtl_hw_start_8101(struct net_device *dev)
6006 struct rtl8169_private *tp = netdev_priv(dev);
6007 void __iomem *ioaddr = tp->mmio_addr;
6008 struct pci_dev *pdev = tp->pci_dev;
/* Newer 8101 chips do not need RxFIFOOver handling on the slow path. */
6010 if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
6011 tp->event_slow &= ~RxFIFOOver;
/* VER_13/16 require the PCIe no-snoop quirk. */
6013 if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
6014 tp->mac_version == RTL_GIGA_MAC_VER_16)
6015 pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
6016 PCI_EXP_DEVCTL_NOSNOOP_EN);
6018 RTL_W8(Cfg9346, Cfg9346_Unlock);
6020 RTL_W8(MaxTxPacketSize, TxPacketMax);
6022 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
6024 tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
6025 RTL_W16(CPlusCmd, tp->cp_cmd);
6027 rtl_set_rx_tx_desc_registers(tp, ioaddr);
6029 rtl_set_rx_tx_config_registers(tp);
6031 switch (tp->mac_version) {
6032 case RTL_GIGA_MAC_VER_07:
6033 rtl_hw_start_8102e_1(tp);
6036 case RTL_GIGA_MAC_VER_08:
6037 rtl_hw_start_8102e_3(tp);
6040 case RTL_GIGA_MAC_VER_09:
6041 rtl_hw_start_8102e_2(tp);
6044 case RTL_GIGA_MAC_VER_29:
6045 rtl_hw_start_8105e_1(tp);
6047 case RTL_GIGA_MAC_VER_30:
6048 rtl_hw_start_8105e_2(tp);
6051 case RTL_GIGA_MAC_VER_37:
6052 rtl_hw_start_8402(tp);
6055 case RTL_GIGA_MAC_VER_39:
6056 rtl_hw_start_8106(tp);
6058 case RTL_GIGA_MAC_VER_43:
6059 rtl_hw_start_8168g_2(tp);
6061 case RTL_GIGA_MAC_VER_47:
6062 case RTL_GIGA_MAC_VER_48:
6063 rtl_hw_start_8168h_1(tp);
6067 RTL_W8(Cfg9346, Cfg9346_Lock);
6069 RTL_W16(IntrMitigate, 0x0000);
6071 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
6073 rtl_set_rx_mode(dev);
6077 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
/* ndo_change_mtu: reject out-of-range MTUs, toggle jumbo frame support
 * depending on whether the new MTU exceeds the standard Ethernet
 * payload size, and refresh netdev features.
 */
6080 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
6082 struct rtl8169_private *tp = netdev_priv(dev);
6084 if (new_mtu < ETH_ZLEN ||
6085 new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
6088 if (new_mtu > ETH_DATA_LEN)
6089 rtl_hw_jumbo_enable(tp);
6091 rtl_hw_jumbo_disable(tp);
6094 netdev_update_features(dev);
/* Poison an Rx descriptor so the NIC will not DMA into it: bogus
 * address plus cleared ownership/reserved bits.
 */
6099 static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
6101 desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
6102 desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
/* Unmap one Rx buffer's DMA mapping and poison its descriptor.
 * The buffer pointed to by *data_buff is released by elided code.
 */
6105 static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
6106 void **data_buff, struct RxDesc *desc)
6108 dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
6113 rtl8169_make_unusable_by_asic(desc);
/* Hand a descriptor back to the NIC: keep its RingEnd bit, set DescOwn
 * and the buffer size.
 */
6116 static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
6118 u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
6120 desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
/* Store a DMA address in the descriptor, then flip ownership to the
 * NIC via rtl8169_mark_to_asic().
 */
6123 static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
6126 desc->addr = cpu_to_le64(mapping);
6128 rtl8169_mark_to_asic(desc, rx_buf_sz);
/* Round a buffer pointer up to the next 16-byte boundary. */
6131 static inline void *rtl8169_align(void *data)
6133 return (void *)ALIGN((long)data, 16);
/* Allocate one Rx data buffer on the device's NUMA node, 16-byte
 * aligned, DMA-map it and attach it to @desc. On mapping failure the
 * buffer is released by elided error-path code.
 */
6136 static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
6137 struct RxDesc *desc)
6141 struct device *d = &tp->pci_dev->dev;
6142 struct net_device *dev = tp->dev;
6143 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
6145 data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
/* If kmalloc did not return 16-byte aligned memory, over-allocate by
 * 15 bytes so rtl8169_align() can fix it up.
 */
6149 if (rtl8169_align(data) != data) {
6151 data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
6156 mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
6158 if (unlikely(dma_mapping_error(d, mapping))) {
6159 if (net_ratelimit())
6160 netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
6164 rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
/* Release every allocated Rx data buffer and poison its descriptor. */
6172 static void rtl8169_rx_clear(struct rtl8169_private *tp)
6176 for (i = 0; i < NUM_RX_DESC; i++) {
6177 if (tp->Rx_databuff[i]) {
6178 rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
6179 tp->RxDescArray + i);
/* Set the RingEnd bit so the NIC wraps back to the first descriptor. */
6184 static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
6186 desc->opts1 |= cpu_to_le32(RingEnd);
/* Populate every empty Rx ring slot with a fresh buffer; on allocation
 * failure the error path tears down what was allocated via
 * rtl8169_rx_clear(). The last descriptor gets the RingEnd marker.
 */
6189 static int rtl8169_rx_fill(struct rtl8169_private *tp)
6193 for (i = 0; i < NUM_RX_DESC; i++) {
/* Slot already holds a buffer — skip it. */
6196 if (tp->Rx_databuff[i])
6199 data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
6201 rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
6204 tp->Rx_databuff[i] = data;
6207 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
6211 rtl8169_rx_clear(tp);
/* Reset ring indexes, zero the Tx bookkeeping and Rx buffer pointer
 * arrays, then fill the Rx ring.
 */
6215 static int rtl8169_init_ring(struct net_device *dev)
6217 struct rtl8169_private *tp = netdev_priv(dev);
6219 rtl8169_init_ring_indexes(tp);
6221 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
6222 memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
6224 return rtl8169_rx_fill(tp);
/* Undo the DMA mapping of one Tx descriptor; the associated
 * ring_info/descriptor cleanup lines are elided here.
 */
6227 static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
6228 struct TxDesc *desc)
6230 unsigned int len = tx_skb->len;
6232 dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
/* Drop @n in-flight Tx entries starting at ring index @start: unmap
 * their DMA buffers, count them as dropped and free the skbs.
 */
6240 static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
6245 for (i = 0; i < n; i++) {
6246 unsigned int entry = (start + i) % NUM_TX_DESC;
6247 struct ring_info *tx_skb = tp->tx_skb + entry;
6248 unsigned int len = tx_skb->len;
6251 struct sk_buff *skb = tx_skb->skb;
6253 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
6254 tp->TxDescArray + entry);
6256 tp->dev->stats.tx_dropped++;
6257 dev_kfree_skb_any(skb);
/* Drop everything pending on the Tx ring and reset its indexes. */
6264 static void rtl8169_tx_clear(struct rtl8169_private *tp)
6266 rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
6267 tp->cur_tx = tp->dirty_tx = 0;
/* Workqueue reset handler: quiesce NAPI and the Tx queue, reset the
 * hardware, recycle the Rx ring in place, drop pending Tx, then
 * restart everything and re-check link state.
 */
6270 static void rtl_reset_work(struct rtl8169_private *tp)
6272 struct net_device *dev = tp->dev;
6275 napi_disable(&tp->napi);
6276 netif_stop_queue(dev);
/* Wait for in-flight softirq/xmit activity to drain before touching
 * the rings.
 */
6277 synchronize_sched();
6279 rtl8169_hw_reset(tp);
/* Existing Rx buffers are kept; just hand the descriptors back. */
6281 for (i = 0; i < NUM_RX_DESC; i++)
6282 rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
6284 rtl8169_tx_clear(tp);
6285 rtl8169_init_ring_indexes(tp);
6287 napi_enable(&tp->napi);
6289 netif_wake_queue(dev);
6290 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
/* ndo_tx_timeout: defer recovery to the reset work item. */
6293 static void rtl8169_tx_timeout(struct net_device *dev)
6295 struct rtl8169_private *tp = netdev_priv(dev);
6297 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/* Map and queue the paged fragments of @skb into consecutive Tx
 * descriptors following the head entry. The skb pointer and LastFrag
 * bit are attached to the final fragment only, so rtl_tx() frees the
 * skb exactly once. On DMA mapping failure, already-queued fragments
 * are unwound via rtl8169_tx_clear_range().
 */
6300 static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
6303 struct skb_shared_info *info = skb_shinfo(skb);
6304 unsigned int cur_frag, entry;
6305 struct TxDesc *uninitialized_var(txd);
6306 struct device *d = &tp->pci_dev->dev;
6309 for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
6310 const skb_frag_t *frag = info->frags + cur_frag;
6315 entry = (entry + 1) % NUM_TX_DESC;
6317 txd = tp->TxDescArray + entry;
6318 len = skb_frag_size(frag);
6319 addr = skb_frag_address(frag);
6320 mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
6321 if (unlikely(dma_mapping_error(d, mapping))) {
6322 if (net_ratelimit())
6323 netif_err(tp, drv, tp->dev,
6324 "Failed to map TX fragments DMA!\n");
6328 /* Anti gcc 2.95.3 bugware (sic) */
6329 status = opts[0] | len |
6330 (RingEnd * !((entry + 1) % NUM_TX_DESC));
6332 txd->opts1 = cpu_to_le32(status);
6333 txd->opts2 = cpu_to_le32(opts[1]);
6334 txd->addr = cpu_to_le64(mapping);
6336 tp->tx_skb[entry].len = len;
/* Last fragment owns the skb and carries the LastFrag marker. */
6340 tp->tx_skb[entry].skb = skb;
6341 txd->opts1 |= cpu_to_le32(LastFrag);
6347 rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
/* Pad @skb up to the 60-byte Ethernet minimum (allocation failure in
 * skb_padto() is handled by elided code) and extend its data length to
 * cover the padding.
 */
6351 static bool rtl_skb_pad(struct sk_buff *skb)
6353 if (skb_padto(skb, ETH_ZLEN))
6355 skb_put(skb, ETH_ZLEN - skb->len);
/* True when this skb would trigger the VER_34 hardware padding bug:
 * runt frames on that revision must be padded in software.
 */
6359 static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
6361 return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
6364 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
6365 struct net_device *dev);
6366 /* r8169_csum_workaround()
6367 * The hw limits the value of the transport offset. When the offset is out of
6368 * range, calculate the checksum in software.
/* Software fallback when the NIC cannot offload this skb (see comment
 * above): GSO skbs are segmented and each segment retransmitted;
 * checksum-partial skbs get their checksum computed in software before
 * retransmission; anything else is dropped and counted.
 */
6370 static void r8169_csum_workaround(struct rtl8169_private *tp,
6371 struct sk_buff *skb)
6373 if (skb_shinfo(skb)->gso_size) {
6374 netdev_features_t features = tp->dev->features;
6375 struct sk_buff *segs, *nskb;
/* Strip the offloads the hardware cannot apply so the stack
 * produces plain segments.
 */
6377 features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
6378 segs = skb_gso_segment(skb, features);
6379 if (IS_ERR(segs) || !segs)
6386 rtl8169_start_xmit(nskb, tp->dev);
6390 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
6391 if (skb_checksum_help(skb) < 0)
6394 rtl8169_start_xmit(skb, tp->dev);
6396 struct net_device_stats *stats;
6399 stats = &tp->dev->stats;
6400 stats->tx_dropped++;
6405 /* msdn_giant_send_check()
6406 * According to Microsoft's documentation, the TCP pseudo header excludes the
6407 * packet length for IPv6 TCP large packets.
/* Recompute the TCP pseudo-header checksum of an IPv6 large-send skb
 * with a zero length field (see comment above). skb_cow_head() makes
 * the header writable first.
 */
6409 static int msdn_giant_send_check(struct sk_buff *skb)
6411 const struct ipv6hdr *ipv6h;
6415 ret = skb_cow_head(skb, 0);
6419 ipv6h = ipv6_hdr(skb);
6423 th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
/* Return the skb's L3 protocol, looking through a single 802.1Q VLAN
 * header when present.
 */
6428 static inline __be16 get_protocol(struct sk_buff *skb)
6432 if (skb->protocol == htons(ETH_P_8021Q))
6433 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
6435 protocol = skb->protocol;
/* Fill the first-generation descriptor offload bits (opts[0]): MSS for
 * TSO, or IPv4 TCP/UDP checksum flags for CHECKSUM_PARTIAL skbs.
 */
6440 static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp,
6441 struct sk_buff *skb, u32 *opts)
6443 u32 mss = skb_shinfo(skb)->gso_size;
6447 opts[0] |= min(mss, TD_MSS_MAX) << TD0_MSS_SHIFT;
6448 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
6449 const struct iphdr *ip = ip_hdr(skb);
6451 if (ip->protocol == IPPROTO_TCP)
6452 opts[0] |= TD0_IP_CS | TD0_TCP_CS;
6453 else if (ip->protocol == IPPROTO_UDP)
6454 opts[0] |= TD0_IP_CS | TD0_UDP_CS;
/* Fill the second-generation descriptor offload bits: giant-send (TSO)
 * setup for IPv4/IPv6 including the MSDN pseudo-header fixup, or
 * checksum-offload flags for CHECKSUM_PARTIAL skbs. Returning false
 * sends the skb down the r8169_csum_workaround() software path, e.g.
 * when the transport offset exceeds what the hardware supports.
 */
6462 static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
6463 struct sk_buff *skb, u32 *opts)
6465 u32 transport_offset = (u32)skb_transport_offset(skb);
6466 u32 mss = skb_shinfo(skb)->gso_size;
6469 if (transport_offset > GTTCPHO_MAX) {
6470 netif_warn(tp, tx_err, tp->dev,
6471 "Invalid transport offset 0x%x for TSO\n",
6476 switch (get_protocol(skb)) {
6477 case htons(ETH_P_IP):
6478 opts[0] |= TD1_GTSENV4;
6481 case htons(ETH_P_IPV6):
/* IPv6 large send needs the zero-length pseudo-header
 * checksum (see msdn_giant_send_check).
 */
6482 if (msdn_giant_send_check(skb))
6485 opts[0] |= TD1_GTSENV6;
6493 opts[0] |= transport_offset << GTTCPHO_SHIFT;
6494 opts[1] |= min(mss, TD_MSS_MAX) << TD1_MSS_SHIFT;
6495 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
/* VER_34 pads runts in software; checksum must then also be
 * done in software before padding.
 */
6498 if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
6499 return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb);
6501 if (transport_offset > TCPHO_MAX) {
6502 netif_warn(tp, tx_err, tp->dev,
6503 "Invalid transport offset 0x%x\n",
6508 switch (get_protocol(skb)) {
6509 case htons(ETH_P_IP):
6510 opts[1] |= TD1_IPv4_CS;
6511 ip_protocol = ip_hdr(skb)->protocol;
6514 case htons(ETH_P_IPV6):
6515 opts[1] |= TD1_IPv6_CS;
6516 ip_protocol = ipv6_hdr(skb)->nexthdr;
6520 ip_protocol = IPPROTO_RAW;
6524 if (ip_protocol == IPPROTO_TCP)
6525 opts[1] |= TD1_TCP_CS;
6526 else if (ip_protocol == IPPROTO_UDP)
6527 opts[1] |= TD1_UDP_CS;
6531 opts[1] |= transport_offset << TCPHO_SHIFT;
6533 if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
6534 return rtl_skb_pad(skb);
/* ndo_start_xmit: map the skb head, queue its fragments, publish the
 * descriptor to the NIC (DescOwn last), kick TxPoll and manage queue
 * stop/wake against ring occupancy. Offload setup is delegated to
 * tp->tso_csum(); skbs it rejects go through r8169_csum_workaround().
 */
6540 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
6541 struct net_device *dev)
6543 struct rtl8169_private *tp = netdev_priv(dev);
6544 unsigned int entry = tp->cur_tx % NUM_TX_DESC;
6545 struct TxDesc *txd = tp->TxDescArray + entry;
6546 void __iomem *ioaddr = tp->mmio_addr;
6547 struct device *d = &tp->pci_dev->dev;
6553 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
6554 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
/* Entry still owned by the NIC — cannot reuse it yet. */
6558 if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
6561 opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
6564 if (!tp->tso_csum(tp, skb, opts)) {
6565 r8169_csum_workaround(tp, skb);
6566 return NETDEV_TX_OK;
6569 len = skb_headlen(skb);
6570 mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
6571 if (unlikely(dma_mapping_error(d, mapping))) {
6572 if (net_ratelimit())
6573 netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
6577 tp->tx_skb[entry].len = len;
6578 txd->addr = cpu_to_le64(mapping);
6580 frags = rtl8169_xmit_frags(tp, skb, opts);
/* With fragments the skb is owned by the last fragment's entry;
 * otherwise the head entry owns it and is also the last fragment.
 */
6584 opts[0] |= FirstFrag;
6586 opts[0] |= FirstFrag | LastFrag;
6587 tp->tx_skb[entry].skb = skb;
6590 txd->opts2 = cpu_to_le32(opts[1]);
6592 skb_tx_timestamp(skb);
6596 /* Anti gcc 2.95.3 bugware (sic) */
6597 status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
6598 txd->opts1 = cpu_to_le32(status);
6600 tp->cur_tx += frags + 1;
6604 RTL_W8(TxPoll, NPQ);
6608 if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
6609 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
6610 * not miss a ring update when it notices a stopped queue.
6613 netif_stop_queue(dev);
6614 /* Sync with rtl_tx:
6615 * - publish queue status and cur_tx ring index (write barrier)
6616 * - refresh dirty_tx ring index (read barrier).
6617 * May the current thread have a pessimistic view of the ring
6618 * status and forget to wake up queue, a racing rtl_tx thread
6622 if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
6623 netif_wake_queue(dev);
6626 return NETDEV_TX_OK;
/* Error unwind: drop the mapped head, free the skb, count the drop. */
6629 rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
6631 dev_kfree_skb_any(skb);
6632 dev->stats.tx_dropped++;
6633 return NETDEV_TX_OK;
6636 netif_stop_queue(dev);
6637 dev->stats.tx_dropped++;
6638 return NETDEV_TX_BUSY;
/* Handle a PCI error reported through the SYSErr slow event: log the
 * PCI command/status words, rewrite PCI_COMMAND, acknowledge all error
 * status bits, work around the boot-time DAC issue, then reset the
 * chip and schedule the reset task.
 */
6641 static void rtl8169_pcierr_interrupt(struct net_device *dev)
6643 struct rtl8169_private *tp = netdev_priv(dev);
6644 struct pci_dev *pdev = tp->pci_dev;
6645 u16 pci_status, pci_cmd;
6647 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
6648 pci_read_config_word(pdev, PCI_STATUS, &pci_status);
6650 netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
6651 pci_cmd, pci_status);
6654 * The recovery sequence below admits a very elaborated explanation:
6655 * - it seems to work;
6656 * - I did not see what else could be done;
6657 * - it makes iop3xx happy.
6659 * Feel free to adjust to your needs.
6661 if (pdev->broken_parity_status)
6662 pci_cmd &= ~PCI_COMMAND_PARITY;
6664 pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
6666 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
/* Write-one-to-clear the latched PCI error status bits. */
6668 pci_write_config_word(pdev, PCI_STATUS,
6669 pci_status & (PCI_STATUS_DETECTED_PARITY |
6670 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
6671 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
6673 /* The infamous DAC f*ckup only happens at boot time */
6674 if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
6675 void __iomem *ioaddr = tp->mmio_addr;
6677 netif_info(tp, intr, dev, "disabling PCI DAC\n");
6678 tp->cp_cmd &= ~PCIDAC;
6679 RTL_W16(CPlusCmd, tp->cp_cmd);
6680 dev->features &= ~NETIF_F_HIGHDMA;
6683 rtl8169_hw_reset(tp);
6685 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/* Tx completion: walk from dirty_tx toward cur_tx, reclaiming every
 * descriptor the NIC has released (DescOwn clear), updating stats on
 * LastFrag entries, waking the queue when room opens up and kicking an
 * extra TxPoll if new packets were queued meanwhile.
 */
6688 static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
6690 unsigned int dirty_tx, tx_left;
6692 dirty_tx = tp->dirty_tx;
6694 tx_left = tp->cur_tx - dirty_tx;
6696 while (tx_left > 0) {
6697 unsigned int entry = dirty_tx % NUM_TX_DESC;
6698 struct ring_info *tx_skb = tp->tx_skb + entry;
6702 status = le32_to_cpu(tp->TxDescArray[entry].opts1);
/* Stop at the first descriptor still owned by the NIC. */
6703 if (status & DescOwn)
6706 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
6707 tp->TxDescArray + entry);
6708 if (status & LastFrag) {
6709 u64_stats_update_begin(&tp->tx_stats.syncp);
6710 tp->tx_stats.packets++;
6711 tp->tx_stats.bytes += tx_skb->skb->len;
6712 u64_stats_update_end(&tp->tx_stats.syncp);
6713 dev_kfree_skb_any(tx_skb->skb);
6720 if (tp->dirty_tx != dirty_tx) {
6721 tp->dirty_tx = dirty_tx;
6722 /* Sync with rtl8169_start_xmit:
6723 * - publish dirty_tx ring index (write barrier)
6724 * - refresh cur_tx ring index and queue status (read barrier)
6725 * May the current thread miss the stopped queue condition,
6726 * a racing xmit thread can only have a right view of the
6730 if (netif_queue_stopped(dev) &&
6731 TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
6732 netif_wake_queue(dev);
6735 * 8168 hack: TxPoll requests are lost when the Tx packets are
6736 * too close. Let's kick an extra TxPoll request when a burst
6737 * of start_xmit activity is detected (if it is not detected,
6738 * it is slow enough). -- FR
6740 if (tp->cur_tx != dirty_tx) {
6741 void __iomem *ioaddr = tp->mmio_addr;
6743 RTL_W8(TxPoll, NPQ);
/* True unless the Rx descriptor carries both FirstFrag and LastFrag,
 * i.e. the frame spans multiple descriptors.
 */
6748 static inline int rtl8169_fragmented_frame(u32 status)
6750 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
/* Accept the hardware Rx checksum result for TCP/UDP frames whose
 * checksum-fail bit is clear; otherwise leave the skb unchecksummed.
 */
6753 static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
6755 u32 status = opts1 & RxProtoMask;
6757 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
6758 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
6759 skb->ip_summed = CHECKSUM_UNNECESSARY;
6761 skb_checksum_none_assert(skb);
/* Copy-on-receive: allocate a fresh skb and memcpy the packet out of
 * the DMA buffer, syncing the mapping for CPU before the copy and back
 * to the device after, so the original buffer can stay on the ring.
 */
6764 static struct sk_buff *rtl8169_try_rx_copy(void *data,
6765 struct rtl8169_private *tp,
6769 struct sk_buff *skb;
6770 struct device *d = &tp->pci_dev->dev;
6772 data = rtl8169_align(data);
6773 dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
6775 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
6777 memcpy(skb->data, data, pkt_size);
6778 dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
/* Rx NAPI poll body: consume up to @budget completed descriptors,
 * accounting for hardware-reported errors, copying good frames into
 * fresh skbs and feeding them to napi_gro_receive(), then returning
 * each descriptor to the NIC.
 */
6783 static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
6785 unsigned int cur_rx, rx_left;
6788 cur_rx = tp->cur_rx;
6790 for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
6791 unsigned int entry = cur_rx % NUM_RX_DESC;
6792 struct RxDesc *desc = tp->RxDescArray + entry;
6796 status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
/* Still owned by the NIC — nothing more to reap. */
6798 if (status & DescOwn)
6800 if (unlikely(status & RxRES)) {
6801 netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
6803 dev->stats.rx_errors++;
6804 if (status & (RxRWT | RxRUNT))
6805 dev->stats.rx_length_errors++;
6807 dev->stats.rx_crc_errors++;
/* FIFO overflow is recovered via a full chip reset. */
6808 if (status & RxFOVF) {
6809 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
6810 dev->stats.rx_fifo_errors++;
/* With RXALL enabled, runt/CRC-error frames are still
 * delivered to the stack.
 */
6812 if ((status & (RxRUNT | RxCRC)) &&
6813 !(status & (RxRWT | RxFOVF)) &&
6814 (dev->features & NETIF_F_RXALL)
6817 struct sk_buff *skb;
6822 addr = le64_to_cpu(desc->addr);
/* Strip the 4-byte FCS unless the user asked to keep it. */
6823 if (likely(!(dev->features & NETIF_F_RXFCS)))
6824 pkt_size = (status & 0x00003fff) - 4;
6826 pkt_size = status & 0x00003fff;
6829 * The driver does not support incoming fragmented
6830 * frames. They are seen as a symptom of over-mtu
6833 if (unlikely(rtl8169_fragmented_frame(status))) {
6834 dev->stats.rx_dropped++;
6835 dev->stats.rx_length_errors++;
6836 goto release_descriptor;
6839 skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
6840 tp, pkt_size, addr);
6842 dev->stats.rx_dropped++;
6843 goto release_descriptor;
6846 rtl8169_rx_csum(skb, status);
6847 skb_put(skb, pkt_size);
6848 skb->protocol = eth_type_trans(skb, dev);
6850 rtl8169_rx_vlan_tag(desc, skb);
6852 napi_gro_receive(&tp->napi, skb);
6854 u64_stats_update_begin(&tp->rx_stats.syncp);
6855 tp->rx_stats.packets++;
6856 tp->rx_stats.bytes += pkt_size;
6857 u64_stats_update_end(&tp->rx_stats.syncp);
6862 rtl8169_mark_to_asic(desc, rx_buf_sz);
6865 count = cur_rx - tp->cur_rx;
6866 tp->cur_rx = cur_rx;
/* Hard IRQ handler: read the event register, filter against the NAPI
 * and slow-event masks, and on a real event (0xffff means the device
 * is gone/unpowered) disable chip interrupts and schedule NAPI.
 */
6871 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
6873 struct net_device *dev = dev_instance;
6874 struct rtl8169_private *tp = netdev_priv(dev);
6878 status = rtl_get_events(tp);
6879 if (status && status != 0xffff) {
6880 status &= RTL_EVENT_NAPI | tp->event_slow;
6884 rtl_irq_disable(tp);
6885 napi_schedule(&tp->napi);
6888 return IRQ_RETVAL(handled);
6892 * Workqueue context.
/* Slow-event handler run from rtl_task(): acknowledges the pending
 * slow events, applies the VER_11 Rx FIFO overflow workaround, handles
 * PCI system errors and link changes, then re-enables interrupts.
 */
6894 static void rtl_slow_event_work(struct rtl8169_private *tp)
6896 struct net_device *dev = tp->dev;
6899 status = rtl_get_events(tp) & tp->event_slow;
6900 rtl_ack_events(tp, status);
6902 if (unlikely(status & RxFIFOOver)) {
6903 switch (tp->mac_version) {
6904 /* Work around for rx fifo overflow */
6905 case RTL_GIGA_MAC_VER_11:
6906 netif_stop_queue(dev);
6907 /* XXX - Hack alert. See rtl_task(). */
6908 set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
6914 if (unlikely(status & SYSErr))
6915 rtl8169_pcierr_interrupt(dev);
6917 if (status & LinkChg)
6918 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
6920 rtl_irq_enable_all(tp);
/* Deferred-work dispatcher: run each pending flagged action in table
 * order (slow events first), but only while the device is running and
 * the task machinery is enabled.
 */
6923 static void rtl_task(struct work_struct *work)
6925 static const struct {
6927 void (*action)(struct rtl8169_private *);
6929 /* XXX - keep rtl_slow_event_work() as first element. */
6930 { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
6931 { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
6932 { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work }
6934 struct rtl8169_private *tp =
6935 container_of(work, struct rtl8169_private, wk.work);
6936 struct net_device *dev = tp->dev;
6941 if (!netif_running(dev) ||
6942 !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
6945 for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
/* test_and_clear so a re-raised flag queues another pass. */
6948 pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
6950 rtl_work[i].action(tp);
6954 rtl_unlock_work(tp);
/* NAPI poll: ack fast events, run Rx/Tx processing as indicated, hand
 * slow events to the workqueue (masking them out of the re-enable set
 * until handled), and re-enable interrupts when under budget.
 */
6957 static int rtl8169_poll(struct napi_struct *napi, int budget)
6959 struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
6960 struct net_device *dev = tp->dev;
6961 u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
6965 status = rtl_get_events(tp);
/* Slow events are acked later by rtl_slow_event_work(). */
6966 rtl_ack_events(tp, status & ~tp->event_slow);
6968 if (status & RTL_EVENT_NAPI_RX)
6969 work_done = rtl_rx(dev, tp, (u32) budget);
6971 if (status & RTL_EVENT_NAPI_TX)
6974 if (status & tp->event_slow) {
6975 enable_mask &= ~tp->event_slow;
6977 rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
6980 if (work_done < budget) {
6981 napi_complete(napi);
6983 rtl_irq_enable(tp, enable_mask);
/* Fold the hardware RxMissed counter (24-bit) into netdev stats and
 * clear it; only the original 8169 revisions (<= VER_06) expose it.
 */
6990 static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
6992 struct rtl8169_private *tp = netdev_priv(dev);
6994 if (tp->mac_version > RTL_GIGA_MAC_VER_06)
6997 dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
6998 RTL_W32(RxMissed, 0);
/* Bring the interface down: stop the timer, NAPI and queue, reset the
 * chip, harvest the missed counter, drain racing transmitters, then
 * tear down both rings and power the PLL down.
 */
7001 static void rtl8169_down(struct net_device *dev)
7003 struct rtl8169_private *tp = netdev_priv(dev);
7004 void __iomem *ioaddr = tp->mmio_addr;
7006 del_timer_sync(&tp->timer);
7008 napi_disable(&tp->napi);
7009 netif_stop_queue(dev);
7011 rtl8169_hw_reset(tp);
7013 * At this point device interrupts can not be enabled in any function,
7014 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
7015 * and napi is disabled (rtl8169_poll).
7017 rtl8169_rx_missed(dev, ioaddr);
7019 /* Give a racing hard_start_xmit a few cycles to complete. */
7020 synchronize_sched();
7022 rtl8169_tx_clear(tp);
7024 rtl8169_rx_clear(tp);
7026 rtl_pll_power_down(tp);
/* ndo_stop: snapshot the hardware counters, disable the deferred task
 * machinery, free the IRQ and both descriptor rings, balancing the
 * runtime-PM reference taken around the teardown.
 */
7029 static int rtl8169_close(struct net_device *dev)
7031 struct rtl8169_private *tp = netdev_priv(dev);
7032 struct pci_dev *pdev = tp->pci_dev;
7034 pm_runtime_get_sync(&pdev->dev);
7036 /* Update counters before going down */
7037 rtl8169_update_counters(dev);
7040 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
7043 rtl_unlock_work(tp);
7045 cancel_work_sync(&tp->wk.work);
7047 free_irq(pdev->irq, dev);
7049 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
7051 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
7053 tp->TxDescArray = NULL;
7054 tp->RxDescArray = NULL;
7056 pm_runtime_put_sync(&pdev->dev);
7061 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by netconsole and other tools that need to
 * drive the RX path with interrupts disabled.  Simply invokes the normal
 * interrupt handler by hand.
 */
7062 static void rtl8169_netpoll(struct net_device *dev)
7064 	struct rtl8169_private *tp = netdev_priv(dev);
7066 	rtl8169_interrupt(tp->pci_dev->irq, dev);
/*
 * ndo_open callback.  Allocates the TX/RX descriptor rings (coherent DMA),
 * populates the RX ring, requests firmware and the IRQ, then enables NAPI,
 * initializes the PHY and starts the queue.  On any failure it unwinds via
 * the err_* labels (goto-cleanup pattern); note several intermediate lines
 * of the original (labels, returns) are not visible in this extract.
 */
7070 static int rtl_open(struct net_device *dev)
7072 	struct rtl8169_private *tp = netdev_priv(dev);
7073 	void __iomem *ioaddr = tp->mmio_addr;
7074 	struct pci_dev *pdev = tp->pci_dev;
7075 	int retval = -ENOMEM;
7077 	pm_runtime_get_sync(&pdev->dev);
7080 	 * Rx and Tx descriptors needs 256 bytes alignment.
7081 	 * dma_alloc_coherent provides more.
7083 	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
7084 					     &tp->TxPhyAddr, GFP_KERNEL);
7085 	if (!tp->TxDescArray)
7086 		goto err_pm_runtime_put;
7088 	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
7089 					     &tp->RxPhyAddr, GFP_KERNEL);
7090 	if (!tp->RxDescArray)
7093 	retval = rtl8169_init_ring(dev);
7097 	INIT_WORK(&tp->wk.work, rtl_task);
7101 	rtl_request_firmware(tp);
/* Shared IRQ unless MSI was successfully enabled at probe time. */
7103 	retval = request_irq(pdev->irq, rtl8169_interrupt,
7104 			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
7107 		goto err_release_fw_2;
7111 	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
7113 	napi_enable(&tp->napi);
7115 	rtl8169_init_phy(dev, tp);
7117 	__rtl8169_set_features(dev, dev->features);
7119 	rtl_pll_power_up(tp);
7123 	netif_start_queue(dev);
7125 	rtl_unlock_work(tp);
7127 	tp->saved_wolopts = 0;
7128 	pm_runtime_put_noidle(&pdev->dev);
7130 	rtl8169_check_link_status(dev, tp, ioaddr);
/* Error unwind path: release firmware and free rings in reverse order. */
7135 	rtl_release_firmware(tp);
7136 	rtl8169_rx_clear(tp);
7138 	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
7140 	tp->RxDescArray = NULL;
7142 	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
7144 	tp->TxDescArray = NULL;
7146 	pm_runtime_put_noidle(&pdev->dev);
/*
 * ndo_get_stats64 callback.  Copies the per-device RX/TX packet and byte
 * counters into @stats under u64_stats seqcount protection (retry loops
 * guard against torn 64-bit reads on 32-bit hosts), then mirrors the
 * remaining error counters from dev->stats.
 */
7150 static struct rtnl_link_stats64 *
7151 rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7153 	struct rtl8169_private *tp = netdev_priv(dev);
7154 	void __iomem *ioaddr = tp->mmio_addr;
/* Only touch hardware registers while the interface is up. */
7157 	if (netif_running(dev))
7158 		rtl8169_rx_missed(dev, ioaddr);
7161 		start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
7162 		stats->rx_packets = tp->rx_stats.packets;
7163 		stats->rx_bytes	= tp->rx_stats.bytes;
7164 	} while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
7168 		start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
7169 		stats->tx_packets = tp->tx_stats.packets;
7170 		stats->tx_bytes	= tp->tx_stats.bytes;
7171 	} while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
7173 	stats->rx_dropped	= dev->stats.rx_dropped;
7174 	stats->tx_dropped	= dev->stats.tx_dropped;
7175 	stats->rx_length_errors = dev->stats.rx_length_errors;
7176 	stats->rx_errors	= dev->stats.rx_errors;
7177 	stats->rx_crc_errors	= dev->stats.rx_crc_errors;
7178 	stats->rx_fifo_errors	= dev->stats.rx_fifo_errors;
7179 	stats->rx_missed_errors = dev->stats.rx_missed_errors;
/*
 * Common software-side suspend: detach the netdev, stop the queue,
 * disable NAPI and the slow-path worker, and power the PLL down.
 * Shared by system suspend, runtime suspend and shutdown.
 * No-op when the interface is not running.
 */
7184 static void rtl8169_net_suspend(struct net_device *dev)
7186 	struct rtl8169_private *tp = netdev_priv(dev);
7188 	if (!netif_running(dev))
7191 	netif_device_detach(dev);
7192 	netif_stop_queue(dev);
7195 	napi_disable(&tp->napi);
7196 	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
7197 	rtl_unlock_work(tp);
7199 	rtl_pll_power_down(tp);
/*
 * dev_pm_ops .suspend/.freeze/.poweroff callback: delegate to the common
 * net-level suspend helper.
 */
7204 static int rtl8169_suspend(struct device *device)
7206 	struct pci_dev *pdev = to_pci_dev(device);
7207 	struct net_device *dev = pci_get_drvdata(pdev);
7209 	rtl8169_net_suspend(dev);
/*
 * Common resume tail: reattach the netdev, power the PLL back up,
 * re-enable NAPI and the worker, then schedule a full reset task to
 * reprogram the hardware.  Caller has already verified netif_running().
 */
7214 static void __rtl8169_resume(struct net_device *dev)
7216 	struct rtl8169_private *tp = netdev_priv(dev);
7218 	netif_device_attach(dev);
7220 	rtl_pll_power_up(tp);
7223 	napi_enable(&tp->napi);
7224 	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
7225 	rtl_unlock_work(tp);
/* Defer the actual hw re-init to the reset work item. */
7227 	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/*
 * dev_pm_ops .resume/.thaw/.restore callback: re-init the PHY
 * unconditionally, then finish the resume only if the interface was up.
 */
7230 static int rtl8169_resume(struct device *device)
7232 	struct pci_dev *pdev = to_pci_dev(device);
7233 	struct net_device *dev = pci_get_drvdata(pdev);
7234 	struct rtl8169_private *tp = netdev_priv(dev);
7236 	rtl8169_init_phy(dev, tp);
7238 	if (netif_running(dev))
7239 		__rtl8169_resume(dev);
/*
 * Runtime-PM suspend: save the current WoL options and force WAKE_ANY so
 * the chip can wake us on any relevant event, then do the common net
 * suspend.  A NULL TxDescArray means the interface is closed (nothing to do).
 */
7244 static int rtl8169_runtime_suspend(struct device *device)
7246 	struct pci_dev *pdev = to_pci_dev(device);
7247 	struct net_device *dev = pci_get_drvdata(pdev);
7248 	struct rtl8169_private *tp = netdev_priv(dev);
7250 	if (!tp->TxDescArray)
7254 	tp->saved_wolopts = __rtl8169_get_wol(tp);
7255 	__rtl8169_set_wol(tp, WAKE_ANY);
7256 	rtl_unlock_work(tp);
7258 	rtl8169_net_suspend(dev);
/*
 * Runtime-PM resume: restore the WoL options saved by runtime_suspend,
 * re-init the PHY and finish with the common resume tail.  Skipped when
 * the interface is closed (TxDescArray == NULL).
 */
7263 static int rtl8169_runtime_resume(struct device *device)
7265 	struct pci_dev *pdev = to_pci_dev(device);
7266 	struct net_device *dev = pci_get_drvdata(pdev);
7267 	struct rtl8169_private *tp = netdev_priv(dev);
7269 	if (!tp->TxDescArray)
7273 	__rtl8169_set_wol(tp, tp->saved_wolopts);
7274 	tp->saved_wolopts = 0;
7275 	rtl_unlock_work(tp);
7277 	rtl8169_init_phy(dev, tp);
7279 	__rtl8169_resume(dev);
/*
 * Runtime-PM idle check: refuse (-EBUSY) while the interface is open
 * (TxDescArray non-NULL); allow autosuspend otherwise.
 */
7284 static int rtl8169_runtime_idle(struct device *device)
7286 	struct pci_dev *pdev = to_pci_dev(device);
7287 	struct net_device *dev = pci_get_drvdata(pdev);
7288 	struct rtl8169_private *tp = netdev_priv(dev);
7290 	return tp->TxDescArray ? -EBUSY : 0;
/*
 * Power-management operations table.  System sleep entry points all map
 * onto the same suspend/resume pair; runtime PM gets its own callbacks.
 * RTL8169_PM_OPS collapses to NULL when CONFIG_PM is disabled so the
 * pci_driver below can reference it unconditionally.
 */
7293 static const struct dev_pm_ops rtl8169_pm_ops = {
7294 	.suspend		= rtl8169_suspend,
7295 	.resume			= rtl8169_resume,
7296 	.freeze			= rtl8169_suspend,
7297 	.thaw			= rtl8169_resume,
7298 	.poweroff		= rtl8169_suspend,
7299 	.restore		= rtl8169_resume,
7300 	.runtime_suspend	= rtl8169_runtime_suspend,
7301 	.runtime_resume		= rtl8169_runtime_resume,
7302 	.runtime_idle		= rtl8169_runtime_idle,
7305 #define RTL8169_PM_OPS	(&rtl8169_pm_ops)
7307 #else /* !CONFIG_PM */
7309 #define RTL8169_PM_OPS	NULL
7311 #endif /* !CONFIG_PM */
/*
 * Shutdown-time WoL quirk: on 8168b variants (MAC versions 11/12/17)
 * Wake-on-LAN only works if the receiver stays enabled, so drop bus
 * mastering but re-enable RX before powering off.
 */
7313 static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
7315 	void __iomem *ioaddr = tp->mmio_addr;
7317 	/* WoL fails with 8168b when the receiver is disabled. */
7318 	switch (tp->mac_version) {
7319 	case RTL_GIGA_MAC_VER_11:
7320 	case RTL_GIGA_MAC_VER_12:
7321 	case RTL_GIGA_MAC_VER_17:
7322 		pci_clear_master(tp->pci_dev);
7324 		RTL_W8(ChipCmd, CmdRxEnb);
/*
 * pci_driver .shutdown callback.  Quiesces the interface, restores the
 * permanent MAC address, resets the chip and -- when the system is
 * actually powering off -- arms WoL quirks and puts the device in D3hot
 * with PME enabled so it can wake the machine.
 */
7333 static void rtl_shutdown(struct pci_dev *pdev)
7335 	struct net_device *dev = pci_get_drvdata(pdev);
7336 	struct rtl8169_private *tp = netdev_priv(dev);
7337 	struct device *d = &pdev->dev;
7339 	pm_runtime_get_sync(d);
7341 	rtl8169_net_suspend(dev);
7343 	/* Restore original MAC address */
7344 	rtl_rar_set(tp, dev->perm_addr);
7346 	rtl8169_hw_reset(tp);
7348 	if (system_state == SYSTEM_POWER_OFF) {
/* Only arm wake-up hardware when at least one WoL option is set. */
7349 		if (__rtl8169_get_wol(tp) & WAKE_ANY) {
7350 			rtl_wol_suspend_quirk(tp);
7351 			rtl_wol_shutdown_quirk(tp);
7354 		pci_wake_from_d3(pdev, true);
7355 		pci_set_power_state(pdev, PCI_D3hot);
7358 	pm_runtime_put_noidle(d);
/*
 * pci_driver .remove callback.  Stops the DASH/management firmware on the
 * chips that have it (VER_27/28/31), unregisters the netdev, drops the
 * firmware reference, restores the permanent MAC address and releases
 * MSI and board resources.
 */
7361 static void rtl_remove_one(struct pci_dev *pdev)
7363 	struct net_device *dev = pci_get_drvdata(pdev);
7364 	struct rtl8169_private *tp = netdev_priv(dev);
7366 	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
7367 	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
7368 	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
7369 		rtl8168_driver_stop(tp);
7372 	netif_napi_del(&tp->napi);
7374 	unregister_netdev(dev);
7376 	rtl_release_firmware(tp);
/* Balance the runtime-PM put done at probe time for wake-capable devices. */
7378 	if (pci_dev_run_wake(pdev))
7379 		pm_runtime_get_noresume(&pdev->dev);
7381 	/* restore original MAC address */
7382 	rtl_rar_set(tp, dev->perm_addr);
7384 	rtl_disable_msi(pdev, tp);
7385 	rtl8169_release_board(pdev, dev, tp->mmio_addr);
/* Network device operations table wiring the ndo_* entry points above. */
7388 static const struct net_device_ops rtl_netdev_ops = {
7389 	.ndo_open		= rtl_open,
7390 	.ndo_stop		= rtl8169_close,
7391 	.ndo_get_stats64	= rtl8169_get_stats64,
7392 	.ndo_start_xmit		= rtl8169_start_xmit,
7393 	.ndo_tx_timeout		= rtl8169_tx_timeout,
7394 	.ndo_validate_addr	= eth_validate_addr,
7395 	.ndo_change_mtu		= rtl8169_change_mtu,
7396 	.ndo_fix_features	= rtl8169_fix_features,
7397 	.ndo_set_features	= rtl8169_set_features,
7398 	.ndo_set_mac_address	= rtl_set_mac_address,
7399 	.ndo_do_ioctl		= rtl8169_ioctl,
7400 	.ndo_set_rx_mode	= rtl_set_rx_mode,
7401 #ifdef CONFIG_NET_POLL_CONTROLLER
7402 	.ndo_poll_controller	= rtl8169_netpoll,
/*
 * Per-family configuration: hw_start routine, BAR region, the interrupt
 * events treated as "slow path", feature flags and the fallback MAC
 * version used when detection fails.  Indexed by the PCI table's
 * driver_data (8169 / 8168 / 8101 families).
 */
7407 static const struct rtl_cfg_info {
7408 	void (*hw_start)(struct net_device *);
7409 	unsigned int region;
7414 } rtl_cfg_infos [] = {
7416 		.hw_start	= rtl_hw_start_8169,
7419 		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
7420 		.features	= RTL_FEATURE_GMII,
7421 		.default_ver	= RTL_GIGA_MAC_VER_01,
7424 		.hw_start	= rtl_hw_start_8168,
7427 		.event_slow	= SYSErr | LinkChg | RxOverflow,
7428 		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
7429 		.default_ver	= RTL_GIGA_MAC_VER_11,
7432 		.hw_start	= rtl_hw_start_8101,
7435 		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver |
7437 		.features	= RTL_FEATURE_MSI,
7438 		.default_ver	= RTL_GIGA_MAC_VER_13,
7442 /* Cfg9346_Unlock assumed. */
/*
 * Try to enable MSI when the chip family supports it; fall back to INTx
 * with an informational message if pci_enable_msi() fails.  Returns the
 * RTL_FEATURE_MSI bit to merge into tp->features (0 on fallback).
 * Early chips (<= VER_06) additionally get MSIEnable cleared in Config2.
 */
7443 static unsigned rtl_try_msi(struct rtl8169_private *tp,
7444 			    const struct rtl_cfg_info *cfg)
7446 	void __iomem *ioaddr = tp->mmio_addr;
7450 	cfg2 = RTL_R8(Config2) & ~MSIEnable;
7451 	if (cfg->features & RTL_FEATURE_MSI) {
7452 		if (pci_enable_msi(tp->pci_dev)) {
7453 			netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
7456 			msi = RTL_FEATURE_MSI;
7459 	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
7460 		RTL_W8(Config2, cfg2);
/* Poll condition: MCU reports the link list is ready (LINK_LIST_RDY set). */
7464 DECLARE_RTL_COND(rtl_link_list_ready_cond)
7466 	void __iomem *ioaddr = tp->mmio_addr;
7468 	return RTL_R8(MCU) & LINK_LIST_RDY;
/* Poll condition: both RX and TX FIFOs are empty (all RXTX_EMPTY bits set). */
7471 DECLARE_RTL_COND(rtl_rxtx_empty_cond)
7473 	void __iomem *ioaddr = tp->mmio_addr;
7475 	return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
/*
 * One-time hardware bring-up for 8168g-class chips: gate RXDV, wait for
 * the TX config and RX/TX FIFOs to drain, stop RX/TX, leave OOB mode,
 * then toggle a bit in MAC-OCP register 0xe8de (the two read-modify-write
 * sequences) while waiting for the link list to become ready after each
 * step.  Each wait retries 42 times with 100us delays.
 */
7478 static void rtl_hw_init_8168g(struct rtl8169_private *tp)
7480 	void __iomem *ioaddr = tp->mmio_addr;
7483 	tp->ocp_base = OCP_STD_PHY_BASE;
7485 	RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);
7487 	if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
7490 	if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
7493 	RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
7495 	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
7497 	data = r8168_mac_ocp_read(tp, 0xe8de);
7499 	r8168_mac_ocp_write(tp, 0xe8de, data);
7501 	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
7504 	data = r8168_mac_ocp_read(tp, 0xe8de);
7506 	r8168_mac_ocp_write(tp, 0xe8de, data);
7508 	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
/*
 * Dispatch chip-specific one-time initialization: MAC versions 40-48
 * (8168g family and derivatives) need the 8168g bring-up sequence;
 * everything else needs none.
 */
7512 static void rtl_hw_initialize(struct rtl8169_private *tp)
7514 	switch (tp->mac_version) {
7515 	case RTL_GIGA_MAC_VER_40:
7516 	case RTL_GIGA_MAC_VER_41:
7517 	case RTL_GIGA_MAC_VER_42:
7518 	case RTL_GIGA_MAC_VER_43:
7519 	case RTL_GIGA_MAC_VER_44:
7520 	case RTL_GIGA_MAC_VER_45:
7521 	case RTL_GIGA_MAC_VER_46:
7522 	case RTL_GIGA_MAC_VER_47:
7523 	case RTL_GIGA_MAC_VER_48:
7524 		rtl_hw_init_8168g(tp);
/*
 * PCI probe.  Allocates the netdev, enables and maps the device (BAR per
 * cfg->region, 64-bit DMA with DAC when available, else 32-bit), detects
 * the MAC version, performs one-time hw init, reads WoL capabilities and
 * tries MSI (under Cfg9346 unlock), selects TBI vs xMII PHY ops, fetches
 * the MAC address (ERI registers on VER_45..48, MAC0 registers otherwise),
 * configures netdev features and finally registers the interface.
 * Errors unwind through the err_out_* labels in reverse acquisition order.
 */
7533 rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7535 	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
7536 	const unsigned int region = cfg->region;
7537 	struct rtl8169_private *tp;
7538 	struct mii_if_info *mii;
7539 	struct net_device *dev;
7540 	void __iomem *ioaddr;
7544 	if (netif_msg_drv(&debug)) {
7545 		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
7546 		       MODULENAME, RTL8169_VERSION);
7549 	dev = alloc_etherdev(sizeof (*tp));
7555 	SET_NETDEV_DEV(dev, &pdev->dev);
7556 	dev->netdev_ops = &rtl_netdev_ops;
7557 	tp = netdev_priv(dev);
7560 	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
/* Wire up the generic MII library accessors. */
7564 	mii->mdio_read = rtl_mdio_read;
7565 	mii->mdio_write = rtl_mdio_write;
7566 	mii->phy_id_mask = 0x1f;
7567 	mii->reg_num_mask = 0x1f;
7568 	mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
7570 	/* disable ASPM completely as that cause random device stop working
7571 	 * problems as well as full system hangs for some PCIe devices users */
7572 	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
7573 				     PCIE_LINK_STATE_CLKPM);
7575 	/* enable device (incl. PCI PM wakeup and hotplug setup) */
7576 	rc = pci_enable_device(pdev);
7578 		netif_err(tp, probe, dev, "enable failure\n");
7579 		goto err_out_free_dev_1;
7582 	if (pci_set_mwi(pdev) < 0)
7583 		netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
7585 	/* make sure PCI base addr 1 is MMIO */
7586 	if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
7587 		netif_err(tp, probe, dev,
7588 			  "region #%d not an MMIO resource, aborting\n",
7594 	/* check for weird/broken PCI region reporting */
7595 	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
7596 		netif_err(tp, probe, dev,
7597 			  "Invalid PCI region size(s), aborting\n");
7602 	rc = pci_request_regions(pdev, MODULENAME);
7604 		netif_err(tp, probe, dev, "could not request regions\n");
7608 	tp->cp_cmd = RxChkSum;
/* Prefer 64-bit DMA + DAC when the platform and module param allow it. */
7610 	if ((sizeof(dma_addr_t) > 4) &&
7611 	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
7612 		tp->cp_cmd |= PCIDAC;
7613 		dev->features |= NETIF_F_HIGHDMA;
7615 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
7617 			netif_err(tp, probe, dev, "DMA configuration failed\n");
7618 			goto err_out_free_res_3;
7622 	/* ioremap MMIO region */
7623 	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
7625 		netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
7627 		goto err_out_free_res_3;
7629 	tp->mmio_addr = ioaddr;
7631 	if (!pci_is_pcie(pdev))
7632 		netif_info(tp, probe, dev, "not PCI Express\n");
7634 	/* Identify chip attached to board */
7635 	rtl8169_get_mac_version(tp, dev, cfg->default_ver);
7639 	rtl_irq_disable(tp);
7641 	rtl_hw_initialize(tp);
/* Clear any stale interrupt status left over from the BIOS/bootloader. */
7645 	rtl_ack_events(tp, 0xffff);
7647 	pci_set_master(pdev);
7650 	 * Pretend we are using VLANs; This bypasses a nasty bug where
7651 	 * Interrupts stop flowing on high load on 8110SCd controllers.
7653 	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
7654 		tp->cp_cmd |= RxVlan;
7656 	rtl_init_mdio_ops(tp);
7657 	rtl_init_pll_power_ops(tp);
7658 	rtl_init_jumbo_ops(tp);
7659 	rtl_init_csi_ops(tp);
7661 	rtl8169_print_mac_version(tp);
7663 	chipset = tp->mac_version;
7664 	tp->txd_version = rtl_chip_infos[chipset].txd_version;
/* Probe WoL capability bits with the config registers unlocked. */
7666 	RTL_W8(Cfg9346, Cfg9346_Unlock);
7667 	RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
7668 	RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
7669 	switch (tp->mac_version) {
7670 	case RTL_GIGA_MAC_VER_45:
7671 	case RTL_GIGA_MAC_VER_46:
7672 		if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
7673 			tp->features |= RTL_FEATURE_WOL;
7674 		if ((RTL_R8(Config3) & LinkUp) != 0)
7675 			tp->features |= RTL_FEATURE_WOL;
7678 		if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
7679 			tp->features |= RTL_FEATURE_WOL;
7682 	if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
7683 		tp->features |= RTL_FEATURE_WOL;
7684 	tp->features |= rtl_try_msi(tp, cfg);
7685 	RTL_W8(Cfg9346, Cfg9346_Lock);
/* TBI (fiber) vs xMII (copper) determines speed/reset/link-ok helpers. */
7687 	if (rtl_tbi_enabled(tp)) {
7688 		tp->set_speed = rtl8169_set_speed_tbi;
7689 		tp->get_settings = rtl8169_gset_tbi;
7690 		tp->phy_reset_enable = rtl8169_tbi_reset_enable;
7691 		tp->phy_reset_pending = rtl8169_tbi_reset_pending;
7692 		tp->link_ok = rtl8169_tbi_link_ok;
7693 		tp->do_ioctl = rtl_tbi_ioctl;
7695 		tp->set_speed = rtl8169_set_speed_xmii;
7696 		tp->get_settings = rtl8169_gset_xmii;
7697 		tp->phy_reset_enable = rtl8169_xmii_reset_enable;
7698 		tp->phy_reset_pending = rtl8169_xmii_reset_pending;
7699 		tp->link_ok = rtl8169_xmii_link_ok;
7700 		tp->do_ioctl = rtl_xmii_ioctl;
7703 	mutex_init(&tp->wk.mutex);
7704 	u64_stats_init(&tp->rx_stats.syncp);
7705 	u64_stats_init(&tp->tx_stats.syncp);
7707 	/* Get MAC address */
7708 	if (tp->mac_version == RTL_GIGA_MAC_VER_45 ||
7709 	    tp->mac_version == RTL_GIGA_MAC_VER_46 ||
7710 	    tp->mac_version == RTL_GIGA_MAC_VER_47 ||
7711 	    tp->mac_version == RTL_GIGA_MAC_VER_48) {
/* Newer chips keep the MAC address in ERI registers 0xE0/0xE4. */
7714 		*(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xE0, ERIAR_EXGMAC);
7715 		*(u16 *)&mac_addr[2] = rtl_eri_read(tp, 0xE4, ERIAR_EXGMAC);
7717 		if (is_valid_ether_addr((u8 *)mac_addr))
7718 			rtl_rar_set(tp, (u8 *)mac_addr);
7720 	for (i = 0; i < ETH_ALEN; i++)
7721 		dev->dev_addr[i] = RTL_R8(MAC0 + i);
7723 	dev->ethtool_ops = &rtl8169_ethtool_ops;
7724 	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
7726 	netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
7728 	/* don't enable SG, IP_CSUM and TSO by default - it might not work
7729 	 * properly for all devices */
7730 	dev->features |= NETIF_F_RXCSUM |
7731 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
7733 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
7734 		NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
7735 		NETIF_F_HW_VLAN_CTAG_RX;
7736 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
7739 	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
7740 		/* 8110SCd requires hardware Rx VLAN - disallow toggling */
7741 		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
/* Descriptor format v1 vs v2 selects the checksum/TSO implementation. */
7743 	if (tp->txd_version == RTL_TD_0)
7744 		tp->tso_csum = rtl8169_tso_csum_v1;
7745 	else if (tp->txd_version == RTL_TD_1) {
7746 		tp->tso_csum = rtl8169_tso_csum_v2;
7747 		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
7751 	dev->hw_features |= NETIF_F_RXALL;
7752 	dev->hw_features |= NETIF_F_RXFCS;
7754 	tp->hw_start = cfg->hw_start;
7755 	tp->event_slow = cfg->event_slow;
/* VER_01 cannot report Rx overflow bits in opts1 -- keep them unmasked. */
7757 	tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
7758 		~(RxBOVF | RxFOVF) : ~0;
7760 	init_timer(&tp->timer);
7761 	tp->timer.data = (unsigned long) dev;
7762 	tp->timer.function = rtl8169_phy_timer;
7764 	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
7766 	rc = register_netdev(dev);
7770 	pci_set_drvdata(pdev, dev);
7772 	netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
7773 		   rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
7774 		   (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
7775 	if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
7776 		netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
7777 			   "tx checksumming: %s]\n",
7778 			   rtl_chip_infos[chipset].jumbo_max,
7779 			   rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
/* Chips with DASH management firmware need an explicit driver-start. */
7782 	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
7783 	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
7784 	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
7785 		rtl8168_driver_start(tp);
7788 	device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
7790 	if (pci_dev_run_wake(pdev))
7791 		pm_runtime_put_noidle(&pdev->dev);
7793 	netif_carrier_off(dev);
/* Error unwind: release resources in reverse order of acquisition. */
7799 	netif_napi_del(&tp->napi);
7800 	rtl_disable_msi(pdev, tp);
7803 	pci_release_regions(pdev);
7805 	pci_clear_mwi(pdev);
7806 	pci_disable_device(pdev);
/*
 * PCI driver descriptor and module entry/exit.  module_pci_driver()
 * expands to the standard module_init/module_exit pair that registers
 * and unregisters this driver.
 */
7812 static struct pci_driver rtl8169_pci_driver = {
7814 	.id_table	= rtl8169_pci_tbl,
7815 	.probe		= rtl_init_one,
7816 	.remove		= rtl_remove_one,
7817 	.shutdown	= rtl_shutdown,
7818 	.driver.pm	= RTL8169_PM_OPS,
7821 module_pci_driver(rtl8169_pci_driver);