/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/aer.h>
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)

#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.2.4"
#define DRV_MODULE_RELDATE	"Aug 05, 2013"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
static char version[] =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
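/* Usage sketch (illustrative, not part of the driver): loading with
 *
 *	modprobe bnx2 disable_msi=1
 *
 * makes the driver fall back to legacy INTx interrupts instead of MSI.
 */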
/* indexed by board_t, above */
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
	/* Tell compiler to fetch tx_prod and tx_cons from memory. */

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
		if (diff == BNX2_TX_DESC_CNT)
			diff = BNX2_MAX_TX_DESC_CNT;
	return bp->tx_ring_size - diff;
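/* Worked example (illustrative): BNX2_TX_DESC_CNT indices exist per ring
 * page, but one of them is the next-page pointer, so only
 * BNX2_MAX_TX_DESC_CNT (BNX2_TX_DESC_CNT - 1) entries are usable.  With
 * tx_prod == 300 and tx_cons == 46, diff == 254 and the queue still has
 * tx_ring_size - 254 free slots; a diff that lands exactly on
 * BNX2_TX_DESC_CNT is clamped to BNX2_MAX_TX_DESC_CNT to account for the
 * skipped index.
 */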
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);

bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
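/* Access-pattern sketch (illustrative): the pair of helpers above is a
 * classic address/data register window.  To reach an internal register
 * that is not directly mapped, the driver first writes the register's
 * offset to BNX2_PCICFG_REG_WINDOW_ADDRESS and then moves the value
 * through BNX2_PCICFG_REG_WINDOW; indirect_lock serializes the two-step
 * sequence so concurrent callers cannot interleave their address and
 * data accesses.
 */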
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);

bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);

bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
	spin_lock_bh(&bp->indirect_lock);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	spin_unlock_bh(&bp->indirect_lock);
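/* Flow sketch (illustrative): on the 5709 a context write goes through a
 * small command interface -- post the data to BNX2_CTX_CTX_DATA, request
 * the write by setting BNX2_CTX_CTX_CTRL_WRITE_REQ, then poll until the
 * hardware clears the request bit.  Older chips instead expose the
 * context memory through a plain address/data pair
 * (BNX2_CTX_DATA_ADR / BNX2_CTX_DATA).
 */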
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

static int bnx2_unregister_cnic(struct net_device *dev)
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);

static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;
bnx2_cnic_stop(struct bnx2 *bp)
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	mutex_unlock(&bp->cnic_lock);

bnx2_cnic_start(struct bnx2 *bp)
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	mutex_unlock(&bp->cnic_lock);

bnx2_cnic_stop(struct bnx2 *bp)

bnx2_cnic_start(struct bnx2 *bp)
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
bnx2_disable_int(struct bnx2 *bp)
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

bnx2_enable_int(struct bnx2 *bp)
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);

bnx2_disable_int_sync(struct bnx2 *bp)
	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);

bnx2_napi_disable(struct bnx2 *bp)
	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);

bnx2_napi_enable(struct bnx2 *bp)
	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */

bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
bnx2_free_tx_mem(struct bnx2 *bp)
	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					  txr->tx_desc_ring,
					  txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;

bnx2_free_rx_mem(struct bnx2 *bp)
	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
bnx2_alloc_tx_mem(struct bnx2 *bp)
	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (txr->tx_desc_ring == NULL)

bnx2_alloc_rx_mem(struct bnx2 *bp)
	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

		rxr->rx_buf_ring =
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
bnx2_free_mem(struct bnx2 *bp)
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
	if (bnapi->status_blk.msi) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
bnx2_alloc_mem(struct bnx2 *bp)
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					 &bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
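	/* Layout sketch (illustrative): the single coherent allocation is
	 * carved up as
	 *
	 *   status_blk + 0                              vector 0 status block
	 *   status_blk + i * BNX2_SBLK_MSIX_ALIGN_SIZE  vector i (MSI-X)
	 *   status_blk + status_blk_size                statistics block
	 *
	 * so one DMA mapping covers every per-vector status block plus the
	 * statistics block that follows them.
	 */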
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
							    BNX2_PAGE_SIZE,
							    &bp->ctx_blk_mapping[i],
							    GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)

	err = bnx2_alloc_rx_mem(bp);

	err = bnx2_alloc_tx_mem(bp);
bnx2_report_fw_link(struct bnx2 *bp)
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)

		switch (bp->line_speed) {
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
				fw_link_status = BNX2_LINK_STATUS_2500FULL;

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;

		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
bnx2_xceiver_str(struct bnx2 *bp)
	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :

bnx2_report_link(struct bnx2 *bp)
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp), bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl & FLOW_CTRL_RX) {
			pr_cont(", receive ");
			if (bp->flow_ctrl & FLOW_CTRL_TX)
				pr_cont("& transmit ");
			pr_cont(", transmit ");
		pr_cont("flow control ON");

		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));

	bnx2_report_fw_link(bp);
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
	u32 local_adv, remote_adv;

	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;

	if (bp->duplex != DUPLEX_FULL) {

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {
			bp->flow_ctrl = FLOW_CTRL_TX;
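	/* Resolution sketch (Table 28B-3 of 802.3ab-1999, as applied above):
	 *
	 *   local PAUSE  local ASYM  remote PAUSE  remote ASYM  result
	 *        1           x            1             x       TX | RX
	 *        1           1            0             1       RX
	 *        0           1            1             1       TX
	 *        otherwise                                      none
	 */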
bnx2_5709s_linkup(struct bnx2 *bp)
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;

	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;

	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
		bp->duplex = DUPLEX_HALF;
bnx2_5708s_linkup(struct bnx2 *bp)
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;

	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
		bp->duplex = DUPLEX_HALF;
bnx2_5706s_linkup(struct bnx2 *bp)
	u32 bmcr, local_adv, remote_adv, common;

	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
		bp->duplex = DUPLEX_HALF;

	if (!(bmcr & BMCR_ANENABLE)) {

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
			bp->duplex = DUPLEX_HALF;
bnx2_copper_linkup(struct bnx2 *bp)
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
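		/* Illustrative note: the link partner's 1000BASE-T abilities
		 * in MII_STAT1000 sit two bits above the matching local
		 * advertisement bits in MII_CTRL1000 (LPA_1000FULL is bit 11,
		 * ADVERTISE_1000FULL is bit 9), so shifting remote_adv right
		 * by 2 lines the two masks up before ANDing them.
		 */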
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;

		bnx2_read_phy(bp, bp->mii_adv, &local_adv);
		bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

		common = local_adv & remote_adv;
		if (common & ADVERTISE_100FULL) {
			bp->line_speed = SPEED_100;
			bp->duplex = DUPLEX_FULL;
		else if (common & ADVERTISE_100HALF) {
			bp->line_speed = SPEED_100;
			bp->duplex = DUPLEX_HALF;
		else if (common & ADVERTISE_10FULL) {
			bp->line_speed = SPEED_10;
			bp->duplex = DUPLEX_FULL;
		else if (common & ADVERTISE_10HALF) {
			bp->line_speed = SPEED_10;
			bp->duplex = DUPLEX_HALF;

		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
			bp->line_speed = SPEED_10;
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
			bp->duplex = DUPLEX_HALF;
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);

bnx2_init_all_rx_contexts(struct bnx2 *bp)
	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		bnx2_init_rx_context(bp, cid);
bnx2_set_mac_link(struct bnx2 *bp)
	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

		switch (bp->line_speed) {
			if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
			val |= BNX2_EMAC_MODE_PORT_MII;
			val |= BNX2_EMAC_MODE_25G_MODE;
			val |= BNX2_EMAC_MODE_PORT_GMII;
		val |= BNX2_EMAC_MODE_PORT_GMII;

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
bnx2_enable_bmsr1(struct bnx2 *bp)
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);

bnx2_disable_bmsr1(struct bnx2 *bp)
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

bnx2_test_and_enable_2g5(struct bnx2 *bp)
	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

bnx2_test_and_disable_2g5(struct bnx2 *bp)
	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
bnx2_enable_forced_2g5(struct bnx2 *bp)
	u32 uninitialized_var(bmcr);

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BCM5708S_BMCR_FORCE_2500;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

bnx2_disable_forced_2g5(struct bnx2 *bp)
	u32 uninitialized_var(bmcr);

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
bnx2_set_link(struct bnx2 *bp)
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
			bmsr &= ~BMSR_LSTATUS;

	if (bmsr & BMSR_LSTATUS) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
			bnx2_copper_linkup(bp);
		bnx2_resolve_flow_ctrl(bp);

		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
bnx2_reset_phy(struct bnx2 *bp)
	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
	if (i == PHY_RESET_MAX_WAIT) {
bnx2_phy_get_pause_adv(struct bnx2 *bp)
	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
			adv = ADVERTISE_PAUSE_CAP;
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
			adv = ADVERTISE_PAUSE_ASYM;
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					       BMCR_ANRESTART | BMCR_ANENABLE);

				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			spin_lock_bh(&bp->phy_lock);
		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			       BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate, which is very common
		 * in blade servers.  Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions.  Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	bnx2_resolve_flow_ctrl(bp);
	bnx2_set_mac_link(bp);

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
bnx2_set_default_remote_link(struct bnx2 *bp)
	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;

		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
bnx2_set_default_link(struct bnx2 *bp)
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
bnx2_send_heart_beat(struct bnx2 *bp)
	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
bnx2_remote_phy_event(struct bnx2 *bp)
	u8 link_up = bp->link_up;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)

		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;

		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
bnx2_set_remote_link(struct bnx2 *bp)
	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	case BNX2_FW_EVT_CODE_LINK_EVENT:
		bnx2_remote_phy_event(bp);
	case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		bnx2_send_heart_beat(bp);
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv1000 = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
		new_adv |= ADVERTISE_CSMA;
		new_adv |= bnx2_phy_get_pause_adv(bp);

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);

		if ((adv1000_reg != new_adv1000) ||
		    (adv_reg != new_adv) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {
			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				       BMCR_ANENABLE);
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);

	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	if (new_bmcr != bmcr) {
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is set up after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
bnx2_setup_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
	if (bp->loopback == MAC_LOOPBACK)

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		return bnx2_setup_serdes_phy(bp, port);
		return bnx2_setup_copper_phy(bp);
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
		MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);

	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
		BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
	if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_DIG);
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
		rc = bnx2_init_copper_phy(bp, reset_phy);

	rc = bnx2_setup_phy(bp, bp->phy_port);
bnx2_set_mac_loopback(struct bnx2 *bp)
	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
static int bnx2_test_link(struct bnx2 *);

bnx2_set_phy_loopback(struct bnx2 *bp)
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)

	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2443 bnx2_dump_mcp_state(struct bnx2 *bp)
2445 struct net_device *dev = bp->dev;
2448 netdev_err(dev, "<--- start MCP states dump --->\n");
2449 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2450 mcp_p0 = BNX2_MCP_STATE_P0;
2451 mcp_p1 = BNX2_MCP_STATE_P1;
2453 mcp_p0 = BNX2_MCP_STATE_P0_5708;
2454 mcp_p1 = BNX2_MCP_STATE_P1_5708;
2456 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2457 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2458 netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2459 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2460 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2461 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
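/* The program counter is deliberately read twice; two different
 * values show that the MCP is still executing.
 */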
2462 netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2463 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2464 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2465 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2466 netdev_err(dev, "DEBUG: shmem states:\n");
2467 netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2468 bnx2_shmem_rd(bp, BNX2_DRV_MB),
2469 bnx2_shmem_rd(bp, BNX2_FW_MB),
2470 bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2471 pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2472 netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2473 bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2474 bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2475 pr_cont(" condition[%08x]\n",
2476 bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2477 DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2478 DP_SHMEM_LINE(bp, 0x3cc);
2479 DP_SHMEM_LINE(bp, 0x3dc);
2480 DP_SHMEM_LINE(bp, 0x3ec);
2481 netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2482 netdev_err(dev, "<--- end MCP states dump --->\n");
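/* Post a command to the MCP firmware mailbox in shared memory and,
 * when ack is set, poll the firmware's reply mailbox until the
 * echoed sequence number matches, giving up after
 * BNX2_FW_ACK_TIME_OUT_MS.
 */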
2486 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2492 msg_data |= bp->fw_wr_seq;
2494 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2499 /* wait for an acknowledgement. */
2500 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2503 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2505 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2508 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2511 /* If we timed out, inform the firmware that this is the case. */
2512 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2513 msg_data &= ~BNX2_DRV_MSG_CODE;
2514 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2516 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2518 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2519 bnx2_dump_mcp_state(bp);
2525 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
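/* The 5709 keeps its context memory in host DMA pages. After the
 * one-shot MEM_INIT completes, each page's bus address is programmed
 * into the chip's host page table, one WRITE_REQ at a time.
 */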
2532 bnx2_init_5709_context(struct bnx2 *bp)
2537 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2538 val |= (BNX2_PAGE_BITS - 8) << 16;
2539 BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2540 for (i = 0; i < 10; i++) {
2541 val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2542 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2546 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2549 for (i = 0; i < bp->ctx_pages; i++) {
2553 memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2557 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2558 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2559 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2560 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2561 (u64) bp->ctx_blk_mapping[i] >> 32);
2562 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2563 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2564 for (j = 0; j < 10; j++) {
2566 val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2567 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2571 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2580 bnx2_init_context(struct bnx2 *bp)
2586 u32 vcid_addr, pcid_addr, offset;
2591 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2594 vcid_addr = GET_PCID_ADDR(vcid);
2596 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2601 pcid_addr = GET_PCID_ADDR(new_vcid);
2604 vcid_addr = GET_CID_ADDR(vcid);
2605 pcid_addr = vcid_addr;
2608 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2609 vcid_addr += (i << PHY_CTX_SHIFT);
2610 pcid_addr += (i << PHY_CTX_SHIFT);
2612 BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2613 BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2615 /* Zero out the context. */
2616 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2617 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2623 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2629 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2630 if (good_mbuf == NULL)
2633 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2634 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2638 /* Allocate a bunch of mbufs and save the good ones in an array. */
2639 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2640 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2641 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2642 BNX2_RBUF_COMMAND_ALLOC_REQ);
2644 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2646 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2648 /* The addresses with Bit 9 set are bad memory blocks. */
2649 if (!(val & (1 << 9))) {
2650 good_mbuf[good_mbuf_cnt] = (u16) val;
2654 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2657 /* Free the good ones back to the mbuf pool, thus discarding
2658 * all the bad ones. */
2659 while (good_mbuf_cnt) {
2662 val = good_mbuf[good_mbuf_cnt];
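/* Build the free command; the handle appears to be carried in both
 * the high (<< 9) and low fields, with bit 0 marking the request
 * valid.
 */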
2663 val = (val << 9) | val | 1;
2665 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2672 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2676 val = (mac_addr[0] << 8) | mac_addr[1];
2678 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2680 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2681 (mac_addr[4] << 8) | mac_addr[5];
2683 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2687 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2690 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2691 struct bnx2_rx_bd *rxbd =
2692 &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2693 struct page *page = alloc_page(gfp);
2697 mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2698 PCI_DMA_FROMDEVICE);
2699 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2705 dma_unmap_addr_set(rx_pg, mapping, mapping);
2706 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2707 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2712 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2714 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2715 struct page *page = rx_pg->page;
2720 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2721 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2728 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2731 struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2733 struct bnx2_rx_bd *rxbd =
2734 &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2736 data = kmalloc(bp->rx_buf_size, gfp);
2740 mapping = dma_map_single(&bp->pdev->dev,
2742 bp->rx_buf_use_size,
2743 PCI_DMA_FROMDEVICE);
2744 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2749 rx_buf->data = data;
2750 dma_unmap_addr_set(rx_buf, mapping, mapping);
2752 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2753 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2755 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2761 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2763 struct status_block *sblk = bnapi->status_blk.msi;
2764 u32 new_link_state, old_link_state;
2767 new_link_state = sblk->status_attn_bits & event;
2768 old_link_state = sblk->status_attn_bits_ack & event;
2769 if (new_link_state != old_link_state) {
2771 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2773 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2781 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2783 spin_lock(&bp->phy_lock);
2785 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2787 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2788 bnx2_set_remote_link(bp);
2790 spin_unlock(&bp->phy_lock);
2795 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2799 /* Tell compiler that status block fields can change. */
2801 cons = *bnapi->hw_tx_cons_ptr;
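/* A consumer index whose low bits equal BNX2_MAX_TX_DESC_CNT points
 * at the next-page BD, which carries no packet and must be stepped
 * over.
 */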
2803 if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2809 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2811 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2812 u16 hw_cons, sw_cons, sw_ring_cons;
2813 int tx_pkt = 0, index;
2814 unsigned int tx_bytes = 0;
2815 struct netdev_queue *txq;
2817 index = (bnapi - bp->bnx2_napi);
2818 txq = netdev_get_tx_queue(bp->dev, index);
2820 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2821 sw_cons = txr->tx_cons;
2823 while (sw_cons != hw_cons) {
2824 struct bnx2_sw_tx_bd *tx_buf;
2825 struct sk_buff *skb;
2828 sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2830 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2833 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2834 prefetch(&skb->end);
2836 /* partial BD completions possible with TSO packets */
2837 if (tx_buf->is_gso) {
2838 u16 last_idx, last_ring_idx;
2840 last_idx = sw_cons + tx_buf->nr_frags + 1;
2841 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
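/* last_idx is the packet's final BD. If the BDs cross the end of a
 * ring page, one extra next-page BD has to be counted. The signed
 * 16-bit subtraction below is a wrap-safe "is last_idx still ahead
 * of hw_cons" test; if so, the packet has not fully completed.
 */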
2842 if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2845 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2850 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2851 skb_headlen(skb), PCI_DMA_TODEVICE);
2854 last = tx_buf->nr_frags;
2856 for (i = 0; i < last; i++) {
2857 struct bnx2_sw_tx_bd *tx_buf;
2859 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2861 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2862 dma_unmap_page(&bp->pdev->dev,
2863 dma_unmap_addr(tx_buf, mapping),
2864 skb_frag_size(&skb_shinfo(skb)->frags[i]),
2868 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2870 tx_bytes += skb->len;
2873 if (tx_pkt == budget)
2876 if (hw_cons == sw_cons)
2877 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2880 netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2881 txr->hw_tx_cons = hw_cons;
2882 txr->tx_cons = sw_cons;
2884 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2885 * before checking for netif_tx_queue_stopped(). Without the
2886 * memory barrier, there is a small possibility that bnx2_start_xmit()
2887 * will miss it and cause the queue to be stopped forever.
2891 if (unlikely(netif_tx_queue_stopped(txq)) &&
2892 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2893 __netif_tx_lock(txq, smp_processor_id());
2894 if ((netif_tx_queue_stopped(txq)) &&
2895 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2896 netif_tx_wake_queue(txq);
2897 __netif_tx_unlock(txq);
2904 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2905 struct sk_buff *skb, int count)
2907 struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2908 struct bnx2_rx_bd *cons_bd, *prod_bd;
2911 u16 cons = rxr->rx_pg_cons;
2913 cons_rx_pg = &rxr->rx_pg_ring[cons];
2915 /* The caller was unable to allocate a new page to replace the
2916 * last one in the frags array, so we need to recycle that page
2917 * and then free the skb.
2921 struct skb_shared_info *shinfo;
2923 shinfo = skb_shinfo(skb);
2925 page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2926 __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2928 cons_rx_pg->page = page;
2932 hw_prod = rxr->rx_pg_prod;
2934 for (i = 0; i < count; i++) {
2935 prod = BNX2_RX_PG_RING_IDX(hw_prod);
2937 prod_rx_pg = &rxr->rx_pg_ring[prod];
2938 cons_rx_pg = &rxr->rx_pg_ring[cons];
2939 cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2940 [BNX2_RX_IDX(cons)];
2941 prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2942 [BNX2_RX_IDX(prod)];
2945 prod_rx_pg->page = cons_rx_pg->page;
2946 cons_rx_pg->page = NULL;
2947 dma_unmap_addr_set(prod_rx_pg, mapping,
2948 dma_unmap_addr(cons_rx_pg, mapping));
2950 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2951 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2954 cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2955 hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2957 rxr->rx_pg_prod = hw_prod;
2958 rxr->rx_pg_cons = cons;
2962 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2963 u8 *data, u16 cons, u16 prod)
2965 struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2966 struct bnx2_rx_bd *cons_bd, *prod_bd;
2968 cons_rx_buf = &rxr->rx_buf_ring[cons];
2969 prod_rx_buf = &rxr->rx_buf_ring[prod];
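/* Only the header area (BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH bytes)
 * was synced to the CPU for inspection; hand it back to the device
 * before the buffer is reposted.
 */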
2971 dma_sync_single_for_device(&bp->pdev->dev,
2972 dma_unmap_addr(cons_rx_buf, mapping),
2973 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2975 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2977 prod_rx_buf->data = data;
2982 dma_unmap_addr_set(prod_rx_buf, mapping,
2983 dma_unmap_addr(cons_rx_buf, mapping));
2985 cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
2986 prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
2987 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2988 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2991 static struct sk_buff *
2992 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
2993 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2997 u16 prod = ring_idx & 0xffff;
2998 struct sk_buff *skb;
3000 err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3001 if (unlikely(err)) {
3002 bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3005 unsigned int raw_len = len + 4;
3006 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3008 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3013 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3014 PCI_DMA_FROMDEVICE);
3015 skb = build_skb(data, 0);
3020 skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3025 unsigned int i, frag_len, frag_size, pages;
3026 struct bnx2_sw_pg *rx_pg;
3027 u16 pg_cons = rxr->rx_pg_cons;
3028 u16 pg_prod = rxr->rx_pg_prod;
3030 frag_size = len + 4 - hdr_len;
3031 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3032 skb_put(skb, hdr_len);
3034 for (i = 0; i < pages; i++) {
3035 dma_addr_t mapping_old;
3037 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3038 if (unlikely(frag_len <= 4)) {
3039 unsigned int tail = 4 - frag_len;
3041 rxr->rx_pg_cons = pg_cons;
3042 rxr->rx_pg_prod = pg_prod;
3043 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3050 &skb_shinfo(skb)->frags[i - 1];
3051 skb_frag_size_sub(frag, tail);
3052 skb->data_len -= tail;
3056 rx_pg = &rxr->rx_pg_ring[pg_cons];
3058 /* Don't unmap yet. If we're unable to allocate a new
3059 * page, we need to recycle the page and the DMA addr.
3061 mapping_old = dma_unmap_addr(rx_pg, mapping);
3065 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3068 err = bnx2_alloc_rx_page(bp, rxr,
3069 BNX2_RX_PG_RING_IDX(pg_prod),
3071 if (unlikely(err)) {
3072 rxr->rx_pg_cons = pg_cons;
3073 rxr->rx_pg_prod = pg_prod;
3074 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3079 dma_unmap_page(&bp->pdev->dev, mapping_old,
3080 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3082 frag_size -= frag_len;
3083 skb->data_len += frag_len;
3084 skb->truesize += PAGE_SIZE;
3085 skb->len += frag_len;
3087 pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3088 pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3090 rxr->rx_pg_prod = pg_prod;
3091 rxr->rx_pg_cons = pg_cons;
3097 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3101 /* Tell compiler that status block fields can change. */
3103 cons = *bnapi->hw_rx_cons_ptr;
3105 if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3111 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3113 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3114 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3115 struct l2_fhdr *rx_hdr;
3116 int rx_pkt = 0, pg_ring_used = 0;
3118 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3119 sw_cons = rxr->rx_cons;
3120 sw_prod = rxr->rx_prod;
3122 /* Memory barrier necessary as speculative reads of the rx
3123 * buffer can be ahead of the index in the status block
3126 while (sw_cons != hw_cons) {
3127 unsigned int len, hdr_len;
3129 struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3130 struct sk_buff *skb;
3131 dma_addr_t dma_addr;
3135 sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3136 sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3138 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3139 data = rx_buf->data;
3140 rx_buf->data = NULL;
3142 rx_hdr = get_l2_fhdr(data);
3145 dma_addr = dma_unmap_addr(rx_buf, mapping);
3147 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3148 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3149 PCI_DMA_FROMDEVICE);
3151 next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3152 next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3153 prefetch(get_l2_fhdr(next_rx_buf->data));
3155 len = rx_hdr->l2_fhdr_pkt_len;
3156 status = rx_hdr->l2_fhdr_status;
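/* For hardware-split packets the header length is returned in the
 * reused l2_fhdr_ip_xsum field. For frames above rx_jumbo_thresh,
 * only the first rx_jumbo_thresh bytes are in this buffer; the rest
 * sit in the page ring.
 */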
3159 if (status & L2_FHDR_STATUS_SPLIT) {
3160 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3162 } else if (len > bp->rx_jumbo_thresh) {
3163 hdr_len = bp->rx_jumbo_thresh;
3167 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3168 L2_FHDR_ERRORS_PHY_DECODE |
3169 L2_FHDR_ERRORS_ALIGNMENT |
3170 L2_FHDR_ERRORS_TOO_SHORT |
3171 L2_FHDR_ERRORS_GIANT_FRAME))) {
3173 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3178 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3180 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3187 if (len <= bp->rx_copy_thresh) {
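/* Copybreak: copy small frames into a fresh skb so the DMA buffer
 * can be reposted. Copying starts 6 bytes before the frame and the
 * skb reserves the same 6 bytes, leaving the IP header 4-byte
 * aligned (6 + 14-byte Ethernet header = 20).
 */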
3188 skb = netdev_alloc_skb(bp->dev, len + 6);
3190 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3197 (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3199 skb_reserve(skb, 6);
3202 bnx2_reuse_rx_data(bp, rxr, data,
3203 sw_ring_cons, sw_ring_prod);
3206 skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3207 (sw_ring_cons << 16) | sw_ring_prod);
3211 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3212 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3213 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3215 skb->protocol = eth_type_trans(skb, bp->dev);
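/* Drop oversized frames unless they are VLAN tagged (ethertype
 * 0x8100); a tagged frame may legitimately exceed mtu + ETH_HLEN by
 * the 4-byte tag.
 */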
3217 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3218 (ntohs(skb->protocol) != 0x8100)) {
3225 skb_checksum_none_assert(skb);
3226 if ((bp->dev->features & NETIF_F_RXCSUM) &&
3227 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3228 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3230 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3231 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3232 skb->ip_summed = CHECKSUM_UNNECESSARY;
3234 if ((bp->dev->features & NETIF_F_RXHASH) &&
3235 ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3236 L2_FHDR_STATUS_USE_RXHASH))
3237 skb->rxhash = rx_hdr->l2_fhdr_hash;
3239 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3240 napi_gro_receive(&bnapi->napi, skb);
3244 sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3245 sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3247 if (rx_pkt == budget)
3250 /* Refresh hw_cons to see if there is new work */
3251 if (sw_cons == hw_cons) {
3252 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3256 rxr->rx_cons = sw_cons;
3257 rxr->rx_prod = sw_prod;
3260 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3262 BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3264 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3272 /* MSI ISR - The only difference between this and the INTx ISR
3273 * is that the MSI interrupt is always serviced.
3276 bnx2_msi(int irq, void *dev_instance)
3278 struct bnx2_napi *bnapi = dev_instance;
3279 struct bnx2 *bp = bnapi->bp;
3281 prefetch(bnapi->status_blk.msi);
3282 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3283 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3284 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3286 /* Return here if interrupt is disabled. */
3287 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3290 napi_schedule(&bnapi->napi);
3296 bnx2_msi_1shot(int irq, void *dev_instance)
3298 struct bnx2_napi *bnapi = dev_instance;
3299 struct bnx2 *bp = bnapi->bp;
3301 prefetch(bnapi->status_blk.msi);
3303 /* Return here if interrupt is disabled. */
3304 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3307 napi_schedule(&bnapi->napi);
3313 bnx2_interrupt(int irq, void *dev_instance)
3315 struct bnx2_napi *bnapi = dev_instance;
3316 struct bnx2 *bp = bnapi->bp;
3317 struct status_block *sblk = bnapi->status_blk.msi;
3319 /* When using INTx, it is possible for the interrupt to arrive
3320 * at the CPU before the status block posted prior to the
3321 * interrupt. Reading a register will flush the status block.
3322 * When using MSI, the MSI message will always complete after
3323 * the status block write.
3325 if ((sblk->status_idx == bnapi->last_status_idx) &&
3326 (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3327 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3330 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3331 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3332 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3334 /* Read back to deassert IRQ immediately to avoid too many
3335 * spurious interrupts.
3337 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3339 /* Return here if interrupt is shared and is disabled. */
3340 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3343 if (napi_schedule_prep(&bnapi->napi)) {
3344 bnapi->last_status_idx = sblk->status_idx;
3345 __napi_schedule(&bnapi->napi);
3352 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3354 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3355 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3357 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3358 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3363 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3364 STATUS_ATTN_BITS_TIMER_ABORT)
3367 bnx2_has_work(struct bnx2_napi *bnapi)
3369 struct status_block *sblk = bnapi->status_blk.msi;
3371 if (bnx2_has_fast_work(bnapi))
3375 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3379 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3380 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3387 bnx2_chk_missed_msi(struct bnx2 *bp)
3389 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3392 if (bnx2_has_work(bnapi)) {
3393 msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3394 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3397 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
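/* Work is pending but the status index has not advanced since the
 * last idle check, so assume an MSI was lost: toggle the MSI enable
 * bit to rearm delivery and invoke the handler by hand.
 */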
3398 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3399 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3400 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3401 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3405 bp->idle_chk_status_idx = bnapi->last_status_idx;
3409 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3411 struct cnic_ops *c_ops;
3413 if (!bnapi->cnic_present)
3417 c_ops = rcu_dereference(bp->cnic_ops);
3419 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3420 bnapi->status_blk.msi);
3425 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3427 struct status_block *sblk = bnapi->status_blk.msi;
3428 u32 status_attn_bits = sblk->status_attn_bits;
3429 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3431 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3432 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3434 bnx2_phy_int(bp, bnapi);
3436 /* This is needed to take care of transient status
3437 * during link changes.
3439 BNX2_WR(bp, BNX2_HC_COMMAND,
3440 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3441 BNX2_RD(bp, BNX2_HC_COMMAND);
3445 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3446 int work_done, int budget)
3448 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3449 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3451 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3452 bnx2_tx_int(bp, bnapi, 0);
3454 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3455 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3460 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3462 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3463 struct bnx2 *bp = bnapi->bp;
3465 struct status_block_msix *sblk = bnapi->status_blk.msix;
3468 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3469 if (unlikely(work_done >= budget))
3472 bnapi->last_status_idx = sblk->status_idx;
3473 /* status idx must be read before checking for more work. */
3475 if (likely(!bnx2_has_fast_work(bnapi))) {
3477 napi_complete(napi);
3478 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3479 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3480 bnapi->last_status_idx);
3487 static int bnx2_poll(struct napi_struct *napi, int budget)
3489 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3490 struct bnx2 *bp = bnapi->bp;
3492 struct status_block *sblk = bnapi->status_blk.msi;
3495 bnx2_poll_link(bp, bnapi);
3497 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3500 bnx2_poll_cnic(bp, bnapi);
3503 /* bnapi->last_status_idx is used below to tell the hw how
3504 * much work has been processed, so we must read it before
3505 * checking for more work.
3507 bnapi->last_status_idx = sblk->status_idx;
3509 if (unlikely(work_done >= budget))
3513 if (likely(!bnx2_has_work(bnapi))) {
3514 napi_complete(napi);
3515 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3516 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3517 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3518 bnapi->last_status_idx);
3521 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3522 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3523 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3524 bnapi->last_status_idx);
3526 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3527 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3528 bnapi->last_status_idx);
3536 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3537 * from set_multicast.
3540 bnx2_set_rx_mode(struct net_device *dev)
3542 struct bnx2 *bp = netdev_priv(dev);
3543 u32 rx_mode, sort_mode;
3544 struct netdev_hw_addr *ha;
3547 if (!netif_running(dev))
3550 spin_lock_bh(&bp->phy_lock);
3552 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3553 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3554 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3555 if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3556 (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3557 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3558 if (dev->flags & IFF_PROMISC) {
3559 /* Promiscuous mode. */
3560 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3561 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3562 BNX2_RPM_SORT_USER0_PROM_VLAN;
3564 else if (dev->flags & IFF_ALLMULTI) {
3565 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3566 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3569 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3572 /* Accept one or more multicast addresses. */
3573 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3578 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
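/* Hash filter: the low byte of the little-endian CRC of the address
 * selects one of 256 bits; its top 3 bits pick one of the 8 hash
 * registers and its low 5 bits the bit within that register.
 */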
3580 netdev_for_each_mc_addr(ha, dev) {
3581 crc = ether_crc_le(ETH_ALEN, ha->addr);
3583 regidx = (bit & 0xe0) >> 5;
3585 mc_filter[regidx] |= (1 << bit);
3588 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3589 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3593 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3596 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3597 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3598 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3599 BNX2_RPM_SORT_USER0_PROM_VLAN;
3600 } else if (!(dev->flags & IFF_PROMISC)) {
3601 /* Add all entries to the match filter list */
3603 netdev_for_each_uc_addr(ha, dev) {
3604 bnx2_set_mac_addr(bp, ha->addr,
3605 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3607 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3613 if (rx_mode != bp->rx_mode) {
3614 bp->rx_mode = rx_mode;
3615 BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3618 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3619 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3620 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3622 spin_unlock_bh(&bp->phy_lock);
3626 check_fw_section(const struct firmware *fw,
3627 const struct bnx2_fw_file_section *section,
3628 u32 alignment, bool non_empty)
3630 u32 offset = be32_to_cpu(section->offset);
3631 u32 len = be32_to_cpu(section->len);
3633 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3635 if ((non_empty && len == 0) || len > fw->size - offset ||
3636 len & (alignment - 1))
3642 check_mips_fw_entry(const struct firmware *fw,
3643 const struct bnx2_mips_fw_file_entry *entry)
3645 if (check_fw_section(fw, &entry->text, 4, true) ||
3646 check_fw_section(fw, &entry->data, 4, false) ||
3647 check_fw_section(fw, &entry->rodata, 4, false))
3652 static void bnx2_release_firmware(struct bnx2 *bp)
3654 if (bp->rv2p_firmware) {
3655 release_firmware(bp->mips_firmware);
3656 release_firmware(bp->rv2p_firmware);
3657 bp->rv2p_firmware = NULL;
3661 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3663 const char *mips_fw_file, *rv2p_fw_file;
3664 const struct bnx2_mips_fw_file *mips_fw;
3665 const struct bnx2_rv2p_fw_file *rv2p_fw;
3668 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3669 mips_fw_file = FW_MIPS_FILE_09;
3670 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3671 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3672 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3674 rv2p_fw_file = FW_RV2P_FILE_09;
3676 mips_fw_file = FW_MIPS_FILE_06;
3677 rv2p_fw_file = FW_RV2P_FILE_06;
3680 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3682 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3686 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3688 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3689 goto err_release_mips_firmware;
3691 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3692 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3693 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3694 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3695 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3696 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3697 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3698 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3699 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3701 goto err_release_firmware;
3703 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3704 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3705 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3706 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3708 goto err_release_firmware;
3713 err_release_firmware:
3714 release_firmware(bp->rv2p_firmware);
3715 bp->rv2p_firmware = NULL;
3716 err_release_mips_firmware:
3717 release_firmware(bp->mips_firmware);
3721 static int bnx2_request_firmware(struct bnx2 *bp)
3723 return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3727 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3730 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3731 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3732 rv2p_code |= RV2P_BD_PAGE_SIZE;
3739 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3740 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3742 u32 rv2p_code_len, file_offset;
3747 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3748 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3750 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3752 if (rv2p_proc == RV2P_PROC1) {
3753 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3754 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3756 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3757 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3760 for (i = 0; i < rv2p_code_len; i += 8) {
3761 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3763 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3766 val = (i / 8) | cmd;
3767 BNX2_WR(bp, addr, val);
3770 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
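/* Apply the fixups: each record names a code dword that
 * rv2p_fw_fixup() patches with run-time values such as the BD page
 * size. Instructions are 64 bits, so the patched instruction is
 * rewritten at index loc / 2.
 */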
3771 for (i = 0; i < 8; i++) {
3774 loc = be32_to_cpu(fw_entry->fixup[i]);
3775 if (loc && ((loc * 4) < rv2p_code_len)) {
3776 code = be32_to_cpu(*(rv2p_code + loc - 1));
3777 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3778 code = be32_to_cpu(*(rv2p_code + loc));
3779 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3780 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3782 val = (loc / 2) | cmd;
3783 BNX2_WR(bp, addr, val);
3787 /* Reset the processor, un-stall is done later. */
3788 if (rv2p_proc == RV2P_PROC1) {
3789 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3792 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3799 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3800 const struct bnx2_mips_fw_file_entry *fw_entry)
3802 u32 addr, len, file_offset;
3808 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3809 val |= cpu_reg->mode_value_halt;
3810 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3811 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3813 /* Load the Text area. */
3814 addr = be32_to_cpu(fw_entry->text.addr);
3815 len = be32_to_cpu(fw_entry->text.len);
3816 file_offset = be32_to_cpu(fw_entry->text.offset);
3817 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3819 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3823 for (j = 0; j < (len / 4); j++, offset += 4)
3824 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3827 /* Load the Data area. */
3828 addr = be32_to_cpu(fw_entry->data.addr);
3829 len = be32_to_cpu(fw_entry->data.len);
3830 file_offset = be32_to_cpu(fw_entry->data.offset);
3831 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3833 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3837 for (j = 0; j < (len / 4); j++, offset += 4)
3838 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3841 /* Load the Read-Only area. */
3842 addr = be32_to_cpu(fw_entry->rodata.addr);
3843 len = be32_to_cpu(fw_entry->rodata.len);
3844 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3845 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3847 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3851 for (j = 0; j < (len / 4); j++, offset += 4)
3852 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3855 /* Clear the pre-fetch instruction. */
3856 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3858 val = be32_to_cpu(fw_entry->start_addr);
3859 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3861 /* Start the CPU. */
3862 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3863 val &= ~cpu_reg->mode_value_halt;
3864 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3865 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3871 bnx2_init_cpus(struct bnx2 *bp)
3873 const struct bnx2_mips_fw_file *mips_fw =
3874 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3875 const struct bnx2_rv2p_fw_file *rv2p_fw =
3876 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3879 /* Initialize the RV2P processor. */
3880 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3881 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3883 /* Initialize the RX Processor. */
3884 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3888 /* Initialize the TX Processor. */
3889 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3893 /* Initialize the TX Patch-up Processor. */
3894 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3898 /* Initialize the Completion Processor. */
3899 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3903 /* Initialize the Command Processor. */
3904 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3911 bnx2_setup_wol(struct bnx2 *bp)
3920 autoneg = bp->autoneg;
3921 advertising = bp->advertising;
3923 if (bp->phy_port == PORT_TP) {
3924 bp->autoneg = AUTONEG_SPEED;
3925 bp->advertising = ADVERTISED_10baseT_Half |
3926 ADVERTISED_10baseT_Full |
3927 ADVERTISED_100baseT_Half |
3928 ADVERTISED_100baseT_Full |
3932 spin_lock_bh(&bp->phy_lock);
3933 bnx2_setup_phy(bp, bp->phy_port);
3934 spin_unlock_bh(&bp->phy_lock);
3936 bp->autoneg = autoneg;
3937 bp->advertising = advertising;
3939 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3941 val = BNX2_RD(bp, BNX2_EMAC_MODE);
3943 /* Enable port mode. */
3944 val &= ~BNX2_EMAC_MODE_PORT;
3945 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3946 BNX2_EMAC_MODE_ACPI_RCVD |
3947 BNX2_EMAC_MODE_MPKT;
3948 if (bp->phy_port == PORT_TP) {
3949 val |= BNX2_EMAC_MODE_PORT_MII;
3951 val |= BNX2_EMAC_MODE_PORT_GMII;
3952 if (bp->line_speed == SPEED_2500)
3953 val |= BNX2_EMAC_MODE_25G_MODE;
3956 BNX2_WR(bp, BNX2_EMAC_MODE, val);
3958 /* receive all multicast */
3959 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3960 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3963 BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
3965 val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
3966 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3967 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
3968 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
3970 /* Need to enable EMAC and RPM for WOL. */
3971 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3972 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3973 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3974 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3976 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
3977 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3978 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
3980 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3982 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3985 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3986 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
3991 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3997 pci_enable_wake(bp->pdev, PCI_D0, false);
3998 pci_set_power_state(bp->pdev, PCI_D0);
4000 val = BNX2_RD(bp, BNX2_EMAC_MODE);
4001 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4002 val &= ~BNX2_EMAC_MODE_MPKT;
4003 BNX2_WR(bp, BNX2_EMAC_MODE, val);
4005 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4006 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4007 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4012 pci_wake_from_d3(bp->pdev, bp->wol);
4013 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4014 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
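/* The 5706 A0/A1 steppings are only put into D3hot when WoL is
 * armed; otherwise they are left as-is, presumably because these
 * early revisions misbehave in D3hot without wake logic running.
 */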
4017 pci_set_power_state(bp->pdev, PCI_D3hot);
4019 pci_set_power_state(bp->pdev, PCI_D3hot);
4022 /* No more memory access after this point until
4023 * device is brought back to D0.
4034 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4039 /* Request access to the flash interface. */
4040 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4041 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4042 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4043 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4049 if (j >= NVRAM_TIMEOUT_COUNT)
4056 bnx2_release_nvram_lock(struct bnx2 *bp)
4061 /* Relinquish nvram interface. */
4062 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4064 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4065 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4066 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4072 if (j >= NVRAM_TIMEOUT_COUNT)
4080 bnx2_enable_nvram_write(struct bnx2 *bp)
4084 val = BNX2_RD(bp, BNX2_MISC_CFG);
4085 BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4087 if (bp->flash_info->flags & BNX2_NV_WREN) {
4090 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4091 BNX2_WR(bp, BNX2_NVM_COMMAND,
4092 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4094 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4097 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4098 if (val & BNX2_NVM_COMMAND_DONE)
4102 if (j >= NVRAM_TIMEOUT_COUNT)
4109 bnx2_disable_nvram_write(struct bnx2 *bp)
4113 val = BNX2_RD(bp, BNX2_MISC_CFG);
4114 BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4119 bnx2_enable_nvram_access(struct bnx2 *bp)
4123 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4124 /* Enable both bits, even on read. */
4125 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4126 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4130 bnx2_disable_nvram_access(struct bnx2 *bp)
4134 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4135 /* Disable both bits, even after read. */
4136 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4137 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4138 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4142 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4147 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4148 /* Buffered flash, no erase needed */
4151 /* Build an erase command */
4152 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4153 BNX2_NVM_COMMAND_DOIT;
4155 /* Need to clear DONE bit separately. */
4156 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4158 /* Address of the NVRAM page to erase. */
4159 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4161 /* Issue an erase command. */
4162 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4164 /* Wait for completion. */
4165 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4170 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4171 if (val & BNX2_NVM_COMMAND_DONE)
4175 if (j >= NVRAM_TIMEOUT_COUNT)
4182 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4187 /* Build the command word. */
4188 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4190 /* Calculate the offset within a buffered flash; not needed for the 5709. */
4191 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4192 offset = ((offset / bp->flash_info->page_size) <<
4193 bp->flash_info->page_bits) +
4194 (offset % bp->flash_info->page_size);
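/* e.g. for the 264-byte-page, 9-page-bit buffered parts in
 * flash_table, linear offset 300 maps to
 * (300 / 264 << 9) + (300 % 264) = 512 + 36 = 548.
 */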
4197 /* Need to clear DONE bit separately. */
4198 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4200 /* Address of the NVRAM to read from. */
4201 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4203 /* Issue a read command. */
4204 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4206 /* Wait for completion. */
4207 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4212 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4213 if (val & BNX2_NVM_COMMAND_DONE) {
4214 __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4215 memcpy(ret_val, &v, 4);
4219 if (j >= NVRAM_TIMEOUT_COUNT)
4227 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4233 /* Build the command word. */
4234 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4236 /* Calculate the offset within a buffered flash; not needed for the 5709. */
4237 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4238 offset = ((offset / bp->flash_info->page_size) <<
4239 bp->flash_info->page_bits) +
4240 (offset % bp->flash_info->page_size);
4243 /* Need to clear DONE bit separately. */
4244 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4246 memcpy(&val32, val, 4);
4248 /* Write the data. */
4249 BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4251 /* Address of the NVRAM to write to. */
4252 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4254 /* Issue the write command. */
4255 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4257 /* Wait for completion. */
4258 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4261 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4264 if (j >= NVRAM_TIMEOUT_COUNT)
4271 bnx2_init_nvram(struct bnx2 *bp)
4274 int j, entry_count, rc = 0;
4275 const struct flash_spec *flash;
4277 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4278 bp->flash_info = &flash_5709;
4279 goto get_flash_size;
4282 /* Determine the selected interface. */
4283 val = BNX2_RD(bp, BNX2_NVM_CFG1);
4285 entry_count = ARRAY_SIZE(flash_table);
4287 if (val & 0x40000000) {
4289 /* Flash interface has been reconfigured */
4290 for (j = 0, flash = &flash_table[0]; j < entry_count;
4292 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4293 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4294 bp->flash_info = flash;
4301 /* Not yet reconfigured */
4303 if (val & (1 << 23))
4304 mask = FLASH_BACKUP_STRAP_MASK;
4306 mask = FLASH_STRAP_MASK;
4308 for (j = 0, flash = &flash_table[0]; j < entry_count;
4311 if ((val & mask) == (flash->strapping & mask)) {
4312 bp->flash_info = flash;
4314 /* Request access to the flash interface. */
4315 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4318 /* Enable access to flash interface */
4319 bnx2_enable_nvram_access(bp);
4321 /* Reconfigure the flash interface */
4322 BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4323 BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4324 BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4325 BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4327 /* Disable access to flash interface */
4328 bnx2_disable_nvram_access(bp);
4329 bnx2_release_nvram_lock(bp);
4334 } /* if (val & 0x40000000) */
4336 if (j == entry_count) {
4337 bp->flash_info = NULL;
4338 pr_alert("Unknown flash/EEPROM type\n");
4343 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4344 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4346 bp->flash_size = val;
4348 bp->flash_size = bp->flash_info->total_size;
4354 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4358 u32 cmd_flags, offset32, len32, extra;
4363 /* Request access to the flash interface. */
4364 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4367 /* Enable access to flash interface */
4368 bnx2_enable_nvram_access(bp);
4381 pre_len = 4 - (offset & 3);
4383 if (pre_len >= len32) {
4385 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4386 BNX2_NVM_COMMAND_LAST;
4389 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4392 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4397 memcpy(ret_buf, buf + (offset & 3), pre_len);
4404 extra = 4 - (len32 & 3);
4405 len32 = (len32 + 4) & ~3;
4412 cmd_flags = BNX2_NVM_COMMAND_LAST;
4414 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4415 BNX2_NVM_COMMAND_LAST;
4417 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4419 memcpy(ret_buf, buf, 4 - extra);
4421 else if (len32 > 0) {
4424 /* Read the first word. */
4428 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4430 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4432 /* Advance to the next dword. */
4437 while (len32 > 4 && rc == 0) {
4438 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4440 /* Advance to the next dword. */
4449 cmd_flags = BNX2_NVM_COMMAND_LAST;
4450 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4452 memcpy(ret_buf, buf, 4 - extra);
4455 /* Disable access to flash interface */
4456 bnx2_disable_nvram_access(bp);
4458 bnx2_release_nvram_lock(bp);
4464 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4467 u32 written, offset32, len32;
4468 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4470 int align_start, align_end;
4475 align_start = align_end = 0;
4477 if ((align_start = (offset32 & 3))) {
4479 len32 += align_start;
4482 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4487 align_end = 4 - (len32 & 3);
4489 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4493 if (align_start || align_end) {
4494 align_buf = kmalloc(len32, GFP_KERNEL);
4495 if (align_buf == NULL)
4498 memcpy(align_buf, start, 4);
4501 memcpy(align_buf + len32 - 4, end, 4);
4503 memcpy(align_buf + align_start, data_buf, buf_size);
4507 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
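/* Non-buffered parts are rewritten a whole page at a time; 264
 * bytes is sized to hold the largest page among the supported
 * parts.
 */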
4508 flash_buffer = kmalloc(264, GFP_KERNEL);
4509 if (flash_buffer == NULL) {
4511 goto nvram_write_end;
4516 while ((written < len32) && (rc == 0)) {
4517 u32 page_start, page_end, data_start, data_end;
4518 u32 addr, cmd_flags;
4521 /* Find the page_start addr */
4522 page_start = offset32 + written;
4523 page_start -= (page_start % bp->flash_info->page_size);
4524 /* Find the page_end addr */
4525 page_end = page_start + bp->flash_info->page_size;
4526 /* Find the data_start addr */
4527 data_start = (written == 0) ? offset32 : page_start;
4528 /* Find the data_end addr */
4529 data_end = (page_end > offset32 + len32) ?
4530 (offset32 + len32) : page_end;
4532 /* Request access to the flash interface. */
4533 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4534 goto nvram_write_end;
4536 /* Enable access to flash interface */
4537 bnx2_enable_nvram_access(bp);
4539 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4540 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4543 /* Read the whole page into the buffer
4544 * (non-buffered flash only) */
4545 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4546 if (j == (bp->flash_info->page_size - 4)) {
4547 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4549 rc = bnx2_nvram_read_dword(bp,
4555 goto nvram_write_end;
4561 /* Enable writes to flash interface (unlock write-protect) */
4562 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4563 goto nvram_write_end;
4565 /* Loop to write back the buffer data from page_start to data_start. */
4568 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4569 /* Erase the page */
4570 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4571 goto nvram_write_end;
4573 /* Re-enable the write again for the actual write */
4574 bnx2_enable_nvram_write(bp);
4576 for (addr = page_start; addr < data_start;
4577 addr += 4, i += 4) {
4579 rc = bnx2_nvram_write_dword(bp, addr,
4580 &flash_buffer[i], cmd_flags);
4583 goto nvram_write_end;
4589 /* Loop to write the new data from data_start to data_end */
4590 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4591 if ((addr == page_end - 4) ||
4592 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4593 (addr == data_end - 4))) {
4595 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4597 rc = bnx2_nvram_write_dword(bp, addr, buf,
4601 goto nvram_write_end;
4607 /* Loop to write back the buffer data from data_end to page_end. */
4609 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4610 for (addr = data_end; addr < page_end;
4611 addr += 4, i += 4) {
4613 if (addr == page_end-4) {
4614 cmd_flags = BNX2_NVM_COMMAND_LAST;
4616 rc = bnx2_nvram_write_dword(bp, addr,
4617 &flash_buffer[i], cmd_flags);
4620 goto nvram_write_end;
4626 /* Disable writes to flash interface (lock write-protect) */
4627 bnx2_disable_nvram_write(bp);
4629 /* Disable access to flash interface */
4630 bnx2_disable_nvram_access(bp);
4631 bnx2_release_nvram_lock(bp);
4633 /* Increment written */
4634 written += data_end - data_start;
4638 kfree(flash_buffer);
4644 bnx2_init_fw_cap(struct bnx2 *bp)
4648 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4649 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4651 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4652 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4654 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4655 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4658 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4659 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4660 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4663 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4664 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4667 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4669 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4670 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4671 bp->phy_port = PORT_FIBRE;
4673 bp->phy_port = PORT_TP;
4675 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4676 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4679 if (netif_running(bp->dev) && sig)
4680 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4684 bnx2_setup_msix_tbl(struct bnx2 *bp)
4686 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4688 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4689 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4693 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4699 /* Wait for the current PCI transaction to complete before
4700 * issuing a reset. */
4701 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4702 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4703 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4704 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4705 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4706 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4707 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4708 val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4711 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4712 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4713 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4714 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4716 for (i = 0; i < 100; i++) {
4718 val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4719 if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4724 /* Wait for the firmware to tell us it is ok to issue a reset. */
4725 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4727 /* Deposit a driver reset signature so the firmware knows that
4728 * this is a soft reset. */
4729 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4730 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4732 /* Do a dummy read to force the chip to complete all current transactions
4733 * before we issue a reset. */
4734 val = BNX2_RD(bp, BNX2_MISC_ID);
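/* The 5709 is reset through the MISC command register and only has
 * its config window reprogrammed afterwards; older chips request a
 * core reset directly through BNX2_PCICFG_MISC_CONFIG below.
 */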
4736 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4737 BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4738 BNX2_RD(bp, BNX2_MISC_COMMAND);
4741 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4742 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4744 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4747 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4748 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4749 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4752 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4754 /* Reading back any register after chip reset will hang the
4755 * bus on 5706 A0 and A1. The msleep below provides plenty
4756 * of margin for write posting.
4758 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4759 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4762 /* Reset takes approximately 30 usec */
4763 for (i = 0; i < 10; i++) {
4764 val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4765 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4766 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4771 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4772 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4773 pr_err("Chip reset did not complete\n");
4778 /* Make sure byte swapping is properly configured. */
4779 val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4780 if (val != 0x01020304) {
4781 pr_err("Chip not in correct endian mode\n");
4785 /* Wait for the firmware to finish its initialization. */
4786 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4790 spin_lock_bh(&bp->phy_lock);
4791 old_port = bp->phy_port;
4792 bnx2_init_fw_cap(bp);
4793 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4794 old_port != bp->phy_port)
4795 bnx2_set_default_remote_link(bp);
4796 spin_unlock_bh(&bp->phy_lock);
4798 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4799 /* Adjust the voltage regulator to two steps lower. The default
4800 * of this register is 0x0000000e. */
4801 BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4803 /* Remove bad rbuf memory from the free pool. */
4804 rc = bnx2_alloc_bad_rbuf(bp);
4807 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4808 bnx2_setup_msix_tbl(bp);
4809 /* Prevent MSIX table reads and writes from timing out */
4810 BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4811 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4818 bnx2_init_chip(struct bnx2 *bp)
4823 /* Make sure the interrupt is not active. */
4824 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4826 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4827 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4829 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4831 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4832 DMA_READ_CHANS << 12 |
4833 DMA_WRITE_CHANS << 16;
4835 val |= (0x2 << 20) | (1 << 11);
4837 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4840 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4841 (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4842 !(bp->flags & BNX2_FLAG_PCIX))
4843 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4845 BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4847 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4848 val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4849 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4850 BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4853 if (bp->flags & BNX2_FLAG_PCIX) {
4856 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4858 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4859 val16 & ~PCI_X_CMD_ERO);
4862 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4863 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4864 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4865 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4867 /* Initialize context mapping and zero out the quick contexts. The
4868 * context block must have already been enabled. */
4869 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4870 rc = bnx2_init_5709_context(bp);
4874 bnx2_init_context(bp);
4876 if ((rc = bnx2_init_cpus(bp)) != 0)
4879 bnx2_init_nvram(bp);
4881 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4883 val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4884 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4885 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4886 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4887 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4888 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4889 val |= BNX2_MQ_CONFIG_HALT_DIS;
4892 BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4894 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4895 BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4896 BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4898 val = (BNX2_PAGE_BITS - 8) << 24;
4899 BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4901 /* Configure page size. */
4902 val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4903 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4904 val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4905 BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
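/* Derive the EMAC backoff seed from the MAC address so that each
 * port's half-duplex collision backoff sequence differs.
 */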
4907 val = bp->mac_addr[0] +
4908 (bp->mac_addr[1] << 8) +
4909 (bp->mac_addr[2] << 16) +
4911 (bp->mac_addr[4] << 8) +
4912 (bp->mac_addr[5] << 16);
4913 BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4915 /* Program the MTU. Also include 4 bytes for CRC32. */
4917 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4918 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4919 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4920 BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4925 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4926 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4927 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4929 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4930 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4931 bp->bnx2_napi[i].last_status_idx = 0;
4933 bp->idle_chk_status_idx = 0xffff;
4935 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4937 /* Set up how to generate a link change interrupt. */
4938 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4940 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
4941 (u64) bp->status_blk_mapping & 0xffffffff);
4942 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4944 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4945 (u64) bp->stats_blk_mapping & 0xffffffff);
4946 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4947 (u64) bp->stats_blk_mapping >> 32);
4949 BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4950 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4952 BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4953 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4955 BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4956 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4958 BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4960 BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4962 BNX2_WR(bp, BNX2_HC_COM_TICKS,
4963 (bp->com_ticks_int << 16) | bp->com_ticks);
4965 BNX2_WR(bp, BNX2_HC_CMD_TICKS,
4966 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4968 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4969 BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
4971 else BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4972 BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4974 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
4975 val = BNX2_HC_CONFIG_COLLECT_STATS;
4977 else val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4978 BNX2_HC_CONFIG_COLLECT_STATS;
4981 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4982 BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4983 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4985 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4988 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4989 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4991 BNX2_WR(bp, BNX2_HC_CONFIG, val);
4993 if (bp->rx_ticks < 25)
4994 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
4996 else bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
4998 for (i = 1; i < bp->irq_nvecs; i++) {
4999 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5000 BNX2_HC_SB_CONFIG_1;
5003 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5004 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5005 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5007 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5008 (bp->tx_quick_cons_trip_int << 16) |
5009 bp->tx_quick_cons_trip);
5011 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5012 (bp->tx_ticks_int << 16) | bp->tx_ticks);
5014 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5015 (bp->rx_quick_cons_trip_int << 16) |
5016 bp->rx_quick_cons_trip);
5018 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5019 (bp->rx_ticks_int << 16) | bp->rx_ticks);
5022 /* Clear internal stats counters. */
5023 BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5025 BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5027 /* Initialize the receive filter. */
5028 bnx2_set_rx_mode(bp->dev);
5030 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5031 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5032 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5033 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5035 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5038 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5039 BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5043 bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
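/* Reset the software producer/consumer state of every tx and rx ring
 * so that ring initialization starts from index 0.
 */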
5049 bnx2_clear_ring_states(struct bnx2 *bp)
5051 struct bnx2_napi *bnapi;
5052 struct bnx2_tx_ring_info *txr;
5053 struct bnx2_rx_ring_info *rxr;
5056 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5057 bnapi = &bp->bnx2_napi[i];
5058 txr = &bnapi->tx_ring;
5059 rxr = &bnapi->rx_ring;
5062 txr->hw_tx_cons = 0;
5063 rxr->rx_prod_bseq = 0;
5066 rxr->rx_pg_prod = 0;
5067 rxr->rx_pg_cons = 0;
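/* Write the L2 tx context for one ring: the context type/command
 * words and the 64-bit host address of the tx descriptor ring. The
 * 5709 uses a different set of context offsets (the _XI variants).
 */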
5072 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5074 u32 val, offset0, offset1, offset2, offset3;
5075 u32 cid_addr = GET_CID_ADDR(cid);
5077 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5078 offset0 = BNX2_L2CTX_TYPE_XI;
5079 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5080 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5081 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5083 offset0 = BNX2_L2CTX_TYPE;
5084 offset1 = BNX2_L2CTX_CMD_TYPE;
5085 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5086 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5088 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5089 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5091 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5092 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5094 val = (u64) txr->tx_desc_mapping >> 32;
5095 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5097 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5098 bnx2_ctx_wr(bp, cid_addr, offset3, val);
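/* Set up one tx ring. The entry at BNX2_MAX_TX_DESC_CNT is not a
 * real descriptor; it chains back to the base of the ring, making
 * the single page circular in hardware.
 */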
5102 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5104 struct bnx2_tx_bd *txbd;
5106 struct bnx2_napi *bnapi;
5107 struct bnx2_tx_ring_info *txr;
5109 bnapi = &bp->bnx2_napi[ring_num];
5110 txr = &bnapi->tx_ring;
5115 cid = TX_TSS_CID + ring_num - 1;
5117 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5119 txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5121 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5122 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5125 txr->tx_prod_bseq = 0;
5127 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5128 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5130 bnx2_init_tx_context(bp, cid, txr);
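/* Fill each rx descriptor page with the buffer size and START/END
 * flags, and chain the last descriptor of every page to the next
 * page (the final page chains back to the first).
 */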
5134 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5135 u32 buf_size, int num_rings)
5138 struct bnx2_rx_bd *rxbd;
5140 for (i = 0; i < num_rings; i++) {
5143 rxbd = &rx_ring[i][0];
5144 for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5145 rxbd->rx_bd_len = buf_size;
5146 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5148 if (i == (num_rings - 1))
5152 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5153 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
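/* Set up one rx ring and, for jumbo MTUs, its companion page ring,
 * pre-fill both with receive buffers, and publish the initial
 * producer indices to the chip.
 */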
5158 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5161 u16 prod, ring_prod;
5162 u32 cid, rx_cid_addr, val;
5163 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5164 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5169 cid = RX_RSS_CID + ring_num - 1;
5171 rx_cid_addr = GET_CID_ADDR(cid);
5173 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5174 bp->rx_buf_use_size, bp->rx_max_ring);
5176 bnx2_init_rx_context(bp, cid);
5178 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5179 val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5180 BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5183 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5184 if (bp->rx_pg_ring_size) {
5185 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5186 rxr->rx_pg_desc_mapping,
5187 PAGE_SIZE, bp->rx_max_pg_ring);
5188 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5189 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5190 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5191 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5193 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5194 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5196 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5197 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5199 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5200 BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5203 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5204 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5206 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5207 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5209 ring_prod = prod = rxr->rx_pg_prod;
5210 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5211 if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5212 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5213 ring_num, i, bp->rx_pg_ring_size);
5216 prod = BNX2_NEXT_RX_BD(prod);
5217 ring_prod = BNX2_RX_PG_RING_IDX(prod);
5219 rxr->rx_pg_prod = prod;
5221 ring_prod = prod = rxr->rx_prod;
5222 for (i = 0; i < bp->rx_ring_size; i++) {
5223 if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5224 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5225 ring_num, i, bp->rx_ring_size);
5228 prod = BNX2_NEXT_RX_BD(prod);
5229 ring_prod = BNX2_RX_RING_IDX(prod);
5231 rxr->rx_prod = prod;
5233 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5234 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5235 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5237 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5238 BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5240 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5244 bnx2_init_all_rings(struct bnx2 *bp)
5249 bnx2_clear_ring_states(bp);
5251 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5252 for (i = 0; i < bp->num_tx_rings; i++)
5253 bnx2_init_tx_ring(bp, i);
5255 if (bp->num_tx_rings > 1)
5256 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5259 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5260 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5262 for (i = 0; i < bp->num_rx_rings; i++)
5263 bnx2_init_rx_ring(bp, i);
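/* With more than one rx ring, program the RSS indirection table:
 * each RLUP_RSS_DATA word packs eight 4-bit ring indices and is
 * committed through RLUP_RSS_COMMAND every eight entries.
 */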
5265 if (bp->num_rx_rings > 1) {
5268 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5269 int shift = (i % 8) << 2;
5271 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5273 BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5274 BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5275 BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5276 BNX2_RLUP_RSS_COMMAND_WRITE |
5277 BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5282 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5283 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5285 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
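/* Work out how many descriptor pages are needed to hold ring_size
 * entries, rounded up to the next power of 2 and capped at max_size.
 */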
5290 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5292 u32 max, num_rings = 1;
5294 while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5295 ring_size -= BNX2_MAX_RX_DESC_CNT;
5298 /* round to next power of 2 */
5300 while ((max & num_rings) == 0)
5303 if (num_rings != max)
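/* Compute the rx buffer geometry for the current MTU. If a full
 * frame plus skb overhead no longer fits in one page, switch to the
 * jumbo scheme: small header buffers on the normal ring plus a page
 * ring that holds the rest of the frame.
 */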
5310 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5312 u32 rx_size, rx_space, jumbo_size;
5314 /* 8 for CRC and VLAN */
5315 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5317 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5318 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5320 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5321 bp->rx_pg_ring_size = 0;
5322 bp->rx_max_pg_ring = 0;
5323 bp->rx_max_pg_ring_idx = 0;
5324 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5325 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5327 jumbo_size = size * pages;
5328 if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5329 jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5331 bp->rx_pg_ring_size = jumbo_size;
5332 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5333 BNX2_MAX_RX_PG_RINGS);
5334 bp->rx_max_pg_ring_idx =
5335 (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5336 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5337 bp->rx_copy_thresh = 0;
5340 bp->rx_buf_use_size = rx_size;
5341 /* hw alignment + build_skb() overhead */
5342 bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5343 NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5344 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5345 bp->rx_ring_size = size;
5346 bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5347 bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5351 bnx2_free_tx_skbs(struct bnx2 *bp)
5355 for (i = 0; i < bp->num_tx_rings; i++) {
5356 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5357 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5360 if (txr->tx_buf_ring == NULL)
5363 for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5364 struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5365 struct sk_buff *skb = tx_buf->skb;
5369 j = BNX2_NEXT_TX_BD(j);
5373 dma_unmap_single(&bp->pdev->dev,
5374 dma_unmap_addr(tx_buf, mapping),
5380 last = tx_buf->nr_frags;
5381 j = BNX2_NEXT_TX_BD(j);
5382 for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5383 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5384 dma_unmap_page(&bp->pdev->dev,
5385 dma_unmap_addr(tx_buf, mapping),
5386 skb_frag_size(&skb_shinfo(skb)->frags[k]),
5391 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5396 bnx2_free_rx_skbs(struct bnx2 *bp)
5400 for (i = 0; i < bp->num_rx_rings; i++) {
5401 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5402 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5405 if (rxr->rx_buf_ring == NULL)
5408 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5409 struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5410 u8 *data = rx_buf->data;
5415 dma_unmap_single(&bp->pdev->dev,
5416 dma_unmap_addr(rx_buf, mapping),
5417 bp->rx_buf_use_size,
5418 PCI_DMA_FROMDEVICE);
5420 rx_buf->data = NULL;
5424 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5425 bnx2_free_rx_page(bp, rxr, j);
5430 bnx2_free_skbs(struct bnx2 *bp)
5432 bnx2_free_tx_skbs(bp);
5433 bnx2_free_rx_skbs(bp);
5437 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5441 rc = bnx2_reset_chip(bp, reset_code);
5446 if ((rc = bnx2_init_chip(bp)) != 0)
5449 bnx2_init_all_rings(bp);
5454 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5458 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5461 spin_lock_bh(&bp->phy_lock);
5462 bnx2_init_phy(bp, reset_phy);
5464 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5465 bnx2_remote_phy_event(bp);
5466 spin_unlock_bh(&bp->phy_lock);
5471 bnx2_shutdown_chip(struct bnx2 *bp)
5475 if (bp->flags & BNX2_FLAG_NO_WOL)
5476 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5478 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5480 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5482 return bnx2_reset_chip(bp, reset_code);
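/* Register self-test: write all-zeros and then all-ones to each
 * register, checking that the read/write bits toggle and that the
 * read-only bits keep their saved value, then restore the original
 * contents.
 */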
5486 bnx2_test_registers(struct bnx2 *bp)
5490 static const struct {
5493 #define BNX2_FL_NOT_5709 1
5497 { 0x006c, 0, 0x00000000, 0x0000003f },
5498 { 0x0090, 0, 0xffffffff, 0x00000000 },
5499 { 0x0094, 0, 0x00000000, 0x00000000 },
5501 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5502 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5503 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5504 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5505 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5506 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5507 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5508 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5509 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5511 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5512 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5513 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5514 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5515 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5516 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5518 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5519 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5520 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5522 { 0x1000, 0, 0x00000000, 0x00000001 },
5523 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5525 { 0x1408, 0, 0x01c00800, 0x00000000 },
5526 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5527 { 0x14a8, 0, 0x00000000, 0x000001ff },
5528 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5529 { 0x14b0, 0, 0x00000002, 0x00000001 },
5530 { 0x14b8, 0, 0x00000000, 0x00000000 },
5531 { 0x14c0, 0, 0x00000000, 0x00000009 },
5532 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5533 { 0x14cc, 0, 0x00000000, 0x00000001 },
5534 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5536 { 0x1800, 0, 0x00000000, 0x00000001 },
5537 { 0x1804, 0, 0x00000000, 0x00000003 },
5539 { 0x2800, 0, 0x00000000, 0x00000001 },
5540 { 0x2804, 0, 0x00000000, 0x00003f01 },
5541 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5542 { 0x2810, 0, 0xffff0000, 0x00000000 },
5543 { 0x2814, 0, 0xffff0000, 0x00000000 },
5544 { 0x2818, 0, 0xffff0000, 0x00000000 },
5545 { 0x281c, 0, 0xffff0000, 0x00000000 },
5546 { 0x2834, 0, 0xffffffff, 0x00000000 },
5547 { 0x2840, 0, 0x00000000, 0xffffffff },
5548 { 0x2844, 0, 0x00000000, 0xffffffff },
5549 { 0x2848, 0, 0xffffffff, 0x00000000 },
5550 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5552 { 0x2c00, 0, 0x00000000, 0x00000011 },
5553 { 0x2c04, 0, 0x00000000, 0x00030007 },
5555 { 0x3c00, 0, 0x00000000, 0x00000001 },
5556 { 0x3c04, 0, 0x00000000, 0x00070000 },
5557 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5558 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5559 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5560 { 0x3c14, 0, 0x00000000, 0xffffffff },
5561 { 0x3c18, 0, 0x00000000, 0xffffffff },
5562 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5563 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5565 { 0x5004, 0, 0x00000000, 0x0000007f },
5566 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5568 { 0x5c00, 0, 0x00000000, 0x00000001 },
5569 { 0x5c04, 0, 0x00000000, 0x0003000f },
5570 { 0x5c08, 0, 0x00000003, 0x00000000 },
5571 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5572 { 0x5c10, 0, 0x00000000, 0xffffffff },
5573 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5574 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5575 { 0x5c88, 0, 0x00000000, 0x00077373 },
5576 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5578 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5579 { 0x680c, 0, 0xffffffff, 0x00000000 },
5580 { 0x6810, 0, 0xffffffff, 0x00000000 },
5581 { 0x6814, 0, 0xffffffff, 0x00000000 },
5582 { 0x6818, 0, 0xffffffff, 0x00000000 },
5583 { 0x681c, 0, 0xffffffff, 0x00000000 },
5584 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5585 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5586 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5587 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5588 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5589 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5590 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5591 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5592 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5593 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5594 { 0x684c, 0, 0xffffffff, 0x00000000 },
5595 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5596 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5597 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5598 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5599 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5600 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5602 { 0xffff, 0, 0x00000000, 0x00000000 },
5607 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5610 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5611 u32 offset, rw_mask, ro_mask, save_val, val;
5612 u16 flags = reg_tbl[i].flags;
5614 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5617 offset = (u32) reg_tbl[i].offset;
5618 rw_mask = reg_tbl[i].rw_mask;
5619 ro_mask = reg_tbl[i].ro_mask;
5621 save_val = readl(bp->regview + offset);
5623 writel(0, bp->regview + offset);
5625 val = readl(bp->regview + offset);
5626 if ((val & rw_mask) != 0) {
5630 if ((val & ro_mask) != (save_val & ro_mask)) {
5634 writel(0xffffffff, bp->regview + offset);
5636 val = readl(bp->regview + offset);
5637 if ((val & rw_mask) != rw_mask) {
5641 if ((val & ro_mask) != (save_val & ro_mask)) {
5645 writel(save_val, bp->regview + offset);
5649 writel(save_val, bp->regview + offset);
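/* Walk a set of test patterns through an on-chip memory range using
 * indirect register accesses, failing on the first mismatch.
 */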
5657 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5659 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5660 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5663 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5666 for (offset = 0; offset < size; offset += 4) {
5668 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5670 if (bnx2_reg_rd_ind(bp, start + offset) !=
5680 bnx2_test_memory(struct bnx2 *bp)
5684 static struct mem_entry {
5687 } mem_tbl_5706[] = {
5688 { 0x60000, 0x4000 },
5689 { 0xa0000, 0x3000 },
5690 { 0xe0000, 0x4000 },
5691 { 0x120000, 0x4000 },
5692 { 0x1a0000, 0x4000 },
5693 { 0x160000, 0x4000 },
5697 { 0x60000, 0x4000 },
5698 { 0xa0000, 0x3000 },
5699 { 0xe0000, 0x4000 },
5700 { 0x120000, 0x4000 },
5701 { 0x1a0000, 0x4000 },
5704 struct mem_entry *mem_tbl;
5706 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5707 mem_tbl = mem_tbl_5709;
5709 mem_tbl = mem_tbl_5706;
5711 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5712 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5713 mem_tbl[i].len)) != 0) {
5721 #define BNX2_MAC_LOOPBACK 0
5722 #define BNX2_PHY_LOOPBACK 1
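/* Loopback self-test: build a frame addressed to our own MAC with an
 * incrementing payload, transmit it with the MAC or PHY looped back,
 * then verify that exactly one packet arrives with no error flags,
 * the expected length, and an intact payload.
 */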
5725 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5727 unsigned int pkt_size, num_pkts, i;
5728 struct sk_buff *skb;
5730 unsigned char *packet;
5731 u16 rx_start_idx, rx_idx;
5733 struct bnx2_tx_bd *txbd;
5734 struct bnx2_sw_bd *rx_buf;
5735 struct l2_fhdr *rx_hdr;
5737 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5738 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5739 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5743 txr = &tx_napi->tx_ring;
5744 rxr = &bnapi->rx_ring;
5745 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5746 bp->loopback = MAC_LOOPBACK;
5747 bnx2_set_mac_loopback(bp);
5749 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5750 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5753 bp->loopback = PHY_LOOPBACK;
5754 bnx2_set_phy_loopback(bp);
5759 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5760 skb = netdev_alloc_skb(bp->dev, pkt_size);
5763 packet = skb_put(skb, pkt_size);
5764 memcpy(packet, bp->dev->dev_addr, 6);
5765 memset(packet + 6, 0x0, 8);
5766 for (i = 14; i < pkt_size; i++)
5767 packet[i] = (unsigned char) (i & 0xff);
5769 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5771 if (dma_mapping_error(&bp->pdev->dev, map)) {
5776 BNX2_WR(bp, BNX2_HC_COMMAND,
5777 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5779 BNX2_RD(bp, BNX2_HC_COMMAND);
5782 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5786 txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5788 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5789 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5790 txbd->tx_bd_mss_nbytes = pkt_size;
5791 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5794 txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5795 txr->tx_prod_bseq += pkt_size;
5797 BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5798 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5802 BNX2_WR(bp, BNX2_HC_COMMAND,
5803 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5805 BNX2_RD(bp, BNX2_HC_COMMAND);
5809 dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5812 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5813 goto loopback_test_done;
5815 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5816 if (rx_idx != rx_start_idx + num_pkts) {
5817 goto loopback_test_done;
5820 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5821 data = rx_buf->data;
5823 rx_hdr = get_l2_fhdr(data);
5824 data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5826 dma_sync_single_for_cpu(&bp->pdev->dev,
5827 dma_unmap_addr(rx_buf, mapping),
5828 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
5830 if (rx_hdr->l2_fhdr_status &
5831 (L2_FHDR_ERRORS_BAD_CRC |
5832 L2_FHDR_ERRORS_PHY_DECODE |
5833 L2_FHDR_ERRORS_ALIGNMENT |
5834 L2_FHDR_ERRORS_TOO_SHORT |
5835 L2_FHDR_ERRORS_GIANT_FRAME)) {
5837 goto loopback_test_done;
5840 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5841 goto loopback_test_done;
5844 for (i = 14; i < pkt_size; i++) {
5845 if (*(data + i) != (unsigned char) (i & 0xff)) {
5846 goto loopback_test_done;
5857 #define BNX2_MAC_LOOPBACK_FAILED 1
5858 #define BNX2_PHY_LOOPBACK_FAILED 2
5859 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5860 BNX2_PHY_LOOPBACK_FAILED)
5863 bnx2_test_loopback(struct bnx2 *bp)
5867 if (!netif_running(bp->dev))
5868 return BNX2_LOOPBACK_FAILED;
5870 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5871 spin_lock_bh(&bp->phy_lock);
5872 bnx2_init_phy(bp, 1);
5873 spin_unlock_bh(&bp->phy_lock);
5874 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5875 rc |= BNX2_MAC_LOOPBACK_FAILED;
5876 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5877 rc |= BNX2_PHY_LOOPBACK_FAILED;
5881 #define NVRAM_SIZE 0x200
5882 #define CRC32_RESIDUAL 0xdebb20e3
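/* 0xdebb20e3 appears to be the standard CRC32 residual: running the
 * CRC over a block that already contains its stored CRC yields this
 * constant, so each block verifies without locating its checksum
 * field.
 */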
5885 bnx2_test_nvram(struct bnx2 *bp)
5887 __be32 buf[NVRAM_SIZE / 4];
5888 u8 *data = (u8 *) buf;
5892 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5893 goto test_nvram_done;
5895 magic = be32_to_cpu(buf[0]);
5896 if (magic != 0x669955aa) {
5898 goto test_nvram_done;
5901 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5902 goto test_nvram_done;
5904 csum = ether_crc_le(0x100, data);
5905 if (csum != CRC32_RESIDUAL) {
5907 goto test_nvram_done;
5910 csum = ether_crc_le(0x100, data + 0x100);
5911 if (csum != CRC32_RESIDUAL) {
5920 bnx2_test_link(struct bnx2 *bp)
5924 if (!netif_running(bp->dev))
5927 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5932 spin_lock_bh(&bp->phy_lock);
5933 bnx2_enable_bmsr1(bp);
5934 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5935 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5936 bnx2_disable_bmsr1(bp);
5937 spin_unlock_bh(&bp->phy_lock);
5939 if (bmsr & BMSR_LSTATUS) {
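/* Interrupt self-test: latch the current status index, ask the host
 * coalescing block to fire immediately, then poll for up to ~100 ms
 * for the index in BNX2_PCICFG_INT_ACK_CMD to change.
 */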
5946 bnx2_test_intr(struct bnx2 *bp)
5951 if (!netif_running(bp->dev))
5954 status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5956 /* This register is not touched during run-time. */
5957 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5958 BNX2_RD(bp, BNX2_HC_COMMAND);
5960 for (i = 0; i < 10; i++) {
5961 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5967 msleep_interruptible(10);
5975 /* Determine whether a link partner is present, for parallel detection. */
5977 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5979 u32 mode_ctl, an_dbg, exp;
5981 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5984 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5985 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5987 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5990 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5991 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5992 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5994 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5997 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5998 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5999 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6001 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
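/* Periodic 5706 SerDes state machine: if autoneg is stuck but
 * parallel detection sees a partner, force 1000/full; if a forced
 * link later sees the partner autonegotiating, re-enable autoneg;
 * and force the link down when sync is lost.
 */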
6008 bnx2_5706_serdes_timer(struct bnx2 *bp)
6012 spin_lock(&bp->phy_lock);
6013 if (bp->serdes_an_pending) {
6014 bp->serdes_an_pending--;
6016 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6019 bp->current_interval = BNX2_TIMER_INTERVAL;
6021 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6023 if (bmcr & BMCR_ANENABLE) {
6024 if (bnx2_5706_serdes_has_link(bp)) {
6025 bmcr &= ~BMCR_ANENABLE;
6026 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6027 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6028 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6032 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6033 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6036 bnx2_write_phy(bp, 0x17, 0x0f01);
6037 bnx2_read_phy(bp, 0x15, &phy2);
6041 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6042 bmcr |= BMCR_ANENABLE;
6043 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6045 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6048 bp->current_interval = BNX2_TIMER_INTERVAL;
6053 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6054 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6055 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6057 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6058 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6059 bnx2_5706s_force_link_dn(bp, 1);
6060 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6063 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6066 spin_unlock(&bp->phy_lock);
6070 bnx2_5708_serdes_timer(struct bnx2 *bp)
6072 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6075 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6076 bp->serdes_an_pending = 0;
6080 spin_lock(&bp->phy_lock);
6081 if (bp->serdes_an_pending)
6082 bp->serdes_an_pending--;
6083 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6086 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6087 if (bmcr & BMCR_ANENABLE) {
6088 bnx2_enable_forced_2g5(bp);
6089 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6091 bnx2_disable_forced_2g5(bp);
6092 bp->serdes_an_pending = 2;
6093 bp->current_interval = BNX2_TIMER_INTERVAL;
6097 bp->current_interval = BNX2_TIMER_INTERVAL;
6099 spin_unlock(&bp->phy_lock);
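/* Periodic driver timer: check for missed MSIs, send the firmware
 * heartbeat, refresh the firmware rx-drop counter, kick a stats DMA
 * on chips with the broken-stats erratum, and run the SerDes state
 * machines.
 */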
6103 bnx2_timer(unsigned long data)
6105 struct bnx2 *bp = (struct bnx2 *) data;
6107 if (!netif_running(bp->dev))
6110 if (atomic_read(&bp->intr_sem) != 0)
6111 goto bnx2_restart_timer;
6113 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6114 BNX2_FLAG_USING_MSI)
6115 bnx2_chk_missed_msi(bp);
6117 bnx2_send_heart_beat(bp);
6119 bp->stats_blk->stat_FwRxDrop =
6120 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6122 /* work around occasionally corrupted counters */
6123 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6124 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6125 BNX2_HC_COMMAND_STATS_NOW);
6127 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6128 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6129 bnx2_5706_serdes_timer(bp);
6131 bnx2_5708_serdes_timer(bp);
6135 mod_timer(&bp->timer, jiffies + bp->current_interval);
6139 bnx2_request_irq(struct bnx2 *bp)
6141 unsigned long flags;
6142 struct bnx2_irq *irq;
6145 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6148 flags = IRQF_SHARED;
6150 for (i = 0; i < bp->irq_nvecs; i++) {
6151 irq = &bp->irq_tbl[i];
6152 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6162 __bnx2_free_irq(struct bnx2 *bp)
6164 struct bnx2_irq *irq;
6167 for (i = 0; i < bp->irq_nvecs; i++) {
6168 irq = &bp->irq_tbl[i];
6170 free_irq(irq->vector, &bp->bnx2_napi[i]);
6176 bnx2_free_irq(struct bnx2 *bp)
6179 __bnx2_free_irq(bp);
6180 if (bp->flags & BNX2_FLAG_USING_MSI)
6181 pci_disable_msi(bp->pdev);
6182 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6183 pci_disable_msix(bp->pdev);
6185 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
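/* Map the MSI-X table and PBA into GRC windows 2 and 3, then try to
 * enable up to BNX2_MAX_MSIX_VEC vectors, stepping the request down
 * until the PCI core accepts it or it falls below BNX2_MIN_MSIX_VEC.
 */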
6189 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6191 int i, total_vecs, rc;
6192 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6193 struct net_device *dev = bp->dev;
6194 const int len = sizeof(bp->irq_tbl[0].name);
6196 bnx2_setup_msix_tbl(bp);
6197 BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6198 BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6199 BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6201 /* Need to flush the previous three writes to ensure MSI-X
6202 * is set up properly */
6203 BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6205 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6206 msix_ent[i].entry = i;
6207 msix_ent[i].vector = 0;
6210 total_vecs = msix_vecs;
6215 while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6216 rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6226 msix_vecs = total_vecs;
6230 bp->irq_nvecs = msix_vecs;
6231 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6232 for (i = 0; i < total_vecs; i++) {
6233 bp->irq_tbl[i].vector = msix_ent[i].vector;
6234 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6235 bp->irq_tbl[i].handler = bnx2_msi_1shot;
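/* Pick the interrupt mode and vector count: prefer MSI-X sized from
 * the default RSS queue count and any user-requested ring counts,
 * fall back to single-vector MSI (one-shot on the 5709), and finally
 * to legacy INTx.
 */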
6240 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6242 int cpus = netif_get_num_default_rss_queues();
6245 if (!bp->num_req_rx_rings)
6246 msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6247 else if (!bp->num_req_tx_rings)
6248 msix_vecs = max(cpus, bp->num_req_rx_rings);
6250 msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6252 msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6254 bp->irq_tbl[0].handler = bnx2_interrupt;
6255 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6257 bp->irq_tbl[0].vector = bp->pdev->irq;
6259 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6260 bnx2_enable_msix(bp, msix_vecs);
6262 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6263 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6264 if (pci_enable_msi(bp->pdev) == 0) {
6265 bp->flags |= BNX2_FLAG_USING_MSI;
6266 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6267 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6268 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6270 bp->irq_tbl[0].handler = bnx2_msi;
6272 bp->irq_tbl[0].vector = bp->pdev->irq;
6276 if (!bp->num_req_tx_rings)
6277 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6279 bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6281 if (!bp->num_req_rx_rings)
6282 bp->num_rx_rings = bp->irq_nvecs;
6284 bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6286 netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6288 return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6291 /* Called with rtnl_lock */
6293 bnx2_open(struct net_device *dev)
6295 struct bnx2 *bp = netdev_priv(dev);
6298 rc = bnx2_request_firmware(bp);
6302 netif_carrier_off(dev);
6304 bnx2_disable_int(bp);
6306 rc = bnx2_setup_int_mode(bp, disable_msi);
6310 bnx2_napi_enable(bp);
6311 rc = bnx2_alloc_mem(bp);
6315 rc = bnx2_request_irq(bp);
6319 rc = bnx2_init_nic(bp, 1);
6323 mod_timer(&bp->timer, jiffies + bp->current_interval);
6325 atomic_set(&bp->intr_sem, 0);
6327 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6329 bnx2_enable_int(bp);
6331 if (bp->flags & BNX2_FLAG_USING_MSI) {
6332 /* Test MSI to make sure it is working
6333 * If MSI test fails, go back to INTx mode. */
6335 if (bnx2_test_intr(bp) != 0) {
6336 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6338 bnx2_disable_int(bp);
6341 bnx2_setup_int_mode(bp, 1);
6343 rc = bnx2_init_nic(bp, 0);
6346 rc = bnx2_request_irq(bp);
6349 del_timer_sync(&bp->timer);
6352 bnx2_enable_int(bp);
6355 if (bp->flags & BNX2_FLAG_USING_MSI)
6356 netdev_info(dev, "using MSI\n");
6357 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6358 netdev_info(dev, "using MSIX\n");
6360 netif_tx_start_all_queues(dev);
6365 bnx2_napi_disable(bp);
6370 bnx2_release_firmware(bp);
6375 bnx2_reset_task(struct work_struct *work)
6377 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6382 if (!netif_running(bp->dev)) {
6387 bnx2_netif_stop(bp, true);
6389 pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6390 if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6391 /* in case PCI block has reset */
6392 pci_restore_state(bp->pdev);
6393 pci_save_state(bp->pdev);
6395 rc = bnx2_init_nic(bp, 1);
6397 netdev_err(bp->dev, "failed to reset NIC, closing\n");
6398 bnx2_napi_enable(bp);
6404 atomic_set(&bp->intr_sem, 1);
6405 bnx2_netif_start(bp, true);
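/* Build a { name, offset } pair for one flow-through queue control
 * register; __stringify() turns the token into its printable name.
 */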
6409 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6412 bnx2_dump_ftq(struct bnx2 *bp)
6415 u32 reg, bdidx, cid, valid;
6416 struct net_device *dev = bp->dev;
6417 static const struct ftq_reg {
6421 BNX2_FTQ_ENTRY(RV2P_P),
6422 BNX2_FTQ_ENTRY(RV2P_T),
6423 BNX2_FTQ_ENTRY(RV2P_M),
6424 BNX2_FTQ_ENTRY(TBDR_),
6425 BNX2_FTQ_ENTRY(TDMA_),
6426 BNX2_FTQ_ENTRY(TXP_),
6427 BNX2_FTQ_ENTRY(TXP_),
6428 BNX2_FTQ_ENTRY(TPAT_),
6429 BNX2_FTQ_ENTRY(RXP_C),
6430 BNX2_FTQ_ENTRY(RXP_),
6431 BNX2_FTQ_ENTRY(COM_COMXQ_),
6432 BNX2_FTQ_ENTRY(COM_COMTQ_),
6433 BNX2_FTQ_ENTRY(COM_COMQ_),
6434 BNX2_FTQ_ENTRY(CP_CPQ_),
6437 netdev_err(dev, "<--- start FTQ dump --->\n");
6438 for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6439 netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6440 bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6442 netdev_err(dev, "CPU states:\n");
6443 for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
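/* the program counter (reg + 0x1c) is printed twice, presumably to
 * show whether the CPU is making progress between the two reads */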
6444 netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6445 reg, bnx2_reg_rd_ind(bp, reg),
6446 bnx2_reg_rd_ind(bp, reg + 4),
6447 bnx2_reg_rd_ind(bp, reg + 8),
6448 bnx2_reg_rd_ind(bp, reg + 0x1c),
6449 bnx2_reg_rd_ind(bp, reg + 0x1c),
6450 bnx2_reg_rd_ind(bp, reg + 0x20));
6452 netdev_err(dev, "<--- end FTQ dump --->\n");
6453 netdev_err(dev, "<--- start TBDC dump --->\n");
6454 netdev_err(dev, "TBDC free cnt: %ld\n",
6455 BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6456 netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
6457 for (i = 0; i < 0x20; i++) {
6460 BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6461 BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6462 BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6463 BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6464 while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6465 BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6468 cid = BNX2_RD(bp, BNX2_TBDC_CID);
6469 bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6470 valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6471 netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
6472 i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6473 bdidx >> 24, (valid >> 8) & 0x0ff);
6475 netdev_err(dev, "<--- end TBDC dump --->\n");
6479 bnx2_dump_state(struct bnx2 *bp)
6481 struct net_device *dev = bp->dev;
6484 pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6485 netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6486 atomic_read(&bp->intr_sem), val1);
6487 pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6488 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6489 netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6490 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6491 BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6492 BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6493 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6494 BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6495 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6496 BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6497 if (bp->flags & BNX2_FLAG_USING_MSIX)
6498 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6499 BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6503 bnx2_tx_timeout(struct net_device *dev)
6505 struct bnx2 *bp = netdev_priv(dev);
6508 bnx2_dump_state(bp);
6509 bnx2_dump_mcp_state(bp);
6511 /* This allows the netif to be shut down gracefully before resetting */
6512 schedule_work(&bp->reset_task);
6515 /* Called with netif_tx_lock.
6516 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6517 * netif_wake_queue().
6520 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6522 struct bnx2 *bp = netdev_priv(dev);
6524 struct bnx2_tx_bd *txbd;
6525 struct bnx2_sw_tx_bd *tx_buf;
6526 u32 len, vlan_tag_flags, last_frag, mss;
6527 u16 prod, ring_prod;
6529 struct bnx2_napi *bnapi;
6530 struct bnx2_tx_ring_info *txr;
6531 struct netdev_queue *txq;
6533 /* Determine which tx ring this packet will be placed on */
6534 i = skb_get_queue_mapping(skb);
6535 bnapi = &bp->bnx2_napi[i];
6536 txr = &bnapi->tx_ring;
6537 txq = netdev_get_tx_queue(dev, i);
6539 if (unlikely(bnx2_tx_avail(bp, txr) <
6540 (skb_shinfo(skb)->nr_frags + 1))) {
6541 netif_tx_stop_queue(txq);
6542 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6544 return NETDEV_TX_BUSY;
6546 len = skb_headlen(skb);
6547 prod = txr->tx_prod;
6548 ring_prod = BNX2_TX_RING_IDX(prod);
6551 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6552 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6555 if (vlan_tx_tag_present(skb)) {
6557 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6560 if ((mss = skb_shinfo(skb)->gso_size)) {
6564 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6566 tcp_opt_len = tcp_optlen(skb);
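/* For TSO over IPv6, any TCP header offset beyond the basic 40-byte
 * IPv6 header must be scattered across several BD flag and mss bit
 * fields for the hardware.
 */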
6568 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6569 u32 tcp_off = skb_transport_offset(skb) -
6570 sizeof(struct ipv6hdr) - ETH_HLEN;
6572 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6573 TX_BD_FLAGS_SW_FLAGS;
6574 if (likely(tcp_off == 0))
6575 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6578 vlan_tag_flags |= ((tcp_off & 0x3) <<
6579 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6580 ((tcp_off & 0x10) <<
6581 TX_BD_FLAGS_TCP6_OFF4_SHL);
6582 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6586 if (tcp_opt_len || (iph->ihl > 5)) {
6587 vlan_tag_flags |= ((iph->ihl - 5) +
6588 (tcp_opt_len >> 2)) << 8;
6594 mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6595 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6597 return NETDEV_TX_OK;
6600 tx_buf = &txr->tx_buf_ring[ring_prod];
6602 dma_unmap_addr_set(tx_buf, mapping, mapping);
6604 txbd = &txr->tx_desc_ring[ring_prod];
6606 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6607 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6608 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6609 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6611 last_frag = skb_shinfo(skb)->nr_frags;
6612 tx_buf->nr_frags = last_frag;
6613 tx_buf->is_gso = skb_is_gso(skb);
6615 for (i = 0; i < last_frag; i++) {
6616 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6618 prod = BNX2_NEXT_TX_BD(prod);
6619 ring_prod = BNX2_TX_RING_IDX(prod);
6620 txbd = &txr->tx_desc_ring[ring_prod];
6622 len = skb_frag_size(frag);
6623 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6625 if (dma_mapping_error(&bp->pdev->dev, mapping))
6627 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6630 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6631 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6632 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6633 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6636 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6638 /* Sync BD data before updating TX mailbox */
6641 netdev_tx_sent_queue(txq, skb->len);
6643 prod = BNX2_NEXT_TX_BD(prod);
6644 txr->tx_prod_bseq += skb->len;
6646 BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6647 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6651 txr->tx_prod = prod;
6653 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6654 netif_tx_stop_queue(txq);
6656 /* netif_tx_stop_queue() must be done before checking
6657 * tx index in bnx2_tx_avail() below, because in
6658 * bnx2_tx_int(), we update tx index before checking for
6659 * netif_tx_queue_stopped().
6662 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6663 netif_tx_wake_queue(txq);
6666 return NETDEV_TX_OK;
6668 /* save value of frag that failed */
6671 /* start back at beginning and unmap skb */
6672 prod = txr->tx_prod;
6673 ring_prod = BNX2_TX_RING_IDX(prod);
6674 tx_buf = &txr->tx_buf_ring[ring_prod];
6676 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6677 skb_headlen(skb), PCI_DMA_TODEVICE);
6679 /* unmap remaining mapped pages */
6680 for (i = 0; i < last_frag; i++) {
6681 prod = BNX2_NEXT_TX_BD(prod);
6682 ring_prod = BNX2_TX_RING_IDX(prod);
6683 tx_buf = &txr->tx_buf_ring[ring_prod];
6684 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6685 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6690 return NETDEV_TX_OK;
6693 /* Called with rtnl_lock */
6695 bnx2_close(struct net_device *dev)
6697 struct bnx2 *bp = netdev_priv(dev);
6699 bnx2_disable_int_sync(bp);
6700 bnx2_napi_disable(bp);
6701 netif_tx_disable(dev);
6702 del_timer_sync(&bp->timer);
6703 bnx2_shutdown_chip(bp);
6709 netif_carrier_off(bp->dev);
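/* Accumulate the current hardware statistics into temp_stats_blk
 * before a chip reset wipes them; 64-bit counters are summed as
 * hi/lo pairs with a manual carry, the rest as plain 32-bit adds.
 */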
6714 bnx2_save_stats(struct bnx2 *bp)
6716 u32 *hw_stats = (u32 *) bp->stats_blk;
6717 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6720 /* The first 10 counters are 64-bit counters, i.e. 20 u32 words */
6721 for (i = 0; i < 20; i += 2) {
6725 hi = temp_stats[i] + hw_stats[i];
6726 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6727 if (lo > 0xffffffff)
6730 temp_stats[i + 1] = lo & 0xffffffff;
6733 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6734 temp_stats[i] += hw_stats[i];
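/* The helpers below combine the live hardware stats block with the
 * values saved in temp_stats_blk across resets; 64-bit counters are
 * reassembled from their _hi/_lo halves first.
 */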
6737 #define GET_64BIT_NET_STATS64(ctr) \
6738 (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6740 #define GET_64BIT_NET_STATS(ctr) \
6741 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6742 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6744 #define GET_32BIT_NET_STATS(ctr) \
6745 (unsigned long) (bp->stats_blk->ctr + \
6746 bp->temp_stats_blk->ctr)
6748 static struct rtnl_link_stats64 *
6749 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6751 struct bnx2 *bp = netdev_priv(dev);
6753 if (bp->stats_blk == NULL)
6756 net_stats->rx_packets =
6757 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6758 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6759 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6761 net_stats->tx_packets =
6762 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6763 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6764 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6766 net_stats->rx_bytes =
6767 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6769 net_stats->tx_bytes =
6770 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6772 net_stats->multicast =
6773 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6775 net_stats->collisions =
6776 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6778 net_stats->rx_length_errors =
6779 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6780 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6782 net_stats->rx_over_errors =
6783 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6784 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6786 net_stats->rx_frame_errors =
6787 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6789 net_stats->rx_crc_errors =
6790 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6792 net_stats->rx_errors = net_stats->rx_length_errors +
6793 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6794 net_stats->rx_crc_errors;
6796 net_stats->tx_aborted_errors =
6797 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6798 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6800 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6801 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6802 net_stats->tx_carrier_errors = 0;
6804 net_stats->tx_carrier_errors =
6805 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6808 net_stats->tx_errors =
6809 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6810 net_stats->tx_aborted_errors +
6811 net_stats->tx_carrier_errors;
6813 net_stats->rx_missed_errors =
6814 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6815 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6816 GET_32BIT_NET_STATS(stat_FwRxDrop);
6821 /* All ethtool functions called with rtnl_lock */
6824 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6826 struct bnx2 *bp = netdev_priv(dev);
6827 int support_serdes = 0, support_copper = 0;
6829 cmd->supported = SUPPORTED_Autoneg;
6830 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6833 } else if (bp->phy_port == PORT_FIBRE)
6838 if (support_serdes) {
6839 cmd->supported |= SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE;
6841 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6842 cmd->supported |= SUPPORTED_2500baseX_Full;
6845 if (support_copper) {
6846 cmd->supported |= SUPPORTED_10baseT_Half |
6847 SUPPORTED_10baseT_Full |
6848 SUPPORTED_100baseT_Half |
6849 SUPPORTED_100baseT_Full |
6850 SUPPORTED_1000baseT_Full | SUPPORTED_TP;
6855 spin_lock_bh(&bp->phy_lock);
6856 cmd->port = bp->phy_port;
6857 cmd->advertising = bp->advertising;
6859 if (bp->autoneg & AUTONEG_SPEED) {
6860 cmd->autoneg = AUTONEG_ENABLE;
6862 cmd->autoneg = AUTONEG_DISABLE;
6865 if (netif_carrier_ok(dev)) {
6866 ethtool_cmd_speed_set(cmd, bp->line_speed);
6867 cmd->duplex = bp->duplex;
6870 ethtool_cmd_speed_set(cmd, -1);
6873 spin_unlock_bh(&bp->phy_lock);
6875 cmd->transceiver = XCVR_INTERNAL;
6876 cmd->phy_address = bp->phy_addr;
6882 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6884 struct bnx2 *bp = netdev_priv(dev);
6885 u8 autoneg = bp->autoneg;
6886 u8 req_duplex = bp->req_duplex;
6887 u16 req_line_speed = bp->req_line_speed;
6888 u32 advertising = bp->advertising;
6891 spin_lock_bh(&bp->phy_lock);
6893 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6894 goto err_out_unlock;
6896 if (cmd->port != bp->phy_port &&
6897 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6898 goto err_out_unlock;
6900 /* If device is down, we can store the settings only if the user
6901 * is setting the currently active port.
6903 if (!netif_running(dev) && cmd->port != bp->phy_port)
6904 goto err_out_unlock;
6906 if (cmd->autoneg == AUTONEG_ENABLE) {
6907 autoneg |= AUTONEG_SPEED;
6909 advertising = cmd->advertising;
6910 if (cmd->port == PORT_TP) {
6911 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6913 advertising = ETHTOOL_ALL_COPPER_SPEED;
6915 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6917 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6919 advertising |= ADVERTISED_Autoneg;
6922 u32 speed = ethtool_cmd_speed(cmd);
6923 if (cmd->port == PORT_FIBRE) {
6924 if ((speed != SPEED_1000 &&
6925 speed != SPEED_2500) ||
6926 (cmd->duplex != DUPLEX_FULL))
6927 goto err_out_unlock;
6929 if (speed == SPEED_2500 &&
6930 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6931 goto err_out_unlock;
6932 } else if (speed == SPEED_1000 || speed == SPEED_2500)
6933 goto err_out_unlock;
6935 autoneg &= ~AUTONEG_SPEED;
6936 req_line_speed = speed;
6937 req_duplex = cmd->duplex;
6941 bp->autoneg = autoneg;
6942 bp->advertising = advertising;
6943 bp->req_line_speed = req_line_speed;
6944 bp->req_duplex = req_duplex;
6947 /* If device is down, the new settings will be picked up when it is brought up. */
6950 if (netif_running(dev))
6951 err = bnx2_setup_phy(bp, cmd->port);
6954 spin_unlock_bh(&bp->phy_lock);
6960 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6962 struct bnx2 *bp = netdev_priv(dev);
6964 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6965 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6966 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
6967 strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6970 #define BNX2_REGDUMP_LEN (32 * 1024)
6973 bnx2_get_regs_len(struct net_device *dev)
6975 return BNX2_REGDUMP_LEN;
6979 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6981 u32 *p = _p, i, offset;
6983 struct bnx2 *bp = netdev_priv(dev);
6984 static const u32 reg_boundaries[] = {
6985 0x0000, 0x0098, 0x0400, 0x045c,
6986 0x0800, 0x0880, 0x0c00, 0x0c10,
6987 0x0c30, 0x0d08, 0x1000, 0x101c,
6988 0x1040, 0x1048, 0x1080, 0x10a4,
6989 0x1400, 0x1490, 0x1498, 0x14f0,
6990 0x1500, 0x155c, 0x1580, 0x15dc,
6991 0x1600, 0x1658, 0x1680, 0x16d8,
6992 0x1800, 0x1820, 0x1840, 0x1854,
6993 0x1880, 0x1894, 0x1900, 0x1984,
6994 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6995 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6996 0x2000, 0x2030, 0x23c0, 0x2400,
6997 0x2800, 0x2820, 0x2830, 0x2850,
6998 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6999 0x3c00, 0x3c94, 0x4000, 0x4010,
7000 0x4080, 0x4090, 0x43c0, 0x4458,
7001 0x4c00, 0x4c18, 0x4c40, 0x4c54,
7002 0x4fc0, 0x5010, 0x53c0, 0x5444,
7003 0x5c00, 0x5c18, 0x5c80, 0x5c90,
7004 0x5fc0, 0x6000, 0x6400, 0x6428,
7005 0x6800, 0x6848, 0x684c, 0x6860,
7006 0x6888, 0x6910, 0x8000
7011 memset(p, 0, BNX2_REGDUMP_LEN);
7013 if (!netif_running(bp->dev))
7017 offset = reg_boundaries[0];
7019 while (offset < BNX2_REGDUMP_LEN) {
7020 *p++ = BNX2_RD(bp, offset);
7022 if (offset == reg_boundaries[i + 1]) {
7023 offset = reg_boundaries[i + 2];
7024 p = (u32 *) (orig_p + offset);
7031 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7033 struct bnx2 *bp = netdev_priv(dev);
7035 if (bp->flags & BNX2_FLAG_NO_WOL) {
7040 wol->supported = WAKE_MAGIC;
7042 wol->wolopts = WAKE_MAGIC;
7046 memset(&wol->sopass, 0, sizeof(wol->sopass));
7050 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7052 struct bnx2 *bp = netdev_priv(dev);
7054 if (wol->wolopts & ~WAKE_MAGIC)
7057 if (wol->wolopts & WAKE_MAGIC) {
7058 if (bp->flags & BNX2_FLAG_NO_WOL)
7067 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7073 bnx2_nway_reset(struct net_device *dev)
7075 struct bnx2 *bp = netdev_priv(dev);
7078 if (!netif_running(dev))
7081 if (!(bp->autoneg & AUTONEG_SPEED)) {
7085 spin_lock_bh(&bp->phy_lock);
7087 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7090 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7091 spin_unlock_bh(&bp->phy_lock);
7095 /* Force a link-down event that is visible to the link partner */
7096 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7097 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7098 spin_unlock_bh(&bp->phy_lock);
7102 spin_lock_bh(&bp->phy_lock);
7104 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7105 bp->serdes_an_pending = 1;
7106 mod_timer(&bp->timer, jiffies + bp->current_interval);
7109 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7110 bmcr &= ~BMCR_LOOPBACK;
7111 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7113 spin_unlock_bh(&bp->phy_lock);
7119 bnx2_get_link(struct net_device *dev)
7121 struct bnx2 *bp = netdev_priv(dev);
7127 bnx2_get_eeprom_len(struct net_device *dev)
7129 struct bnx2 *bp = netdev_priv(dev);
7131 if (bp->flash_info == NULL)
7134 return (int) bp->flash_size;
7138 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7141 struct bnx2 *bp = netdev_priv(dev);
7144 /* parameters already validated in ethtool_get_eeprom */
7146 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7152 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7155 struct bnx2 *bp = netdev_priv(dev);
7158 /* parameters already validated in ethtool_set_eeprom */
7160 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7166 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7168 struct bnx2 *bp = netdev_priv(dev);
7170 memset(coal, 0, sizeof(struct ethtool_coalesce));
7172 coal->rx_coalesce_usecs = bp->rx_ticks;
7173 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7174 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7175 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7177 coal->tx_coalesce_usecs = bp->tx_ticks;
7178 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7179 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7180 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7182 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7188 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7190 struct bnx2 *bp = netdev_priv(dev);
7192 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7193 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7195 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7196 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7198 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7199 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7201 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7202 if (bp->rx_quick_cons_trip_int > 0xff)
7203 bp->rx_quick_cons_trip_int = 0xff;
7205 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7206 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7208 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7209 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7211 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7212 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7214 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7215 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int = 0xff;
7218 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7219 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7220 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7221 bp->stats_ticks = USEC_PER_SEC;
7223 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7224 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7225 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7227 if (netif_running(bp->dev)) {
7228 bnx2_netif_stop(bp, true);
7229 bnx2_init_nic(bp, 0);
7230 bnx2_netif_start(bp, true);
7237 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7239 struct bnx2 *bp = netdev_priv(dev);
7241 ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7242 ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7244 ering->rx_pending = bp->rx_ring_size;
7245 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7247 ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7248 ering->tx_pending = bp->tx_ring_size;
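/* Resize the rings: on a running interface this requires a full chip
 * reset, so the chip statistics are saved first and the NIC is
 * re-initialized with the new geometry afterwards.
 */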
7252 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7254 if (netif_running(bp->dev)) {
7255 /* Reset will erase chipset stats; save them */
7256 bnx2_save_stats(bp);
7258 bnx2_netif_stop(bp, true);
7259 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7264 __bnx2_free_irq(bp);
7270 bnx2_set_rx_ring_size(bp, rx);
7271 bp->tx_ring_size = tx;
7273 if (netif_running(bp->dev)) {
7277 rc = bnx2_setup_int_mode(bp, disable_msi);
7282 rc = bnx2_alloc_mem(bp);
7285 rc = bnx2_request_irq(bp);
7288 rc = bnx2_init_nic(bp, 0);
7291 bnx2_napi_enable(bp);
7296 mutex_lock(&bp->cnic_lock);
7297 /* Let cnic know about the new status block. */
7298 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7299 bnx2_setup_cnic_irq_info(bp);
7300 mutex_unlock(&bp->cnic_lock);
7302 bnx2_netif_start(bp, true);
7308 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7310 struct bnx2 *bp = netdev_priv(dev);
7313 if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7314 (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7315 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7319 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}
static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg)
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	else
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;

	if (netif_running(dev)) {
		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
	}

	return 0;
}
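/* The three tables below are kept in lockstep: bnx2_stats_str_arr names
 * each counter, bnx2_stats_offset_arr locates it in the hardware
 * statistics block, and the per-chip *_stats_len_arr tables give each
 * counter's width (8, 4 or 0 bytes, 0 meaning not available on that
 * chip).  bnx2_get_ethtool_stats() walks all three by common index.
 */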
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
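/* Offsets are expressed in 32-bit words (hence the divide by 4); the
 * *_hi entries address the upper half of 64-bit hardware counters.
 */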
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInFTQDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
#define BNX2_NUM_TESTS 6
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
static int
bnx2_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return BNX2_NUM_TESTS;
	case ETH_SS_STATS:
		return BNX2_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
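/* Self-test results land in buf[] in the same order as
 * bnx2_tests_str_arr; a non-zero entry means that test failed.  The
 * offline tests require a chip reset, so traffic is stopped first.
 */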
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}
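/* ethtool LED identify (ethtool -p).  Returning 1 from ETHTOOL_ID_ACTIVE
 * asks the ethtool core to alternate ETHTOOL_ID_ON/ETHTOOL_ID_OFF once
 * per second; ETHTOOL_ID_INACTIVE restores the saved LED configuration.
 */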
static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
			BNX2_EMAC_LED_1000MB_OVERRIDE |
			BNX2_EMAC_LED_100MB_OVERRIDE |
			BNX2_EMAC_LED_10MB_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		BNX2_WR(bp, BNX2_EMAC_LED, 0);
		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
		break;
	}

	return 0;
}
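/* VLAN tag stripping cannot be turned off on chips that are unable to
 * keep the tag in the RX descriptor, so RX acceleration is forced on.
 */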
static netdev_features_t
bnx2_fix_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		features |= NETIF_F_HW_VLAN_CTAG_RX;

	return features;
}
static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TSO with VLAN tag won't work with current firmware */
	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;

	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
		dev->features = features;
		bnx2_set_rx_mode(dev);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
		return 1;
	}

	return 0;
}
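/* Multiple RX/TX rings are only possible with MSI-X; without it a
 * single ring in each direction is reported and accepted.
 */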
static void bnx2_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 max_rx_rings = 1;
	u32 max_tx_rings = 1;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
		max_rx_rings = RX_MAX_RINGS;
		max_tx_rings = TX_MAX_RINGS;
	}

	channels->max_rx = max_rx_rings;
	channels->max_tx = max_tx_rings;
	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = bp->num_rx_rings;
	channels->tx_count = bp->num_tx_rings;
	channels->other_count = 0;
	channels->combined_count = 0;
}
static int bnx2_set_channels(struct net_device *dev,
			     struct ethtool_channels *channels)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 max_rx_rings = 1;
	u32 max_tx_rings = 1;
	int rc = 0;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
		max_rx_rings = RX_MAX_RINGS;
		max_tx_rings = TX_MAX_RINGS;
	}
	if (channels->rx_count > max_rx_rings ||
	    channels->tx_count > max_tx_rings)
		return -EINVAL;

	bp->num_req_rx_rings = channels->rx_count;
	bp->num_req_tx_rings = channels->tx_count;

	if (netif_running(dev))
		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
					   bp->tx_ring_size, true);

	return rc;
}
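/* Example usage from user space (interface name is hypothetical):
 *
 *	ethtool -L eth0 rx 4 tx 4
 */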
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
};
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	return 0;
}
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
				     false);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
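/* The 5709 is a dual-media part: the bond id in the dual-media control
 * register identifies fixed copper ("C") and SerDes ("S") packages, and
 * otherwise the (possibly overridden) strap value selects the media for
 * each PCI function.
 */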
static void
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (bp->func == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
static void
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;
}
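/* The VPD image is stored in NVRAM with the bytes of each 32-bit word
 * reversed, hence the swizzle loop below.  The firmware version keyword
 * is only picked up when the manufacturer ID field equals "1028", which
 * appears to be Dell's PCI vendor ID rendered in ASCII.
 */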
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
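/* One-time board setup for probe: map BAR 0, size the DMA masks per
 * chip, locate shared memory, read the permanent MAC address and
 * firmware versions, and apply per-chip errata workarounds.  Errors
 * unwind through the err_out_* labels at the bottom.
 */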
static int
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	int rc, i, j;
	u32 reg;
	u64 dma_mask, persist_dma_mask;
	int err;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	bp->temp_stats_blk =
		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);

	if (bp->temp_stats_blk == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pdev->pm_cap;
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_lock);
#endif
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
							 TX_MAX_TSS_RINGS + 1));
	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}
	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * The chip's target access swapping will not swap all accesses.
	 */
	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		if (!pci_is_pcie(pdev)) {
			dev_err(&pdev->dev, "Not PCIE, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_PCIE;
		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;

		/* AER (Advanced Error Reporting) hooks */
		err = pci_enable_pcie_error_reporting(pdev);
		if (!err)
			bp->flags |= BNX2_FLAG_AER_ENABLED;
	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_BROKEN_STATS;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
		if (pdev->msix_cap)
			bp->flags |= BNX2_FLAG_MSIX_CAP;
	}

	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
		if (pdev->msi_cap)
			bp->flags |= BNX2_FLAG_MSI_CAP;
	}
	/* 5708 cannot support DMA addresses > 40-bit. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
	else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		goto err_out_unmap;
	}

	if (!(bp->flags & BNX2_FLAG_PCIE))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		reg = BNX2_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		BNX2_WR(bp, PCI_COMMAND, reg);
	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
		!(bp->flags & BNX2_FLAG_PCIX)) {
		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting\n");
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);
	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);

	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
		bp->func = 1;

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = bp->func << 2;

		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bnx2_read_vpd_fw_ver(bp);

	/* Append the bootcode version as three dot-separated decimal bytes. */
	j = strlen(bp->fw_version);
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
	for (i = 0; i < 3 && j < 24; i++) {
		u8 num, k, skip0;

		if (i == 0) {
			bp->fw_version[j++] = 'b';
			bp->fw_version[j++] = 'c';
			bp->fw_version[j++] = ' ';
		}
		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
		bp->wol = 1;

	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
		bp->flags |= BNX2_FLAG_ASF_ENABLE;

		for (i = 0; i < 30; i++) {
			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);

		if (j < 32)
			bp->fw_version[j++] = ' ';
		for (i = 0; i < 3 && j < 28; i++) {
			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
			reg = be32_to_cpu(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;
	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->tx_quick_cons_trip_int = 2;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 18;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 2;
	bp->rx_quick_cons_trip = 12;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	bp->current_interval = BNX2_TIMER_INTERVAL;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_get_5709_media(bp);
	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bp->phy_port = PORT_FIBRE;
		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
			bp->flags |= BNX2_FLAG_NO_WOL;
			bp->wol = 0;
		}
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
			/* Don't do parallel detect on this board because of
			 * some board problems.  The link will not go down
			 * if we do parallel detect.
			 */
			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
			    pdev->subsystem_device == 0x310c)
				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
		} else {
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
		}
	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;

	bnx2_init_fw_cap(bp);

	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
		bp->flags |= BNX2_FLAG_NO_WOL;
		bp->wol = 0;
	}

	if (bp->flags & BNX2_FLAG_NO_WOL)
		device_set_wakeup_capable(&bp->pdev->dev, false);
	else
		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}
	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {
			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

#ifdef BCM_CNIC
	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
		bp->cnic_eth_dev.max_iscsi_conn =
			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
	bp->cnic_probe = bnx2_cnic_probe;
#endif
	pci_save_state(pdev);

	return 0;
err_out_unmap:
	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	pci_iounmap(pdev, bp->regview);
	bp->regview = NULL;

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static char *
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}
static void
bnx2_del_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		netif_napi_del(&bp->bnx2_napi[i].napi);
}
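/* One NAPI context is registered per IRQ vector.  Vector 0 also handles
 * link and other slow-path events via bnx2_poll(); the remaining MSI-X
 * vectors use the leaner bnx2_poll_msix().  64 is the NAPI weight.
 */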
static void
bnx2_init_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

		if (i == 0)
			poll = bnx2_poll;
		else
			poll = bnx2_poll_msix;

		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
		bnapi->bp = bp;
	}
}
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_fix_features	= bnx2_fix_features,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
static int
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev;
	struct bnx2 *bp;
	int rc;
	char str[40];

	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0)
		goto err_free;

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_RXHASH | NETIF_F_RXCSUM;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	dev->features |= dev->hw_features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
		    "node addr %pM\n", board_info[ent->driver_data].name,
		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
		    pdev->irq, dev->dev_addr);

	return 0;

error:
	pci_iounmap(pdev, bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_free:
	free_netdev(dev);
	return rc;
}
static void
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	unregister_netdev(dev);

	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	kfree(bp->temp_stats_blk);

	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int
bnx2_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev)) {
		cancel_work_sync(&bp->reset_task);
		bnx2_netif_stop(bp, true);
		netif_device_detach(dev);
		del_timer_sync(&bp->timer);
		bnx2_shutdown_chip(bp);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
	}
	bnx2_setup_wol(bp);
	return 0;
}
static int
bnx2_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_request_irq(bp);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
#ifdef CONFIG_PM_SLEEP
static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
#define BNX2_PM_OPS (&bnx2_pm_ops)

#else

#define BNX2_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	int err = 0;

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		if (netif_running(dev))
			err = bnx2_init_nic(bp, 1);
		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
		bnx2_napi_enable(bp);
		dev_close(dev);
	}
	rtnl_unlock();

	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
		return result;

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			err); /* non-fatal, continue */
	}

	return result;
}
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp, true);

	netif_device_attach(dev);
	rtnl_unlock();
}
static void bnx2_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp;

	if (!dev)
		return;

	bp = netdev_priv(dev);
	if (!bp)
		return;

	rtnl_lock();
	if (netif_running(dev))
		dev_close(bp->dev);

	if (system_state == SYSTEM_POWER_OFF)
		bnx2_set_power_state(bp, PCI_D3hot);

	rtnl_unlock();
}
static const struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= bnx2_remove_one,
	.driver.pm	= BNX2_PM_OPS,
	.err_handler	= &bnx2_err_handler,
	.shutdown	= bnx2_shutdown,
};
module_pci_driver(bnx2_pci_driver);