bnx2: Add GRO support.
[cascardo/linux.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if_vlan.h>
40 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
41 #define BCM_VLAN 1
42 #endif
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/firmware.h>
51 #include <linux/log2.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
#define DRV_MODULE_NAME         "bnx2"
#define DRV_MODULE_VERSION      "2.0.9"
#define DRV_MODULE_RELDATE      "April 27, 2010"
/* Firmware image names, also advertised via MODULE_FIRMWARE() below.
 * "06" images are for 5706/5708-class chips, "09" for 5709/5716.
 */
#define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-5.0.0.j9.fw"
#define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
#define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"

/* Absolute jiffies deadline 'x' ticks from now. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

/* Module parameter: non-zero forces legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
/* Board identifiers; stored as driver_data in bnx2_pci_tbl and used to
 * index board_info[] below, so the two must stay in the same order.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
        BCM5716,
        BCM5716S,
} board_t;
105
/* Human-readable board names, indexed by board_t above -- keep the
 * entries in board_t order.
 */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5716 1000Base-T" },
        { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
        };
122
/* PCI match table.  The HP OEM entries (matched by subsystem vendor/ID)
 * are listed before the generic PCI_ANY_ID entries so they match first.
 * The 5716 parts use numeric device IDs (0x163b/0x163c).
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { PCI_VENDOR_ID_BROADCOM, 0x163b,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
        { PCI_VENDOR_ID_BROADCOM, 0x163c,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
        { 0, }
};
148
/* NVRAM device table.  Each entry's first five words are raw controller
 * values (strapping match and flash access configuration -- see
 * struct flash_spec); the remaining fields describe paging/addressing
 * and total size of the part.  "Entry NNNN" rows are placeholders for
 * strapping combinations with no known device.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
237
/* Fixed flash layout for the 5709 family, used instead of matching
 * against flash_table (presumably selected by chip number at nvram
 * init -- confirm at the bnx2 nvram setup code).
 */
static const struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};
246
247 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248
249 static void bnx2_init_napi(struct bnx2 *bp);
250
251 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
252 {
253         u32 diff;
254
255         smp_mb();
256
257         /* The ring uses 256 indices for 255 entries, one of them
258          * needs to be skipped.
259          */
260         diff = txr->tx_prod - txr->tx_cons;
261         if (unlikely(diff >= TX_DESC_CNT)) {
262                 diff &= 0xffff;
263                 if (diff == TX_DESC_CNT)
264                         diff = MAX_TX_DESC_CNT;
265         }
266         return (bp->tx_ring_size - diff);
267 }
268
/* Read a register indirectly through the PCI config window.  The
 * window-address write and the data read must be paired atomically,
 * hence indirect_lock.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        u32 val;

        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
        spin_unlock_bh(&bp->indirect_lock);
        return val;
}
280
/* Write a register indirectly through the PCI config window; the
 * address/data pair is protected by indirect_lock.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
289
290 static void
291 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
292 {
293         bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
294 }
295
296 static u32
297 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
298 {
299         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
300 }
301
/* Write a dword to on-chip context memory at cid_addr + offset.
 * indirect_lock serializes these shared context registers with the
 * other indirect accesses.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
        offset += cid_addr;
        spin_lock_bh(&bp->indirect_lock);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                int i;

                /* 5709: post the write, then poll up to 5 x 5us for the
                 * chip to clear WRITE_REQ (i.e. consume it).
                 */
                REG_WR(bp, BNX2_CTX_CTX_DATA, val);
                REG_WR(bp, BNX2_CTX_CTX_CTRL,
                       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
                for (i = 0; i < 5; i++) {
                        val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
                        if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                                break;
                        udelay(5);
                }
        } else {
                /* Older chips use a plain address/data register pair. */
                REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
                REG_WR(bp, BNX2_CTX_DATA, val);
        }
        spin_unlock_bh(&bp->indirect_lock);
}
325
326 #ifdef BCM_CNIC
327 static int
328 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
329 {
330         struct bnx2 *bp = netdev_priv(dev);
331         struct drv_ctl_io *io = &info->data.io;
332
333         switch (info->cmd) {
334         case DRV_CTL_IO_WR_CMD:
335                 bnx2_reg_wr_ind(bp, io->offset, io->data);
336                 break;
337         case DRV_CTL_IO_RD_CMD:
338                 io->data = bnx2_reg_rd_ind(bp, io->offset);
339                 break;
340         case DRV_CTL_CTX_WR_CMD:
341                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
342                 break;
343         default:
344                 return -EINVAL;
345         }
346         return 0;
347 }
348
/* Describe to the cnic driver which IRQ vector and status block to
 * use.  With MSI-X, cnic gets the vector/status-block slot after the
 * bnx2 vectors (sb_id = irq_nvecs); otherwise it shares vector 0
 * (cnic_present/cnic_tag mark the shared path).
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        int sb_id;

        if (bp->flags & BNX2_FLAG_USING_MSIX) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                bnapi->cnic_present = 0;
                sb_id = bp->irq_nvecs;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                bnapi->cnic_tag = bnapi->last_status_idx;
                bnapi->cnic_present = 1;
                sb_id = 0;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }

        cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
        /* Status blocks are laid out contiguously at MSIX alignment;
         * point cnic at slice sb_id (see bnx2_alloc_mem).
         */
        cp->irq_arr[0].status_blk = (void *)
                ((unsigned long) bnapi->status_blk.msi +
                (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
        cp->irq_arr[0].status_blk_num = sb_id;
        cp->num_irq = 1;
}
375
/* Attach a cnic driver instance.  Publishes 'ops' via RCU (readers
 * dereference bp->cnic_ops) and fills in the IRQ info cnic will use.
 * Returns -EBUSY if a cnic driver is already registered, -EINVAL on a
 * NULL ops pointer.
 * NOTE(review): unlike bnx2_unregister_cnic() this does not take
 * cnic_lock -- presumably registration is serialized by the caller;
 * confirm against the cnic driver.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                              void *data)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (cp->drv_state & CNIC_DRV_STATE_REGD)
                return -EBUSY;

        /* cnic_data must be set before ops is published via RCU. */
        bp->cnic_data = data;
        rcu_assign_pointer(bp->cnic_ops, ops);

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;

        bnx2_setup_cnic_irq_info(bp);

        return 0;
}
398
/* Detach the cnic driver: clear the registration state under
 * cnic_lock, then wait for any RCU readers of bp->cnic_ops to drain
 * before returning.  Always succeeds.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_lock);
        cp->drv_state = 0;
        bnapi->cnic_present = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_lock);
        synchronize_rcu();
        return 0;
}
413
414 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
415 {
416         struct bnx2 *bp = netdev_priv(dev);
417         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
418
419         cp->drv_owner = THIS_MODULE;
420         cp->chip_id = bp->chip_id;
421         cp->pdev = bp->pdev;
422         cp->io_base = bp->regview;
423         cp->drv_ctl = bnx2_drv_ctl;
424         cp->drv_register_cnic = bnx2_register_cnic;
425         cp->drv_unregister_cnic = bnx2_unregister_cnic;
426
427         return cp;
428 }
429 EXPORT_SYMBOL(bnx2_cnic_probe);
430
431 static void
432 bnx2_cnic_stop(struct bnx2 *bp)
433 {
434         struct cnic_ops *c_ops;
435         struct cnic_ctl_info info;
436
437         mutex_lock(&bp->cnic_lock);
438         c_ops = bp->cnic_ops;
439         if (c_ops) {
440                 info.cmd = CNIC_CTL_STOP_CMD;
441                 c_ops->cnic_ctl(bp->cnic_data, &info);
442         }
443         mutex_unlock(&bp->cnic_lock);
444 }
445
/* Tell a registered cnic driver (if any) to start.  In non-MSI-X mode
 * the shared status-block tag is refreshed first so cnic resumes from
 * the current status index.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
        struct cnic_ops *c_ops;
        struct cnic_ctl_info info;

        mutex_lock(&bp->cnic_lock);
        c_ops = bp->cnic_ops;
        if (c_ops) {
                if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
                        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

                        bnapi->cnic_tag = bnapi->last_status_idx;
                }
                info.cmd = CNIC_CTL_START_CMD;
                c_ops->cnic_ctl(bp->cnic_data, &info);
        }
        mutex_unlock(&bp->cnic_lock);
}
465
466 #else
467
/* CNIC support not compiled in: stop hook is a no-op. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
472
/* CNIC support not compiled in: start hook is a no-op. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
477
478 #endif
479
/* Read PHY register 'reg' over the EMAC MDIO interface.  Returns 0
 * with the value in *val, or -EBUSY (and *val = 0) if the transaction
 * never completed.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                /* Suspend hardware autopolling while we drive MDIO by
                 * hand; the read-back plus 40us delay lets it settle.
                 */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Start the read transaction. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 x 10us for START_BUSY to clear, then fetch
         * the data bits.
         */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                /* Timed out. */
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                /* Restore hardware autopolling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
536
/* Write 'val' to PHY register 'reg' over the EMAC MDIO interface.
 * Returns 0 on completion or -EBUSY if the transaction never
 * finished.  Mirrors bnx2_read_phy()'s autopoll suspend/restore.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                /* Suspend hardware autopolling during the manual access. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Start the write transaction. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 x 10us for completion. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                /* Restore hardware autopolling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
585
/* Mask interrupts on every vector; the trailing read flushes the
 * posted writes to the chip.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];
                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        }
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
599
/* Unmask interrupts on every vector.  Each vector is acked twice with
 * its last status index -- first still masked, then unmasked -- and a
 * coalescing cycle is forced at the end so any pending event fires
 * immediately.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bnapi->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bnapi->last_status_idx);
        }
        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
620
/* Mask interrupts and wait for in-flight handlers to finish.
 * intr_sem is bumped first; bnx2_netif_start() only re-enables the
 * datapath when it drops back to zero.  The hardware part is skipped
 * when the device is not running.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        int i;

        atomic_inc(&bp->intr_sem);
        if (!netif_running(bp->dev))
                return;

        bnx2_disable_int(bp);
        for (i = 0; i < bp->irq_nvecs; i++)
                synchronize_irq(bp->irq_tbl[i].vector);
}
634
635 static void
636 bnx2_napi_disable(struct bnx2 *bp)
637 {
638         int i;
639
640         for (i = 0; i < bp->irq_nvecs; i++)
641                 napi_disable(&bp->bnx2_napi[i].napi);
642 }
643
644 static void
645 bnx2_napi_enable(struct bnx2 *bp)
646 {
647         int i;
648
649         for (i = 0; i < bp->irq_nvecs; i++)
650                 napi_enable(&bp->bnx2_napi[i].napi);
651 }
652
/* Stop the datapath: optionally stop the cnic driver, disable NAPI
 * polling and the TX queues, then mask interrupts synchronously.
 * trans_start is refreshed on every TX queue so the netdev watchdog
 * does not see a stale timestamp and report a bogus TX timeout.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
        if (stop_cnic)
                bnx2_cnic_stop(bp);
        if (netif_running(bp->dev)) {
                int i;

                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
                /* prevent tx timeout */
                for (i = 0; i <  bp->dev->num_tx_queues; i++) {
                        struct netdev_queue *txq;

                        txq = netdev_get_tx_queue(bp->dev, i);
                        txq->trans_start = jiffies;
                }
        }
        bnx2_disable_int_sync(bp);
}
673
/* Reverse of bnx2_netif_stop().  intr_sem counts nested stops; only
 * the call that brings it back to zero wakes the TX queues, NAPI,
 * interrupts, and (optionally) the cnic driver.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_tx_wake_all_queues(bp->dev);
                        bnx2_napi_enable(bp);
                        bnx2_enable_int(bp);
                        if (start_cnic)
                                bnx2_cnic_start(bp);
                }
        }
}
687
688 static void
689 bnx2_free_tx_mem(struct bnx2 *bp)
690 {
691         int i;
692
693         for (i = 0; i < bp->num_tx_rings; i++) {
694                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
695                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
696
697                 if (txr->tx_desc_ring) {
698                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
699                                             txr->tx_desc_ring,
700                                             txr->tx_desc_mapping);
701                         txr->tx_desc_ring = NULL;
702                 }
703                 kfree(txr->tx_buf_ring);
704                 txr->tx_buf_ring = NULL;
705         }
706 }
707
708 static void
709 bnx2_free_rx_mem(struct bnx2 *bp)
710 {
711         int i;
712
713         for (i = 0; i < bp->num_rx_rings; i++) {
714                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
715                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
716                 int j;
717
718                 for (j = 0; j < bp->rx_max_ring; j++) {
719                         if (rxr->rx_desc_ring[j])
720                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
721                                                     rxr->rx_desc_ring[j],
722                                                     rxr->rx_desc_mapping[j]);
723                         rxr->rx_desc_ring[j] = NULL;
724                 }
725                 vfree(rxr->rx_buf_ring);
726                 rxr->rx_buf_ring = NULL;
727
728                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
729                         if (rxr->rx_pg_desc_ring[j])
730                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
731                                                     rxr->rx_pg_desc_ring[j],
732                                                     rxr->rx_pg_desc_mapping[j]);
733                         rxr->rx_pg_desc_ring[j] = NULL;
734                 }
735                 vfree(rxr->rx_pg_ring);
736                 rxr->rx_pg_ring = NULL;
737         }
738 }
739
740 static int
741 bnx2_alloc_tx_mem(struct bnx2 *bp)
742 {
743         int i;
744
745         for (i = 0; i < bp->num_tx_rings; i++) {
746                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
747                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
748
749                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
750                 if (txr->tx_buf_ring == NULL)
751                         return -ENOMEM;
752
753                 txr->tx_desc_ring =
754                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
755                                              &txr->tx_desc_mapping);
756                 if (txr->tx_desc_ring == NULL)
757                         return -ENOMEM;
758         }
759         return 0;
760 }
761
762 static int
763 bnx2_alloc_rx_mem(struct bnx2 *bp)
764 {
765         int i;
766
767         for (i = 0; i < bp->num_rx_rings; i++) {
768                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
769                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
770                 int j;
771
772                 rxr->rx_buf_ring =
773                         vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
774                 if (rxr->rx_buf_ring == NULL)
775                         return -ENOMEM;
776
777                 memset(rxr->rx_buf_ring, 0,
778                        SW_RXBD_RING_SIZE * bp->rx_max_ring);
779
780                 for (j = 0; j < bp->rx_max_ring; j++) {
781                         rxr->rx_desc_ring[j] =
782                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
783                                                      &rxr->rx_desc_mapping[j]);
784                         if (rxr->rx_desc_ring[j] == NULL)
785                                 return -ENOMEM;
786
787                 }
788
789                 if (bp->rx_pg_ring_size) {
790                         rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
791                                                   bp->rx_max_pg_ring);
792                         if (rxr->rx_pg_ring == NULL)
793                                 return -ENOMEM;
794
795                         memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
796                                bp->rx_max_pg_ring);
797                 }
798
799                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
800                         rxr->rx_pg_desc_ring[j] =
801                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
802                                                 &rxr->rx_pg_desc_mapping[j]);
803                         if (rxr->rx_pg_desc_ring[j] == NULL)
804                                 return -ENOMEM;
805
806                 }
807         }
808         return 0;
809 }
810
/* Free everything bnx2_alloc_mem() allocated: TX/RX ring memory, 5709
 * context pages, and the combined status/statistics DMA block.  Safe
 * to call on a partially allocated device (used as the error path).
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

        bnx2_free_tx_mem(bp);
        bnx2_free_rx_mem(bp);

        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        if (bnapi->status_blk.msi) {
                /* stats_blk lives in the same allocation as the status
                 * block(s) -- see bnx2_alloc_mem().
                 */
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bnapi->status_blk.msi,
                                    bp->status_blk_mapping);
                bnapi->status_blk.msi = NULL;
                bp->stats_blk = NULL;
        }
}
836
/* Allocate all non-ring memory: one DMA block holding the status
 * block(s) followed by the statistics block, plus (5709 only) the
 * host context pages, then the RX and TX rings.  On any failure
 * everything already allocated is released and -ENOMEM is returned.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size, err;
        struct bnx2_napi *bnapi;
        void *status_blk;

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                /* One aligned status-block slice per hardware MSI-X vector. */
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                          &bp->status_blk_mapping);
        if (status_blk == NULL)
                goto alloc_mem_err;

        memset(status_blk, 0, bp->status_stats_size);

        /* Vector 0 uses the base status block... */
        bnapi = &bp->bnx2_napi[0];
        bnapi->status_blk.msi = status_blk;
        bnapi->hw_tx_cons_ptr =
                &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
        bnapi->hw_rx_cons_ptr =
                &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
                        struct status_block_msix *sblk;

                        bnapi = &bp->bnx2_napi[i];

                        /* ...each remaining vector gets its own aligned
                         * slice of the same allocation.
                         */
                        sblk = (void *) (status_blk +
                                         BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->status_blk.msix = sblk;
                        bnapi->hw_tx_cons_ptr =
                                &sblk->status_tx_quick_consumer_index;
                        bnapi->hw_rx_cons_ptr =
                                &sblk->status_rx_quick_consumer_index;
                        bnapi->int_num = i << 24;
                }
        }

        /* Statistics block sits right after the status block(s). */
        bp->stats_blk = status_blk + status_blk_size;

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 0x2000 bytes of host context memory for the 5709,
                 * allocated one BCM page at a time.
                 */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }

        err = bnx2_alloc_rx_mem(bp);
        if (err)
                goto alloc_mem_err;

        err = bnx2_alloc_tx_mem(bp);
        if (err)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
913
914 static void
915 bnx2_report_fw_link(struct bnx2 *bp)
916 {
917         u32 fw_link_status = 0;
918
919         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
920                 return;
921
922         if (bp->link_up) {
923                 u32 bmsr;
924
925                 switch (bp->line_speed) {
926                 case SPEED_10:
927                         if (bp->duplex == DUPLEX_HALF)
928                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
929                         else
930                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
931                         break;
932                 case SPEED_100:
933                         if (bp->duplex == DUPLEX_HALF)
934                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
935                         else
936                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
937                         break;
938                 case SPEED_1000:
939                         if (bp->duplex == DUPLEX_HALF)
940                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
941                         else
942                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
943                         break;
944                 case SPEED_2500:
945                         if (bp->duplex == DUPLEX_HALF)
946                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
947                         else
948                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
949                         break;
950                 }
951
952                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
953
954                 if (bp->autoneg) {
955                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
956
957                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
958                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
959
960                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
961                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
962                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
963                         else
964                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
965                 }
966         }
967         else
968                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
969
970         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
971 }
972
973 static char *
974 bnx2_xceiver_str(struct bnx2 *bp)
975 {
976         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
977                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
978                  "Copper"));
979 }
980
static void
bnx2_report_link(struct bnx2 *bp)
{
        /* Log the link state and mirror it to the firmware.  The "Link is
         * Up" line is deliberately printed without a trailing newline so
         * that the flow-control status can be appended with pr_cont().
         */
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
                            bnx2_xceiver_str(bp),
                            bp->line_speed,
                            bp->duplex == DUPLEX_FULL ? "full" : "half");

                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                pr_cont(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        pr_cont("& transmit ");
                        }
                        else {
                                pr_cont(", transmit ");
                        }
                        pr_cont("flow control ON");
                }
                pr_cont("\n");
        } else {
                netif_carrier_off(bp->dev);
                netdev_err(bp->dev, "NIC %s Link is Down\n",
                           bnx2_xceiver_str(bp));
        }

        /* Keep the bootcode's view of the link in sync. */
        bnx2_report_fw_link(bp);
}
1011
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        /* If both speed and flow control are not being autonegotiated,
         * simply apply the administratively requested setting (pause is
         * only meaningful on full duplex links).
         */
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        /* The 5708 SerDes PHY reports the resolved pause result directly
         * in its 1000X status register, so no table lookup is needed.
         */
        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                /* Translate 1000BASE-X pause bits into the copper-style
                 * encoding so the resolution logic below can be shared.
                 */
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                /* Partner wants pause but cannot send it:
                                 * we only receive.
                                 */
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
1087
1088 static int
1089 bnx2_5709s_linkup(struct bnx2 *bp)
1090 {
1091         u32 val, speed;
1092
1093         bp->link_up = 1;
1094
1095         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1096         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1097         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1098
1099         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1100                 bp->line_speed = bp->req_line_speed;
1101                 bp->duplex = bp->req_duplex;
1102                 return 0;
1103         }
1104         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1105         switch (speed) {
1106                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1107                         bp->line_speed = SPEED_10;
1108                         break;
1109                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1110                         bp->line_speed = SPEED_100;
1111                         break;
1112                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1113                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1114                         bp->line_speed = SPEED_1000;
1115                         break;
1116                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1117                         bp->line_speed = SPEED_2500;
1118                         break;
1119         }
1120         if (val & MII_BNX2_GP_TOP_AN_FD)
1121                 bp->duplex = DUPLEX_FULL;
1122         else
1123                 bp->duplex = DUPLEX_HALF;
1124         return 0;
1125 }
1126
1127 static int
1128 bnx2_5708s_linkup(struct bnx2 *bp)
1129 {
1130         u32 val;
1131
1132         bp->link_up = 1;
1133         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1134         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1135                 case BCM5708S_1000X_STAT1_SPEED_10:
1136                         bp->line_speed = SPEED_10;
1137                         break;
1138                 case BCM5708S_1000X_STAT1_SPEED_100:
1139                         bp->line_speed = SPEED_100;
1140                         break;
1141                 case BCM5708S_1000X_STAT1_SPEED_1G:
1142                         bp->line_speed = SPEED_1000;
1143                         break;
1144                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1145                         bp->line_speed = SPEED_2500;
1146                         break;
1147         }
1148         if (val & BCM5708S_1000X_STAT1_FD)
1149                 bp->duplex = DUPLEX_FULL;
1150         else
1151                 bp->duplex = DUPLEX_HALF;
1152
1153         return 0;
1154 }
1155
1156 static int
1157 bnx2_5706s_linkup(struct bnx2 *bp)
1158 {
1159         u32 bmcr, local_adv, remote_adv, common;
1160
1161         bp->link_up = 1;
1162         bp->line_speed = SPEED_1000;
1163
1164         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1165         if (bmcr & BMCR_FULLDPLX) {
1166                 bp->duplex = DUPLEX_FULL;
1167         }
1168         else {
1169                 bp->duplex = DUPLEX_HALF;
1170         }
1171
1172         if (!(bmcr & BMCR_ANENABLE)) {
1173                 return 0;
1174         }
1175
1176         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1177         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1178
1179         common = local_adv & remote_adv;
1180         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1181
1182                 if (common & ADVERTISE_1000XFULL) {
1183                         bp->duplex = DUPLEX_FULL;
1184                 }
1185                 else {
1186                         bp->duplex = DUPLEX_HALF;
1187                 }
1188         }
1189
1190         return 0;
1191 }
1192
1193 static int
1194 bnx2_copper_linkup(struct bnx2 *bp)
1195 {
1196         u32 bmcr;
1197
1198         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1199         if (bmcr & BMCR_ANENABLE) {
1200                 u32 local_adv, remote_adv, common;
1201
1202                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1203                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1204
1205                 common = local_adv & (remote_adv >> 2);
1206                 if (common & ADVERTISE_1000FULL) {
1207                         bp->line_speed = SPEED_1000;
1208                         bp->duplex = DUPLEX_FULL;
1209                 }
1210                 else if (common & ADVERTISE_1000HALF) {
1211                         bp->line_speed = SPEED_1000;
1212                         bp->duplex = DUPLEX_HALF;
1213                 }
1214                 else {
1215                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1216                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1217
1218                         common = local_adv & remote_adv;
1219                         if (common & ADVERTISE_100FULL) {
1220                                 bp->line_speed = SPEED_100;
1221                                 bp->duplex = DUPLEX_FULL;
1222                         }
1223                         else if (common & ADVERTISE_100HALF) {
1224                                 bp->line_speed = SPEED_100;
1225                                 bp->duplex = DUPLEX_HALF;
1226                         }
1227                         else if (common & ADVERTISE_10FULL) {
1228                                 bp->line_speed = SPEED_10;
1229                                 bp->duplex = DUPLEX_FULL;
1230                         }
1231                         else if (common & ADVERTISE_10HALF) {
1232                                 bp->line_speed = SPEED_10;
1233                                 bp->duplex = DUPLEX_HALF;
1234                         }
1235                         else {
1236                                 bp->line_speed = 0;
1237                                 bp->link_up = 0;
1238                         }
1239                 }
1240         }
1241         else {
1242                 if (bmcr & BMCR_SPEED100) {
1243                         bp->line_speed = SPEED_100;
1244                 }
1245                 else {
1246                         bp->line_speed = SPEED_10;
1247                 }
1248                 if (bmcr & BMCR_FULLDPLX) {
1249                         bp->duplex = DUPLEX_FULL;
1250                 }
1251                 else {
1252                         bp->duplex = DUPLEX_HALF;
1253                 }
1254         }
1255
1256         return 0;
1257 }
1258
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
        u32 val, rx_cid_addr = GET_CID_ADDR(cid);

        /* Build the L2 context-type word for one rx ring context. */
        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
        val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
        /* NOTE(review): 0x02 << 8 is an undocumented field of the context
         * type word -- its meaning is not derivable from this file.
         */
        val |= 0x02 << 8;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                u32 lo_water, hi_water;

                /* 5709 supports rx buffer watermarks in the context word;
                 * they pace pause frames when tx flow control is enabled.
                 */
                if (bp->flow_ctrl & FLOW_CTRL_TX)
                        lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
                else
                        lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
                if (lo_water >= bp->rx_ring_size)
                        lo_water = 0;

                hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

                if (hi_water <= lo_water)
                        lo_water = 0;

                /* Scale down to the granularity the hardware expects. */
                hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
                lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

                /* hi_water is a 4-bit field; a zero high mark disables
                 * the low mark as well.
                 */
                if (hi_water > 0xf)
                        hi_water = 0xf;
                else if (hi_water == 0)
                        lo_water = 0;
                val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
        }
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1294
1295 static void
1296 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1297 {
1298         int i;
1299         u32 cid;
1300
1301         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1302                 if (i == 1)
1303                         cid = RX_RSS_CID;
1304                 bnx2_init_rx_context(bp, cid);
1305         }
1306 }
1307
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
        u32 val;

        /* Program tx lengths/IPG; 1G half duplex needs the larger value. */
        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
                (bp->duplex == DUPLEX_HALF)) {
                REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
        }

        /* Configure the EMAC mode register. */
        val = REG_RD(bp, BNX2_EMAC_MODE);

        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                BNX2_EMAC_MODE_25G_MODE);

        if (bp->link_up) {
                switch (bp->line_speed) {
                        case SPEED_10:
                                /* 5706 has no MII_10M mode and falls through
                                 * to plain MII below.
                                 */
                                if (CHIP_NUM(bp) != CHIP_NUM_5706) {
                                        val |= BNX2_EMAC_MODE_PORT_MII_10M;
                                        break;
                                }
                                /* fall through */
                        case SPEED_100:
                                val |= BNX2_EMAC_MODE_PORT_MII;
                                break;
                        case SPEED_2500:
                                /* 2.5G is GMII with the 25G mode bit set. */
                                val |= BNX2_EMAC_MODE_25G_MODE;
                                /* fall through */
                        case SPEED_1000:
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                break;
                }
        }
        else {
                /* No link: park the port in GMII mode. */
                val |= BNX2_EMAC_MODE_PORT_GMII;
        }

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        REG_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = REG_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

        /* 5709 rx watermarks depend on flow control; reprogram them. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_init_all_rx_contexts(bp);
}
1375
1376 static void
1377 bnx2_enable_bmsr1(struct bnx2 *bp)
1378 {
1379         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1380             (CHIP_NUM(bp) == CHIP_NUM_5709))
1381                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1382                                MII_BNX2_BLK_ADDR_GP_STATUS);
1383 }
1384
1385 static void
1386 bnx2_disable_bmsr1(struct bnx2 *bp)
1387 {
1388         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1389             (CHIP_NUM(bp) == CHIP_NUM_5709))
1390                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1391                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1392 }
1393
1394 static int
1395 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1396 {
1397         u32 up1;
1398         int ret = 1;
1399
1400         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1401                 return 0;
1402
1403         if (bp->autoneg & AUTONEG_SPEED)
1404                 bp->advertising |= ADVERTISED_2500baseX_Full;
1405
1406         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1407                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1408
1409         bnx2_read_phy(bp, bp->mii_up1, &up1);
1410         if (!(up1 & BCM5708S_UP1_2G5)) {
1411                 up1 |= BCM5708S_UP1_2G5;
1412                 bnx2_write_phy(bp, bp->mii_up1, up1);
1413                 ret = 0;
1414         }
1415
1416         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1417                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1418                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1419
1420         return ret;
1421 }
1422
1423 static int
1424 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1425 {
1426         u32 up1;
1427         int ret = 0;
1428
1429         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1430                 return 0;
1431
1432         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1433                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1434
1435         bnx2_read_phy(bp, bp->mii_up1, &up1);
1436         if (up1 & BCM5708S_UP1_2G5) {
1437                 up1 &= ~BCM5708S_UP1_2G5;
1438                 bnx2_write_phy(bp, bp->mii_up1, up1);
1439                 ret = 1;
1440         }
1441
1442         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1443                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1444                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1445
1446         return ret;
1447 }
1448
1449 static void
1450 bnx2_enable_forced_2g5(struct bnx2 *bp)
1451 {
1452         u32 bmcr;
1453
1454         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1455                 return;
1456
1457         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1458                 u32 val;
1459
1460                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1461                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1462                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1463                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1464                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1465                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1466
1467                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1468                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1469                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1470
1471         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1472                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1473                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1474         } else {
1475                 return;
1476         }
1477
1478         if (bp->autoneg & AUTONEG_SPEED) {
1479                 bmcr &= ~BMCR_ANENABLE;
1480                 if (bp->req_duplex == DUPLEX_FULL)
1481                         bmcr |= BMCR_FULLDPLX;
1482         }
1483         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1484 }
1485
1486 static void
1487 bnx2_disable_forced_2g5(struct bnx2 *bp)
1488 {
1489         u32 bmcr;
1490
1491         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1492                 return;
1493
1494         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1495                 u32 val;
1496
1497                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1498                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1499                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1500                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1501                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1502
1503                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1504                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1505                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1506
1507         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1508                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1509                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1510         } else {
1511                 return;
1512         }
1513
1514         if (bp->autoneg & AUTONEG_SPEED)
1515                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1516         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1517 }
1518
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
        u32 val;

        /* Toggle the 5706S SerDes control word through the DSP access
         * registers.  NOTE(review): the bit masks below (clear 0x00f0 when
         * start != 0, set 0xc0 when start == 0) are undocumented here --
         * confirm their exact meaning against Broadcom register docs.
         */
        bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
        if (start)
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
        else
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1531
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        /* In loopback mode the link is always considered up. */
        if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }

        /* A remotely managed PHY is handled entirely by the firmware. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return 0;

        link_up = bp->link_up;

        /* BMSR link status is latched; read twice for the current state.
         * On 5709 SerDes, bmsr1 lives in a different register block,
         * hence the enable/disable bracket around the reads.
         */
        bnx2_enable_bmsr1(bp);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_disable_bmsr1(bp);

        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {
                u32 val, an_dbg;

                /* 5706 SerDes workaround: derive the link state from the
                 * EMAC status and the AN debug shadow register instead of
                 * trusting BMSR.
                 */
                if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
                        bnx2_5706s_force_link_dn(bp, 0);
                        bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
                }
                val = REG_RD(bp, BNX2_EMAC_STATUS);

                /* Shadow register reads are latched; read twice. */
                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

                if ((val & BNX2_EMAC_STATUS_LINK) &&
                    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                /* Fill in line_speed/duplex via the chip-specific helper. */
                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                                bnx2_5706s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                                bnx2_5708s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                                bnx2_5709s_linkup(bp);
                }
                else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        }
        else {
                if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
                    (bp->autoneg & AUTONEG_SPEED))
                        bnx2_disable_forced_2g5(bp);

                /* If a link was established by parallel detect, re-enable
                 * autoneg now that the link is gone.
                 */
                if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
                bp->link_up = 0;
        }

        /* Only log and notify the firmware on an actual state change. */
        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}
1615
1616 static int
1617 bnx2_reset_phy(struct bnx2 *bp)
1618 {
1619         int i;
1620         u32 reg;
1621
1622         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1623
1624 #define PHY_RESET_MAX_WAIT 100
1625         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1626                 udelay(10);
1627
1628                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1629                 if (!(reg & BMCR_RESET)) {
1630                         udelay(20);
1631                         break;
1632                 }
1633         }
1634         if (i == PHY_RESET_MAX_WAIT) {
1635                 return -EBUSY;
1636         }
1637         return 0;
1638 }
1639
1640 static u32
1641 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1642 {
1643         u32 adv = 0;
1644
1645         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1646                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1647
1648                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1649                         adv = ADVERTISE_1000XPAUSE;
1650                 }
1651                 else {
1652                         adv = ADVERTISE_PAUSE_CAP;
1653                 }
1654         }
1655         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1656                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1657                         adv = ADVERTISE_1000XPSE_ASYM;
1658                 }
1659                 else {
1660                         adv = ADVERTISE_PAUSE_ASYM;
1661                 }
1662         }
1663         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1664                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1665                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1666                 }
1667                 else {
1668                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1669                 }
1670         }
1671         return adv;
1672 }
1673
1674 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1675
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 speed_arg = 0, pause_adv;

        pause_adv = bnx2_phy_get_pause_adv(bp);

        /* Encode the requested (or advertised) speed/duplex settings into
         * the netlink-style bitmask the firmware expects in DRV_MB_ARG0.
         */
        if (bp->autoneg & AUTONEG_SPEED) {
                speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
                if (bp->advertising & ADVERTISED_2500baseX_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
        } else {
                /* Forced speed: pick exactly one speed/duplex bit. */
                if (bp->req_line_speed == SPEED_2500)
                        speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
                else if (bp->req_line_speed == SPEED_1000)
                        speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
                else if (bp->req_line_speed == SPEED_100) {
                        if (bp->req_duplex == DUPLEX_FULL)
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
                        else
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
                } else if (bp->req_line_speed == SPEED_10) {
                        if (bp->req_duplex == DUPLEX_FULL)
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
                        else
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
                }
        }

        if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
                speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
        if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
                speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

        if (port == PORT_TP)
                speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
                             BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

        bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

        /* bnx2_fw_sync() may sleep waiting for the firmware, so drop
         * phy_lock around it (matches the sparse annotations above).
         */
        spin_unlock_bh(&bp->phy_lock);
        bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
        spin_lock_bh(&bp->phy_lock);

        return 0;
}
1734
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        /* Configure the SERDES PHY from bp->autoneg / bp->req_line_speed /
         * bp->req_duplex.  Remote-PHY capable devices delegate to the
         * firmware instead.  Called with phy_lock held; may drop and
         * reacquire it while forcing a link transition.  Always returns 0.
         */
        u32 adv, bmcr;
        u32 new_adv = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return (bnx2_setup_remote_phy(bp, port));

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                /* Forced speed/duplex path. */
                u32 new_bmcr;
                int force_link_down = 0;

                /* Changing the 2.5G advertisement requires a link bounce. */
                if (bp->req_line_speed == SPEED_2500) {
                        if (!bnx2_test_and_enable_2g5(bp))
                                force_link_down = 1;
                } else if (bp->req_line_speed == SPEED_1000) {
                        if (bnx2_test_and_disable_2g5(bp))
                                force_link_down = 1;
                }
                bnx2_read_phy(bp, bp->mii_adv, &adv);
                adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                new_bmcr = bmcr & ~BMCR_ANENABLE;
                new_bmcr |= BMCR_SPEED1000;

                /* Forcing 2.5G is chip-specific on 5709 vs 5708. */
                if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                        if (bp->req_line_speed == SPEED_2500)
                                bnx2_enable_forced_2g5(bp);
                        else if (bp->req_line_speed == SPEED_1000) {
                                bnx2_disable_forced_2g5(bp);
                                /* drop the extra speed bit used for 2.5G */
                                new_bmcr &= ~0x2000;
                        }

                } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                        if (bp->req_line_speed == SPEED_2500)
                                new_bmcr |= BCM5708S_BMCR_FORCE_2500;
                        else
                                new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
                }

                if (bp->req_duplex == DUPLEX_FULL) {
                        adv |= ADVERTISE_1000XFULL;
                        new_bmcr |= BMCR_FULLDPLX;
                }
                else {
                        adv |= ADVERTISE_1000XHALF;
                        new_bmcr &= ~BMCR_FULLDPLX;
                }
                if ((new_bmcr != bmcr) || (force_link_down)) {
                        /* Force a link down visible on the other side */
                        if (bp->link_up) {
                                bnx2_write_phy(bp, bp->mii_adv, adv &
                                               ~(ADVERTISE_1000XFULL |
                                                 ADVERTISE_1000XHALF));
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
                                        BMCR_ANRESTART | BMCR_ANENABLE);

                                bp->link_up = 0;
                                netif_carrier_off(bp->dev);
                                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                                bnx2_report_link(bp);
                        }
                        bnx2_write_phy(bp, bp->mii_adv, adv);
                        bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                } else {
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Autoneg path: build the desired 1000X advertisement. */
        bnx2_test_and_enable_2g5(bp);

        if (bp->advertising & ADVERTISED_1000baseT_Full)
                new_adv |= ADVERTISE_1000XFULL;

        new_adv |= bnx2_phy_get_pause_adv(bp);

        bnx2_read_phy(bp, bp->mii_adv, &adv);
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        bp->serdes_an_pending = 0;
        if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
                /* Force a link down visible on the other side */
                if (bp->link_up) {
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(20);
                        spin_lock_bh(&bp->phy_lock);
                }

                bnx2_write_phy(bp, bp->mii_adv, new_adv);
                bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
                        BMCR_ANENABLE);
                /* Speed up link-up time when the link partner
                 * does not autonegotiate which is very common
                 * in blade servers. Some blade servers use
                 * IPMI for keyboard input and it's important
                 * to minimize link disruptions. Autoneg. involves
                 * exchanging base pages plus 3 next pages and
                 * normally completes in about 120 msec.
                 */
                bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }

        return 0;
}
1851
/* All fibre speeds this device can advertise; 2.5G is included only when
 * the PHY is 2.5G capable.  NOTE: expands to an expression that reads the
 * local variable 'bp' — only usable where 'bp' is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* All copper speeds advertised via ethtool (1000HALF is never offered). */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement register masks for 10/100 and 1000 speed bits. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1866
1867 static void
1868 bnx2_set_default_remote_link(struct bnx2 *bp)
1869 {
1870         u32 link;
1871
1872         if (bp->phy_port == PORT_TP)
1873                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1874         else
1875                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1876
1877         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1878                 bp->req_line_speed = 0;
1879                 bp->autoneg |= AUTONEG_SPEED;
1880                 bp->advertising = ADVERTISED_Autoneg;
1881                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1882                         bp->advertising |= ADVERTISED_10baseT_Half;
1883                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1884                         bp->advertising |= ADVERTISED_10baseT_Full;
1885                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1886                         bp->advertising |= ADVERTISED_100baseT_Half;
1887                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1888                         bp->advertising |= ADVERTISED_100baseT_Full;
1889                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1890                         bp->advertising |= ADVERTISED_1000baseT_Full;
1891                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1892                         bp->advertising |= ADVERTISED_2500baseX_Full;
1893         } else {
1894                 bp->autoneg = 0;
1895                 bp->advertising = 0;
1896                 bp->req_duplex = DUPLEX_FULL;
1897                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1898                         bp->req_line_speed = SPEED_10;
1899                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1900                                 bp->req_duplex = DUPLEX_HALF;
1901                 }
1902                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1903                         bp->req_line_speed = SPEED_100;
1904                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1905                                 bp->req_duplex = DUPLEX_HALF;
1906                 }
1907                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1908                         bp->req_line_speed = SPEED_1000;
1909                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1910                         bp->req_line_speed = SPEED_2500;
1911         }
1912 }
1913
1914 static void
1915 bnx2_set_default_link(struct bnx2 *bp)
1916 {
1917         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1918                 bnx2_set_default_remote_link(bp);
1919                 return;
1920         }
1921
1922         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1923         bp->req_line_speed = 0;
1924         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1925                 u32 reg;
1926
1927                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1928
1929                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1930                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1931                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1932                         bp->autoneg = 0;
1933                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1934                         bp->req_duplex = DUPLEX_FULL;
1935                 }
1936         } else
1937                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1938 }
1939
1940 static void
1941 bnx2_send_heart_beat(struct bnx2 *bp)
1942 {
1943         u32 msg;
1944         u32 addr;
1945
1946         spin_lock(&bp->indirect_lock);
1947         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1948         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1949         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1950         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1951         spin_unlock(&bp->indirect_lock);
1952 }
1953
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
        /* Handle a link-status event from the firmware on remote-PHY
         * devices: decode speed/duplex/flow-control from the shared
         * memory BNX2_LINK_STATUS word, update the driver state, report
         * any link change, and reprogram the MAC.
         */
        u32 msg;
        u8 link_up = bp->link_up;
        u8 old_port;

        msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

        /* The firmware piggybacks heart-beat expiry on this event. */
        if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
                bnx2_send_heart_beat(bp);

        msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

        if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
                bp->link_up = 0;
        else {
                u32 speed;

                bp->link_up = 1;
                speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
                bp->duplex = DUPLEX_FULL;
                /* The half-duplex cases set duplex and then deliberately
                 * fall through to pick up the line speed.
                 */
                switch (speed) {
                        case BNX2_LINK_STATUS_10HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_10FULL:
                                bp->line_speed = SPEED_10;
                                break;
                        case BNX2_LINK_STATUS_100HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_100BASE_T4:
                        case BNX2_LINK_STATUS_100FULL:
                                bp->line_speed = SPEED_100;
                                break;
                        case BNX2_LINK_STATUS_1000HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_1000FULL:
                                bp->line_speed = SPEED_1000;
                                break;
                        case BNX2_LINK_STATUS_2500HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_2500FULL:
                                bp->line_speed = SPEED_2500;
                                break;
                        default:
                                bp->line_speed = 0;
                                break;
                }

                /* Flow control: forced settings apply unless both speed
                 * and flow control are autonegotiated.
                 */
                bp->flow_ctrl = 0;
                if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
                        if (bp->duplex == DUPLEX_FULL)
                                bp->flow_ctrl = bp->req_flow_ctrl;
                } else {
                        if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_TX;
                        if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_RX;
                }

                /* A media change (TP <-> fibre) reloads the link defaults. */
                old_port = bp->phy_port;
                if (msg & BNX2_LINK_STATUS_SERDES_LINK)
                        bp->phy_port = PORT_FIBRE;
                else
                        bp->phy_port = PORT_TP;

                if (old_port != bp->phy_port)
                        bnx2_set_default_link(bp);

        }
        if (bp->link_up != link_up)
                bnx2_report_link(bp);

        bnx2_set_mac_link(bp);
}
2030
2031 static int
2032 bnx2_set_remote_link(struct bnx2 *bp)
2033 {
2034         u32 evt_code;
2035
2036         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2037         switch (evt_code) {
2038                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2039                         bnx2_remote_phy_event(bp);
2040                         break;
2041                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2042                 default:
2043                         bnx2_send_heart_beat(bp);
2044                         break;
2045         }
2046         return 0;
2047 }
2048
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        /* Program the copper PHY: either (re)start autonegotiation with
         * the configured advertisement, or force speed/duplex through
         * BMCR.  Called with phy_lock held; drops it around the forced
         * link-down delay.  Always returns 0.
         */
        u32 bmcr;
        u32 new_bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
                u32 new_adv_reg = 0;
                u32 new_adv1000_reg = 0;

                /* Current advertisement, masked to the bits we manage. */
                bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                        ADVERTISE_PAUSE_ASYM);

                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;

                /* Desired advertisement built from bp->advertising. */
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        new_adv_reg |= ADVERTISE_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        new_adv_reg |= ADVERTISE_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        new_adv_reg |= ADVERTISE_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        new_adv_reg |= ADVERTISE_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        new_adv1000_reg |= ADVERTISE_1000FULL;

                new_adv_reg |= ADVERTISE_CSMA;

                new_adv_reg |= bnx2_phy_get_pause_adv(bp);

                /* Only restart autoneg when the advertisement changed or
                 * autoneg is not currently enabled.
                 */
                if ((adv1000_reg != new_adv1000_reg) ||
                        (adv_reg != new_adv_reg) ||
                        ((bmcr & BMCR_ANENABLE) == 0)) {

                        bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
                        bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
                else if (bp->link_up) {
                        /* Flow ctrl may have changed from auto to forced */
                        /* or vice-versa. */

                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Forced speed/duplex path. */
        new_bmcr = 0;
        if (bp->req_line_speed == SPEED_100) {
                new_bmcr |= BMCR_SPEED100;
        }
        if (bp->req_duplex == DUPLEX_FULL) {
                new_bmcr |= BMCR_FULLDPLX;
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;

                /* BMSR latches link-down; read twice so bmsr reflects
                 * the current link state.
                 */
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(50);
                        spin_lock_bh(&bp->phy_lock);

                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                }

                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

                /* Normally, the new speed is setup after the link has
                 * gone down and up again. In some cases, link will not go
                 * down so we need to set up the new speed here.
                 */
                if (bmsr & BMSR_LSTATUS) {
                        bp->line_speed = bp->req_line_speed;
                        bp->duplex = bp->req_duplex;
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }
        return 0;
}
2147
2148 static int
2149 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2150 __releases(&bp->phy_lock)
2151 __acquires(&bp->phy_lock)
2152 {
2153         if (bp->loopback == MAC_LOOPBACK)
2154                 return 0;
2155
2156         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2157                 return (bnx2_setup_serdes_phy(bp, port));
2158         }
2159         else {
2160                 return (bnx2_setup_copper_phy(bp));
2161         }
2162 }
2163
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
        /* Initialize the 5709 SERDES PHY.  The 5709S exposes the IEEE
         * registers at an offset of 0x10, so the mii_* register map is
         * remapped first.  Always returns 0.
         */
        u32 val;

        bp->mii_bmcr = MII_BMCR + 0x10;
        bp->mii_bmsr = MII_BMSR + 0x10;
        bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
        bp->mii_adv = MII_ADVERTISE + 0x10;
        bp->mii_lpa = MII_LPA + 0x10;
        bp->mii_up1 = MII_BNX2_OVER1G_UP1;

        /* Select the autoneg MMD through the address-expansion block. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
        bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        if (reset_phy)
                bnx2_reset_phy(bp);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

        /* Force fiber mode instead of auto media detection. */
        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
        val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
        val |= MII_BNX2_SD_1000XCTL1_FIBER;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

        /* Advertise 2.5G only when the PHY is 2.5G capable. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
        bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                val |= BCM5708S_UP1_2G5;
        else
                val &= ~BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

        /* Enable BAM/T2 next-page exchange. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
        bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
        val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
        bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

        val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
              MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
        bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

        /* Leave the PHY pointing at the combo IEEE register block. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return 0;
}
2213
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
        /* Initialize the 5708 SERDES PHY: fiber mode with auto-detect,
         * PLL early-link detect, optional 2.5G advertisement, plus
         * chip-rev and NVRAM-driven TX signal tweaks.  Always returns 0.
         */
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->mii_up1 = BCM5708S_UP1;

        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
        bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
        val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
        val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

        /* Advertise 2.5G when capable. */
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
                bnx2_read_phy(bp, BCM5708S_UP1, &val);
                val |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, val);
        }

        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
                /* increase tx signal amplitude */
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                               BCM5708S_BLK_ADDR_TX_MISC);
                bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
                val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
                bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
        }

        /* Apply a board-specific TX control value from NVRAM, but only
         * on backplane designs.
         */
        val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
              BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

        if (val) {
                u32 is_backplane;

                is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
                if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_TX_MISC);
                        bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_DIG);
                }
        }
        return 0;
}
2271
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
        /* Initialize the 5706 SERDES PHY.  Adjusts the extended packet
         * length setting to match the current MTU (jumbo vs standard).
         * Always returns 0.
         */
        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

        if (bp->dev->mtu > 1500) {
                u32 val;

                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
        }
        else {
                u32 val;

                /* Clear extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
        }

        return 0;
}
2309
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
        /* Initialize the copper PHY: apply the CRC-fix and early-DAC
         * workarounds when flagged, match the extended packet length
         * setting to the MTU, and enable ethernet@wirespeed.  Always
         * returns 0.
         */
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
                /* Vendor-specified shadow/expansion register sequence
                 * for the CRC workaround.
                 */
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
                bnx2_write_phy(bp, 0x15, 0x310b);
                bnx2_write_phy(bp, 0x17, 0x201f);
                bnx2_write_phy(bp, 0x15, 0x9506);
                bnx2_write_phy(bp, 0x17, 0x401f);
                bnx2_write_phy(bp, 0x15, 0x14e2);
                bnx2_write_phy(bp, 0x18, 0x0400);
        }

        if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
                /* Clear bit 8 in DSP expand register 8 to disable
                 * early DAC.
                 */
                bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                               MII_BNX2_DSP_EXPAND_REG | 0x8);
                bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
                val &= ~(1 << 8);
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
        }

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val | 0x4000);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val | 0x1);
        }
        else {
                /* Clear extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val & ~0x1);
        }

        /* ethernet@wirespeed */
        bnx2_write_phy(bp, 0x18, 0x7007);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
        return 0;
}
2361
2362
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        /* Top-level PHY initialization: program the default MII register
         * map, read the PHY id, run the chip-specific SERDES or copper
         * init, then apply the link settings via bnx2_setup_phy().
         * Direct PHY access is skipped entirely when the firmware owns
         * the PHY (remote-PHY capable devices).  Called with phy_lock
         * held; bnx2_setup_phy() may drop and reacquire it.  Returns 0
         * or the error from the chip-specific init.
         */
        u32 val;
        int rc = 0;

        bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
        bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

        /* Default MII register offsets; the SERDES init routines below
         * remap some of these.
         */
        bp->mii_bmcr = MII_BMCR;
        bp->mii_bmsr = MII_BMSR;
        bp->mii_bmsr1 = MII_BMSR;
        bp->mii_adv = MII_ADVERTISE;
        bp->mii_lpa = MII_LPA;

        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                goto setup_phy;

        bnx2_read_phy(bp, MII_PHYSID1, &val);
        bp->phy_id = val << 16;
        bnx2_read_phy(bp, MII_PHYSID2, &val);
        bp->phy_id |= val & 0xffff;

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        rc = bnx2_init_5706s_phy(bp, reset_phy);
                else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                        rc = bnx2_init_5708s_phy(bp, reset_phy);
                else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                        rc = bnx2_init_5709s_phy(bp, reset_phy);
        }
        else {
                rc = bnx2_init_copper_phy(bp, reset_phy);
        }

setup_phy:
        if (!rc)
                rc = bnx2_setup_phy(bp, bp->phy_port);

        return rc;
}
2408
2409 static int
2410 bnx2_set_mac_loopback(struct bnx2 *bp)
2411 {
2412         u32 mac_mode;
2413
2414         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2415         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2416         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2417         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2418         bp->link_up = 1;
2419         return 0;
2420 }
2421
2422 static int bnx2_test_link(struct bnx2 *);
2423
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
        /* Put the PHY into loopback at forced 1G full duplex, wait up
         * to ~1 second for the link to settle, then configure the EMAC
         * for GMII with loopback/forced-link bits cleared.  Returns 0
         * on success or the PHY write error code.
         */
        u32 mac_mode;
        int rc, i;

        spin_lock_bh(&bp->phy_lock);
        rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
                            BMCR_SPEED1000);
        spin_unlock_bh(&bp->phy_lock);
        if (rc)
                return rc;

        /* Poll for link-up; give up silently after 10 tries. */
        for (i = 0; i < 10; i++) {
                if (bnx2_test_link(bp) == 0)
                        break;
                msleep(100);
        }

        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                      BNX2_EMAC_MODE_25G_MODE);

        mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
        bp->link_up = 1;
        return 0;
}
2453
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
        /* Post @msg_data (tagged with the next driver sequence number)
         * to the firmware mailbox and, if @ack is set, wait up to
         * BNX2_FW_ACK_TIME_OUT_MS for the firmware to echo the sequence
         * back.  @silent suppresses the timeout log message.
         *
         * Returns 0 on success, when no ack was requested, or for WAIT0
         * messages; -EBUSY on ack timeout (the firmware is also notified
         * of the timeout); -EIO when the firmware reports a non-OK
         * status.
         */
        int i;
        u32 val;

        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

        if (!ack)
                return 0;

        /* wait for an acknowledgement. */
        for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = bnx2_shmem_rd(bp, BNX2_FW_MB);

                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        /* WAIT0 messages do not require a successful ack. */
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                if (!silent)
                        pr_err("fw sync timeout, reset code = %x\n", msg_data);

                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

                return -EBUSY;
        }

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
2498
/* Initialize the 5709 on-chip context memory: kick off the hardware
 * memory-init sequence, then program the host page table with the DMA
 * address of every pre-allocated context block.
 *
 * Returns 0 on success, -EBUSY if the hardware fails to complete a
 * step, -ENOMEM if a context block was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context engine and request memory init; the page
	 * size (log2, relative to 256 bytes) is encoded at bit 16.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* The hardware clears MEM_INIT when the init completes. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Stage the low and high halves of the 64-bit DMA
		 * address of this context page...
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		/* ...then latch them into page table entry i. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* WRITE_REQ is cleared once the entry is consumed. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2546
/* Zero out the on-chip context memory for all 96 connection IDs on
 * pre-5709 chips, writing through the context window registers.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 workaround: remap virtual CIDs with
			 * bit 3 set to a different physical CID range.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context entry spans multiple physical pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2589
/* Work around bad RX buffer memory blocks: drain the hardware mbuf
 * free pool, remember the good buffers, and return only those to the
 * pool so the bad ones (flagged by bit 9 of the allocated address)
 * stay permanently allocated and are never handed out again.
 *
 * Returns 0 on success, -ENOMEM if the scratch array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		pr_err("Failed to allocate memory in %s\n", __func__);
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the buffer value in the format the FW_BUF_FREE
		 * register expects (value replicated at bit 9, bit 0 set).
		 */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2640
2641 static void
2642 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2643 {
2644         u32 val;
2645
2646         val = (mac_addr[0] << 8) | mac_addr[1];
2647
2648         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2649
2650         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2651                 (mac_addr[4] << 8) | mac_addr[5];
2652
2653         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2654 }
2655
/* Allocate and DMA-map a fresh page for RX page ring slot @index and
 * publish its address in the matching buffer descriptor.
 *
 * Returns 0 on success, -ENOMEM if no page is available, -EIO if the
 * DMA mapping fails (the page is freed in that case).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	/* Record the page and mapping for later unmap, then hand the
	 * 64-bit DMA address to the hardware via the descriptor.
	 */
	rx_pg->page = page;
	dma_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2680
2681 static void
2682 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2683 {
2684         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2685         struct page *page = rx_pg->page;
2686
2687         if (!page)
2688                 return;
2689
2690         pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2691                        PCI_DMA_FROMDEVICE);
2692
2693         __free_page(page);
2694         rx_pg->page = NULL;
2695 }
2696
/* Allocate and DMA-map a new skb for RX ring slot @index, point the
 * matching buffer descriptor at it, and advance the producer byte
 * sequence counter.
 *
 * Returns 0 on success, -ENOMEM if no skb could be allocated, -EIO if
 * the DMA mapping fails (the skb is freed in that case).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary before mapping. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	/* Publish the 64-bit DMA address in the buffer descriptor. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2731
2732 static int
2733 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2734 {
2735         struct status_block *sblk = bnapi->status_blk.msi;
2736         u32 new_link_state, old_link_state;
2737         int is_set = 1;
2738
2739         new_link_state = sblk->status_attn_bits & event;
2740         old_link_state = sblk->status_attn_bits_ack & event;
2741         if (new_link_state != old_link_state) {
2742                 if (new_link_state)
2743                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2744                 else
2745                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2746         } else
2747                 is_set = 0;
2748
2749         return is_set;
2750 }
2751
/* Handle PHY-related attention events under the PHY lock: re-evaluate
 * the link on a link-state change, and refresh remote-PHY state on a
 * timer-abort event.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2765
2766 static inline u16
2767 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2768 {
2769         u16 cons;
2770
2771         /* Tell compiler that status block fields can change. */
2772         barrier();
2773         cons = *bnapi->hw_tx_cons_ptr;
2774         barrier();
2775         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2776                 cons++;
2777         return cons;
2778 }
2779
/* Reclaim completed TX descriptors for the ring served by @bnapi:
 * unmap and free up to @budget transmitted skbs, then wake the netdev
 * queue if it was stopped and enough ring space is now available.
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* The napi instance index selects the matching TX queue. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Only reclaim the packet once the hardware has
			 * consumed all of its BDs (head + frags + 1).
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wrap. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment BD that follows the head BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				dma_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-read the hardware index in case more completions
		 * arrived while we were reclaiming.
		 */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to avoid racing with the xmit path. */
	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2870
/* Recycle @count consumed RX page-ring entries back to the producer
 * side instead of allocating fresh pages.  If @skb is non-NULL, its
 * last page fragment is first returned to the ring and the skb freed
 * (used on the error path when a replacement page could not be
 * allocated).
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		/* Detach the last frag's page and give it back to the
		 * consumer slot so it can be reused.
		 */
		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	/* Move each consumed page (and its DMA mapping/descriptor
	 * address) from the consumer slot to the next producer slot.
	 */
	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2926
/* Return a received skb to the RX ring at producer slot @prod instead
 * of passing it up the stack (e.g. on error or after a copy).  The
 * DMA mapping and descriptor address are moved from the consumer slot
 * to the producer slot when they differ.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Give the header region (synced for the CPU during receive)
	 * back to the device.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already correct. */
	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2956
/* Finish building a received skb.  @ring_idx packs the consumer index
 * in the high 16 bits and the producer index in the low 16 bits.  For
 * non-split packets (@hdr_len == 0) the whole frame is in the linear
 * buffer; otherwise the first @hdr_len bytes are linear and the rest
 * is gathered from the page ring as skb fragments.  @len excludes the
 * 4-byte trailer that the page math below accounts for.
 *
 * Returns 0 on success; on allocation failure the buffers are
 * recycled back to the rings and an error code is returned.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	/* Refill the producer slot first; if that fails, recycle the
	 * current buffer (and any page-ring entries) and bail out.
	 */
	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* Remaining bytes (including the 4-byte trailer) live in
		 * the page ring.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only trailer bytes remain: recycle the
				 * unused pages and trim the trailer off
				 * the skb instead of attaching a frag.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				/* Roll back ring state and recycle the
				 * remaining pages, including the one just
				 * attached to the skb.
				 */
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3055
3056 static inline u16
3057 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3058 {
3059         u16 cons;
3060
3061         /* Tell compiler that status block fields can change. */
3062         barrier();
3063         cons = *bnapi->hw_rx_cons_ptr;
3064         barrier();
3065         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3066                 cons++;
3067         return cons;
3068 }
3069
/* NAPI RX processing for the ring served by @bnapi: walk completed RX
 * descriptors up to @budget packets, hand good frames to the stack via
 * GRO, recycle buffers on error, and finally publish the new producer
 * indices to the hardware.  Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header region for the CPU; the
		 * rest is handled when the buffer is unmapped or reused.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The hardware prepends an l2_fhdr with length/status. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop errored frames, recycling their buffers/pages. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte frame trailer from the length. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			/* Small packet: copy it into a fresh skb and
			 * recycle the original ring buffer.
			 */
			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No VLAN group to accelerate into:
				 * re-insert the 802.1Q tag into the
				 * packet data by hand.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-MTU frames unless they carry a VLAN header
		 * (0x8100) that accounts for the extra bytes.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum results when RX csum is on. */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
		else
#endif
			napi_gro_receive(&bnapi->napi, skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Tell the hardware about the new producer positions. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3245
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	/* Warm the status block cache line before NAPI reads it, and
	 * mask further interrupts until NAPI re-enables them.
	 */
	prefetch(bnapi->status_blk.msi);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3268
/* One-shot MSI ISR: unlike bnx2_msi(), no explicit mask/ack register
 * write is performed here before scheduling NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	/* Warm the status block cache line before NAPI reads it. */
	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3285
/* INTx (shared line) ISR: verify the interrupt is really ours, mask
 * and deassert it, then hand processing off to NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we saw so missed updates can be
	 * detected, then schedule NAPI.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3324
3325 static inline int
3326 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3327 {
3328         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3329         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3330
3331         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3332             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3333                 return 1;
3334         return 0;
3335 }
3336
3337 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3338                                  STATUS_ATTN_BITS_TIMER_ABORT)
3339
3340 static inline int
3341 bnx2_has_work(struct bnx2_napi *bnapi)
3342 {
3343         struct status_block *sblk = bnapi->status_blk.msi;
3344
3345         if (bnx2_has_fast_work(bnapi))
3346                 return 1;
3347
3348 #ifdef BCM_CNIC
3349         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3350                 return 1;
3351 #endif
3352
3353         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3354             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3355                 return 1;
3356
3357         return 0;
3358 }
3359
/* Called periodically from the driver timer to detect a missed MSI.
 * If work has been pending across two consecutive checks without the
 * status index advancing, the MSI was presumably lost; pulse the MSI
 * enable bit and invoke the handler by hand to recover.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI enable off then on, and simulate
			 * the interrupt to kick NAPI processing.
			 */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember the index seen this pass for the next comparison. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3381
#ifdef BCM_CNIC
/* Hand the shared status block to the registered CNIC driver, if any,
 * and record the status index it has consumed.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	ops = rcu_dereference(bp->cnic_ops);
	if (ops != NULL)
		bnapi->cnic_tag = ops->cnic_handler(bp->cnic_data,
						    bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3398
3399 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3400 {
3401         struct status_block *sblk = bnapi->status_blk.msi;
3402         u32 status_attn_bits = sblk->status_attn_bits;
3403         u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3404
3405         if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3406             (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3407
3408                 bnx2_phy_int(bp, bnapi);
3409
3410                 /* This is needed to take care of transient status
3411                  * during link changes.
3412                  */
3413                 REG_WR(bp, BNX2_HC_COMMAND,
3414                        bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3415                 REG_RD(bp, BNX2_HC_COMMAND);
3416         }
3417 }
3418
3419 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3420                           int work_done, int budget)
3421 {
3422         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3423         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3424
3425         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3426                 bnx2_tx_int(bp, bnapi, 0);
3427
3428         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3429                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3430
3431         return work_done;
3432 }
3433
/* NAPI poll handler for the extra MSI-X vectors.  Only fast-path
 * (rx/tx) work is handled here; link and CNIC events are serviced by
 * vector 0's bnx2_poll().
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Re-enable this vector's interrupt and report
			 * the last processed status index to the hw.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3460
/* Main NAPI poll handler (vector 0 / INTx / MSI): services link and
 * CNIC events in addition to fast-path rx/tx work.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			/* MSI/MSI-X: a single ack write re-enables the
			 * interrupt and reports the processed index.
			 */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first ack with the line still masked,
			 * then unmask with a second write.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3509
3510 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3511  * from set_multicast.
3512  */
/* Program the receive filtering (promiscuous, multicast hash, unicast
 * match filters) to match the net_device flags and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Strip VLAN tags in hw only while a vlan group is registered. */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address by CRC32 into one bit of the
		 * 256-bit multicast hash filter.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* More unicast addresses than hw match filters: fall back to
	 * promiscuous mode.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort-user0 filter. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3602
3603 static int __devinit
3604 check_fw_section(const struct firmware *fw,
3605                  const struct bnx2_fw_file_section *section,
3606                  u32 alignment, bool non_empty)
3607 {
3608         u32 offset = be32_to_cpu(section->offset);
3609         u32 len = be32_to_cpu(section->len);
3610
3611         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3612                 return -EINVAL;
3613         if ((non_empty && len == 0) || len > fw->size - offset ||
3614             len & (alignment - 1))
3615                 return -EINVAL;
3616         return 0;
3617 }
3618
3619 static int __devinit
3620 check_mips_fw_entry(const struct firmware *fw,
3621                     const struct bnx2_mips_fw_file_entry *entry)
3622 {
3623         if (check_fw_section(fw, &entry->text, 4, true) ||
3624             check_fw_section(fw, &entry->data, 4, false) ||
3625             check_fw_section(fw, &entry->rodata, 4, false))
3626                 return -EINVAL;
3627         return 0;
3628 }
3629
3630 static int __devinit
3631 bnx2_request_firmware(struct bnx2 *bp)
3632 {
3633         const char *mips_fw_file, *rv2p_fw_file;
3634         const struct bnx2_mips_fw_file *mips_fw;
3635         const struct bnx2_rv2p_fw_file *rv2p_fw;
3636         int rc;
3637
3638         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3639                 mips_fw_file = FW_MIPS_FILE_09;
3640                 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3641                     (CHIP_ID(bp) == CHIP_ID_5709_A1))
3642                         rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3643                 else
3644                         rv2p_fw_file = FW_RV2P_FILE_09;
3645         } else {
3646                 mips_fw_file = FW_MIPS_FILE_06;
3647                 rv2p_fw_file = FW_RV2P_FILE_06;
3648         }
3649
3650         rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3651         if (rc) {
3652                 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3653                 return rc;
3654         }
3655
3656         rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3657         if (rc) {
3658                 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3659                 return rc;
3660         }
3661         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3662         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3663         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3664             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3665             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3666             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3667             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3668             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3669                 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3670                 return -EINVAL;
3671         }
3672         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3673             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3674             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3675                 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3676                 return -EINVAL;
3677         }
3678
3679         return 0;
3680 }
3681
3682 static u32
3683 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3684 {
3685         switch (idx) {
3686         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3687                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3688                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3689                 break;
3690         }
3691         return rv2p_code;
3692 }
3693
/* Download one RV2P processor image from the firmware file into the
 * chip, apply the fixup table, and leave the processor in reset
 * (it is un-stalled later during chip initialization).
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write the code 8 bytes (one instruction word pair) at a
	 * time: high word, low word, then the address/command write
	 * that commits them.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Re-write the instructions listed in the fixup table, letting
	 * rv2p_fw_fixup() patch in run-time values (e.g. BD page size).
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3753
3754 static int
3755 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3756             const struct bnx2_mips_fw_file_entry *fw_entry)
3757 {
3758         u32 addr, len, file_offset;
3759         __be32 *data;
3760         u32 offset;
3761         u32 val;
3762
3763         /* Halt the CPU. */
3764         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3765         val |= cpu_reg->mode_value_halt;
3766         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3767         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3768
3769         /* Load the Text area. */
3770         addr = be32_to_cpu(fw_entry->text.addr);
3771         len = be32_to_cpu(fw_entry->text.len);
3772         file_offset = be32_to_cpu(fw_entry->text.offset);
3773         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3774
3775         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3776         if (len) {
3777                 int j;
3778
3779                 for (j = 0; j < (len / 4); j++, offset += 4)
3780                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3781         }
3782
3783         /* Load the Data area. */
3784         addr = be32_to_cpu(fw_entry->data.addr);
3785         len = be32_to_cpu(fw_entry->data.len);
3786         file_offset = be32_to_cpu(fw_entry->data.offset);
3787         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3788
3789         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3790         if (len) {
3791                 int j;
3792
3793                 for (j = 0; j < (len / 4); j++, offset += 4)
3794                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3795         }
3796
3797         /* Load the Read-Only area. */
3798         addr = be32_to_cpu(fw_entry->rodata.addr);
3799         len = be32_to_cpu(fw_entry->rodata.len);
3800         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3801         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3802
3803         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3804         if (len) {
3805                 int j;
3806
3807                 for (j = 0; j < (len / 4); j++, offset += 4)
3808                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3809         }
3810
3811         /* Clear the pre-fetch instruction. */
3812         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3813
3814         val = be32_to_cpu(fw_entry->start_addr);
3815         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3816
3817         /* Start the CPU. */
3818         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3819         val &= ~cpu_reg->mode_value_halt;
3820         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3821         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3822
3823         return 0;
3824 }
3825
3826 static int
3827 bnx2_init_cpus(struct bnx2 *bp)
3828 {
3829         const struct bnx2_mips_fw_file *mips_fw =
3830                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3831         const struct bnx2_rv2p_fw_file *rv2p_fw =
3832                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3833         int rc;
3834
3835         /* Initialize the RV2P processor. */
3836         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3837         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3838
3839         /* Initialize the RX Processor. */
3840         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3841         if (rc)
3842                 goto init_cpu_err;
3843
3844         /* Initialize the TX Processor. */
3845         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3846         if (rc)
3847                 goto init_cpu_err;
3848
3849         /* Initialize the TX Patch-up Processor. */
3850         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3851         if (rc)
3852                 goto init_cpu_err;
3853
3854         /* Initialize the Completion Processor. */
3855         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3856         if (rc)
3857                 goto init_cpu_err;
3858
3859         /* Initialize the Command Processor. */
3860         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3861
3862 init_cpu_err:
3863         return rc;
3864 }
3865
/* Transition the device between PCI power states D0 and D3hot.  For
 * D3hot, the MAC is optionally configured for Wake-on-LAN (magic
 * packet) before the PCI power state is changed.  Returns 0, or
 * -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear pending wake-up events and disable the magic
		 * packet matcher while running in D0.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* For TP ports, renegotiate down to 10/100 for
			 * the suspended state, preserving the user's
			 * autoneg/advertising settings across the call.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* 5706 A0/A1 are only put in D3hot when WOL is
			 * enabled, otherwise left in D0 — presumably a
			 * chip limitation (TODO confirm errata).
			 */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4003
4004 static int
4005 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4006 {
4007         u32 val;
4008         int j;
4009
4010         /* Request access to the flash interface. */
4011         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4012         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4013                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4014                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4015                         break;
4016
4017                 udelay(5);
4018         }
4019
4020         if (j >= NVRAM_TIMEOUT_COUNT)
4021                 return -EBUSY;
4022
4023         return 0;
4024 }
4025
4026 static int
4027 bnx2_release_nvram_lock(struct bnx2 *bp)
4028 {
4029         int j;
4030         u32 val;
4031
4032         /* Relinquish nvram interface. */
4033         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4034
4035         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4036                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4037                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4038                         break;
4039
4040                 udelay(5);
4041         }
4042
4043         if (j >= NVRAM_TIMEOUT_COUNT)
4044                 return -EBUSY;
4045
4046         return 0;
4047 }
4048
4049
4050 static int
4051 bnx2_enable_nvram_write(struct bnx2 *bp)
4052 {
4053         u32 val;
4054
4055         val = REG_RD(bp, BNX2_MISC_CFG);
4056         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4057
4058         if (bp->flash_info->flags & BNX2_NV_WREN) {
4059                 int j;
4060
4061                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4062                 REG_WR(bp, BNX2_NVM_COMMAND,
4063                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4064
4065                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4066                         udelay(5);
4067
4068                         val = REG_RD(bp, BNX2_NVM_COMMAND);
4069                         if (val & BNX2_NVM_COMMAND_DONE)
4070                                 break;
4071                 }
4072
4073                 if (j >= NVRAM_TIMEOUT_COUNT)
4074                         return -EBUSY;
4075         }
4076         return 0;
4077 }
4078
4079 static void
4080 bnx2_disable_nvram_write(struct bnx2 *bp)
4081 {
4082         u32 val;
4083
4084         val = REG_RD(bp, BNX2_MISC_CFG);
4085         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4086 }
4087
4088
4089 static void
4090 bnx2_enable_nvram_access(struct bnx2 *bp)
4091 {
4092         u32 val;
4093
4094         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4095         /* Enable both bits, even on read. */
4096         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4097                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4098 }
4099
4100 static void
4101 bnx2_disable_nvram_access(struct bnx2 *bp)
4102 {
4103         u32 val;
4104
4105         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4106         /* Disable both bits, even after read. */
4107         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4108                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4109                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4110 }
4111
/* Erase the NVRAM page containing @offset.
 *
 * Buffered (EEPROM-style) parts need no explicit erase, so this is a
 * no-op for them.  For raw flash an ERASE command is issued and the
 * DONE bit is polled up to NVRAM_TIMEOUT_COUNT times, 5 us apart.
 *
 * Caller must hold the NVRAM lock and have NVRAM access (and write)
 * enabled.  Returns 0 on success, -EBUSY if the controller never
 * signals completion.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4151
/* Read one 32-bit word of NVRAM at byte @offset into @ret_val (4 bytes,
 * stored in flash/big-endian byte order).  @cmd_flags carries the
 * FIRST/LAST framing bits when this read is part of a longer sequence.
 *
 * Caller must hold the NVRAM lock and have access enabled.  Returns 0
 * on success, -EBUSY if DONE is not signalled within the poll timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709.
	 * Linear offsets are translated to page-number/byte-in-page
	 * addressing for parts flagged BNX2_NV_TRANSLATE. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4195
4196
/* Write one 32-bit word (@val, 4 bytes in flash/big-endian byte order)
 * to NVRAM at byte @offset.  @cmd_flags carries the FIRST/LAST framing
 * bits when the write is part of a longer sequence.
 *
 * Caller must hold the NVRAM lock and have both NVRAM access and
 * write-enable set.  Returns 0 on success, -EBUSY on poll timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4240
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, then determine the usable flash size.
 *
 * 5709 parts have a fixed flash spec.  Older chips are matched against
 * flash_table[] using either the live NVM_CFG1 value (if the interface
 * was already reconfigured) or the strapping pins; in the latter case
 * the NVM config registers are programmed from the matched table entry
 * under the NVRAM lock.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or an error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strapping encoding. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared memory; fall back to the
	 * flash spec's total size if shmem reports none. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4323
4324 static int
4325 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4326                 int buf_size)
4327 {
4328         int rc = 0;
4329         u32 cmd_flags, offset32, len32, extra;
4330
4331         if (buf_size == 0)
4332                 return 0;
4333
4334         /* Request access to the flash interface. */
4335         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4336                 return rc;
4337
4338         /* Enable access to flash interface */
4339         bnx2_enable_nvram_access(bp);
4340
4341         len32 = buf_size;
4342         offset32 = offset;
4343         extra = 0;
4344
4345         cmd_flags = 0;
4346
4347         if (offset32 & 3) {
4348                 u8 buf[4];
4349                 u32 pre_len;
4350
4351                 offset32 &= ~3;
4352                 pre_len = 4 - (offset & 3);
4353
4354                 if (pre_len >= len32) {
4355                         pre_len = len32;
4356                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4357                                     BNX2_NVM_COMMAND_LAST;
4358                 }
4359                 else {
4360                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4361                 }
4362
4363                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4364
4365                 if (rc)
4366                         return rc;
4367
4368                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4369
4370                 offset32 += 4;
4371                 ret_buf += pre_len;
4372                 len32 -= pre_len;
4373         }
4374         if (len32 & 3) {
4375                 extra = 4 - (len32 & 3);
4376                 len32 = (len32 + 4) & ~3;
4377         }
4378
4379         if (len32 == 4) {
4380                 u8 buf[4];
4381
4382                 if (cmd_flags)
4383                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4384                 else
4385                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4386                                     BNX2_NVM_COMMAND_LAST;
4387
4388                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4389
4390                 memcpy(ret_buf, buf, 4 - extra);
4391         }
4392         else if (len32 > 0) {
4393                 u8 buf[4];
4394
4395                 /* Read the first word. */
4396                 if (cmd_flags)
4397                         cmd_flags = 0;
4398                 else
4399                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4400
4401                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4402
4403                 /* Advance to the next dword. */
4404                 offset32 += 4;
4405                 ret_buf += 4;
4406                 len32 -= 4;
4407
4408                 while (len32 > 4 && rc == 0) {
4409                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4410
4411                         /* Advance to the next dword. */
4412                         offset32 += 4;
4413                         ret_buf += 4;
4414                         len32 -= 4;
4415                 }
4416
4417                 if (rc)
4418                         return rc;
4419
4420                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4421                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4422
4423                 memcpy(ret_buf, buf, 4 - extra);
4424         }
4425
4426         /* Disable access to flash interface */
4427         bnx2_disable_nvram_access(bp);
4428
4429         bnx2_release_nvram_lock(bp);
4430
4431         return rc;
4432 }
4433
/* Write @buf_size bytes from @data_buf to NVRAM at byte @offset.
 *
 * Unaligned starts/ends are handled by first reading the surrounding
 * dwords (read-modify-write via a kmalloc'd aligned copy of the data).
 * For non-buffered flash, each affected page is read into a scratch
 * buffer, erased, and rewritten: preserved leading bytes, new data,
 * preserved trailing bytes.  The NVRAM lock is acquired and released
 * around each page so other agents can interleave access.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): error exits inside the per-page loop jump to
 * nvram_write_end after the NVRAM lock was acquired and access/write
 * enabled, without releasing them — looks like the same lock-leak
 * pattern as the read path; confirm against firmware expectations
 * before relying on error recovery here.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: capture the dword that precedes/overlaps it. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: capture the dword that overlaps the tail. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build a dword-aligned image: preserved head + new data +
	 * preserved tail. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a scratch page buffer (264 bytes
	 * covers the largest supported page). */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4613
/* Negotiate optional capabilities with the bootcode firmware.
 *
 * Reads the firmware capability mailbox and, for each capability both
 * sides support (VLAN keep-alive with ASF, remote PHY on SerDes parts),
 * updates the driver flags and accumulates an acknowledgement signature
 * which is written back when the interface is running.  For remote PHY,
 * the current PHY port (fibre vs. TP) is taken from the link status
 * word in shared memory.
 */
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	/* Without ASF management firmware, VLANs can always be kept. */
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	/* Acknowledge the accepted capabilities back to the firmware. */
	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
4653
/* Point the GRC windows at the MSI-X table and PBA so MSI-X structures
 * are reachable through the register BAR.  The separate-window mode
 * must be selected before programming the individual window bases.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4662
/* Perform a coordinated soft reset of the chip.
 *
 * Quiesces DMA, synchronizes with the bootcode firmware before and
 * after the reset, issues the chip-specific reset (MISC_COMMAND on
 * 5709, PCICFG core-reset elsewhere), verifies byte-swap configuration,
 * and re-reads the firmware capabilities.  @reset_code is the
 * BNX2_DRV_MSG_CODE_* reason passed to the firmware.
 *
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV
 * on bad endian configuration, or a firmware-sync error.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via MISC_COMMAND; the read flushes the
		 * posted write before the settle delay. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; reset may have changed the
	 * remote-PHY port selection. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4772
/* Bring the chip to an operational state after a reset.
 *
 * Programs DMA/byte-swap configuration, initializes contexts and the
 * on-chip CPUs, detects NVRAM, sets the MAC address, MTU and RBUF
 * sizing, clears status/statistics blocks, configures the host
 * coalescing block (including per-vector parameters for MSI-X), sets
 * the receive filter, and finally tells the firmware initialization is
 * complete before enabling the chip's functional blocks.
 *
 * Returns 0 on success or a negative errno from context/CPU init or
 * the firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* One-shot DMA burst tuning for 133 MHz PCI-X. */
	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Disable PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* RBUF thresholds assume at least a standard 1500-byte MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Status blocks are contiguous; zero them all and reset the
	 * per-vector last-seen indices. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing: trip counts and tick timers, interrupt values
	 * in the upper 16 bits. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	/* 5706 A1 erratum: timer modes unusable, stats collection only. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector coalescing config for the additional MSI-X vectors. */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4997
4998 static void
4999 bnx2_clear_ring_states(struct bnx2 *bp)
5000 {
5001         struct bnx2_napi *bnapi;
5002         struct bnx2_tx_ring_info *txr;
5003         struct bnx2_rx_ring_info *rxr;
5004         int i;
5005
5006         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5007                 bnapi = &bp->bnx2_napi[i];
5008                 txr = &bnapi->tx_ring;
5009                 rxr = &bnapi->rx_ring;
5010
5011                 txr->tx_cons = 0;
5012                 txr->hw_tx_cons = 0;
5013                 rxr->rx_prod_bseq = 0;
5014                 rxr->rx_prod = 0;
5015                 rxr->rx_cons = 0;
5016                 rxr->rx_pg_prod = 0;
5017                 rxr->rx_pg_cons = 0;
5018         }
5019 }
5020
5021 static void
5022 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5023 {
5024         u32 val, offset0, offset1, offset2, offset3;
5025         u32 cid_addr = GET_CID_ADDR(cid);
5026
5027         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5028                 offset0 = BNX2_L2CTX_TYPE_XI;
5029                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5030                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5031                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5032         } else {
5033                 offset0 = BNX2_L2CTX_TYPE;
5034                 offset1 = BNX2_L2CTX_CMD_TYPE;
5035                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5036                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5037         }
5038         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5039         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5040
5041         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5042         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5043
5044         val = (u64) txr->tx_desc_mapping >> 32;
5045         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5046
5047         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5048         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5049 }
5050
5051 static void
5052 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5053 {
5054         struct tx_bd *txbd;
5055         u32 cid = TX_CID;
5056         struct bnx2_napi *bnapi;
5057         struct bnx2_tx_ring_info *txr;
5058
5059         bnapi = &bp->bnx2_napi[ring_num];
5060         txr = &bnapi->tx_ring;
5061
5062         if (ring_num == 0)
5063                 cid = TX_CID;
5064         else
5065                 cid = TX_TSS_CID + ring_num - 1;
5066
5067         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5068
5069         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5070
5071         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5072         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5073
5074         txr->tx_prod = 0;
5075         txr->tx_prod_bseq = 0;
5076
5077         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5078         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5079
5080         bnx2_init_tx_context(bp, cid, txr);
5081 }
5082
5083 static void
5084 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5085                      int num_rings)
5086 {
5087         int i;
5088         struct rx_bd *rxbd;
5089
5090         for (i = 0; i < num_rings; i++) {
5091                 int j;
5092
5093                 rxbd = &rx_ring[i][0];
5094                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5095                         rxbd->rx_bd_len = buf_size;
5096                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5097                 }
5098                 if (i == (num_rings - 1))
5099                         j = 0;
5100                 else
5101                         j = i + 1;
5102                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5103                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5104         }
5105 }
5106
/* Set up one rx ring: program its L2 context, chain its BD pages,
 * optionally configure the jumbo page (SGL) ring, pre-fill both with
 * buffers, and publish the initial producer indices to the hardware.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base rx CID; extra RSS rings follow it. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Zero page-buffer size first; re-enabled below for jumbo mode. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo frames: also set up the page (SGL) ring. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Tell the chip where the first rx BD page lives. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill only logs a warning. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the rx skb ring the same way. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish initial producer indices to the hardware mailboxes. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5192
/* Initialize every tx and rx ring and, when multiple rings are in use,
 * enable TSS on the tx side and program the RSS indirection table and
 * hash configuration on the rx side.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	/* More than one tx ring: enable TSS over the extra rings. */
	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Build the RSS indirection table.  Entries cycle over
		 * num_rx_rings - 1 values; bytes are accumulated into
		 * tbl_32 and flushed to the RXP scratch area four at a
		 * time in big-endian order (cpu_to_be32 keeps the byte
		 * layout consistent regardless of host endianness).
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		/* Hash on all IPv4 and IPv6 packet types. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5237
5238 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5239 {
5240         u32 max, num_rings = 1;
5241
5242         while (ring_size > MAX_RX_DESC_CNT) {
5243                 ring_size -= MAX_RX_DESC_CNT;
5244                 num_rings++;
5245         }
5246         /* round to next power of 2 */
5247         max = max_size;
5248         while ((max & num_rings) == 0)
5249                 max >>= 1;
5250
5251         if (num_rings != max)
5252                 max <<= 1;
5253
5254         return max;
5255 }
5256
/* Derive all rx buffer and ring sizing parameters from the current MTU
 * and the requested ring size.  If a full frame plus overhead doesn't
 * fit in one page (and the chip supports it), switch to split mode:
 * small header buffers in the rx ring plus a page (SGL) ring for the
 * rest of the frame.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint: aligned data area plus skb overhead. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* NOTE(review): the 40 presumably accounts for headers
		 * kept in the header buffer -- confirm against the rx
		 * split-header path.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* In split mode the rx skb only holds the header part. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5295
/* Unmap and free every skb still queued on the tx rings.  Called after
 * the chip has been reset, so no tx completion can race with us.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		/* j advances inside the body: one slot for the skb head
		 * plus one per page fragment.
		 */
		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* First BD maps the linear part of the skb. */
			pci_unmap_single(bp->pdev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* The next nr_frags BDs map the page fragments;
			 * TX_RING_IDX() handles wraparound.
			 */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				pci_unmap_page(bp->pdev,
					dma_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
5339
5340 static void
5341 bnx2_free_rx_skbs(struct bnx2 *bp)
5342 {
5343         int i;
5344
5345         for (i = 0; i < bp->num_rx_rings; i++) {
5346                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5347                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5348                 int j;
5349
5350                 if (rxr->rx_buf_ring == NULL)
5351                         return;
5352
5353                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5354                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5355                         struct sk_buff *skb = rx_buf->skb;
5356
5357                         if (skb == NULL)
5358                                 continue;
5359
5360                         pci_unmap_single(bp->pdev,
5361                                          dma_unmap_addr(rx_buf, mapping),
5362                                          bp->rx_buf_use_size,
5363                                          PCI_DMA_FROMDEVICE);
5364
5365                         rx_buf->skb = NULL;
5366
5367                         dev_kfree_skb(skb);
5368                 }
5369                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5370                         bnx2_free_rx_page(bp, rxr, j);
5371         }
5372 }
5373
/* Drop every pending buffer: tx skbs first, then rx skbs and pages. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5380
5381 static int
5382 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5383 {
5384         int rc;
5385
5386         rc = bnx2_reset_chip(bp, reset_code);
5387         bnx2_free_skbs(bp);
5388         if (rc)
5389                 return rc;
5390
5391         if ((rc = bnx2_init_chip(bp)) != 0)
5392                 return rc;
5393
5394         bnx2_init_all_rings(bp);
5395         return 0;
5396 }
5397
5398 static int
5399 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5400 {
5401         int rc;
5402
5403         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5404                 return rc;
5405
5406         spin_lock_bh(&bp->phy_lock);
5407         bnx2_init_phy(bp, reset_phy);
5408         bnx2_set_link(bp);
5409         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5410                 bnx2_remote_phy_event(bp);
5411         spin_unlock_bh(&bp->phy_lock);
5412         return 0;
5413 }
5414
5415 static int
5416 bnx2_shutdown_chip(struct bnx2 *bp)
5417 {
5418         u32 reset_code;
5419
5420         if (bp->flags & BNX2_FLAG_NO_WOL)
5421                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5422         else if (bp->wol)
5423                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5424         else
5425                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5426
5427         return bnx2_reset_chip(bp, reset_code);
5428 }
5429
/* Self-test: walk a table of registers, checking that read/write bits
 * toggle and read-only bits are preserved.  Each table entry holds the
 * register offset, a flag to skip the entry on 5709 chips, the mask of
 * writable bits and the mask of read-only bits; the table ends at
 * offset 0xffff.  Returns 0 on success, -ENODEV on first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write 0: writable bits must read back 0, read-only
		 * bits must be unchanged.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: writable bits must read back 1,
		 * read-only bits must still be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5600
5601 static int
5602 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5603 {
5604         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5605                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5606         int i;
5607
5608         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5609                 u32 offset;
5610
5611                 for (offset = 0; offset < size; offset += 4) {
5612
5613                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5614
5615                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5616                                 test_pattern[i]) {
5617                                 return -ENODEV;
5618                         }
5619                 }
5620         }
5621         return 0;
5622 }
5623
5624 static int
5625 bnx2_test_memory(struct bnx2 *bp)
5626 {
5627         int ret = 0;
5628         int i;
5629         static struct mem_entry {
5630                 u32   offset;
5631                 u32   len;
5632         } mem_tbl_5706[] = {
5633                 { 0x60000,  0x4000 },
5634                 { 0xa0000,  0x3000 },
5635                 { 0xe0000,  0x4000 },
5636                 { 0x120000, 0x4000 },
5637                 { 0x1a0000, 0x4000 },
5638                 { 0x160000, 0x4000 },
5639                 { 0xffffffff, 0    },
5640         },
5641         mem_tbl_5709[] = {
5642                 { 0x60000,  0x4000 },
5643                 { 0xa0000,  0x3000 },
5644                 { 0xe0000,  0x4000 },
5645                 { 0x120000, 0x4000 },
5646                 { 0x1a0000, 0x4000 },
5647                 { 0xffffffff, 0    },
5648         };
5649         struct mem_entry *mem_tbl;
5650
5651         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5652                 mem_tbl = mem_tbl_5709;
5653         else
5654                 mem_tbl = mem_tbl_5706;
5655
5656         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5657                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5658                         mem_tbl[i].len)) != 0) {
5659                         return ret;
5660                 }
5661         }
5662
5663         return ret;
5664 }
5665
5666 #define BNX2_MAC_LOOPBACK       0
5667 #define BNX2_PHY_LOOPBACK       1
5668
/* Self-test: send one self-addressed frame through the selected
 * loopback path (MAC-internal or PHY) and verify it arrives intact on
 * the rx ring.  Returns 0 on success, -EINVAL for an unknown mode,
 * -ENOMEM/-EIO on setup failure, -ENODEV if the frame is lost or
 * corrupted.  (Remote-PHY setups skip the PHY test and return 0.)
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* The test always runs on vector 0's rings. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our own MAC as destination, zeroed
	 * source/type bytes, then an incrementing byte pattern.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a coalesce now to get a stable rx consumer baseline. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Queue the frame as a single tx BD and ring the doorbell. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Coalesce again so completions are visible without interrupts. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The tx side must have consumed exactly our one packet... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and the rx side must have received exactly one. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The frame header written by the chip precedes the data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject frames flagged as errored by the chip. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check (pkt_len includes the 4-byte CRC). */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5800
/* Bit flags returned by bnx2_test_loopback() identifying which loopback
 * mode(s) failed; BNX2_LOOPBACK_FAILED is the "everything failed" mask.
 */
#define BNX2_MAC_LOOPBACK_FAILED        1
#define BNX2_PHY_LOOPBACK_FAILED        2
#define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
                                         BNX2_PHY_LOOPBACK_FAILED)
5805
5806 static int
5807 bnx2_test_loopback(struct bnx2 *bp)
5808 {
5809         int rc = 0;
5810
5811         if (!netif_running(bp->dev))
5812                 return BNX2_LOOPBACK_FAILED;
5813
5814         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5815         spin_lock_bh(&bp->phy_lock);
5816         bnx2_init_phy(bp, 1);
5817         spin_unlock_bh(&bp->phy_lock);
5818         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5819                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5820         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5821                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5822         return rc;
5823 }
5824
/* Number of bytes of NVRAM verified by bnx2_test_nvram(), and the CRC32
 * residual expected when a block's trailing CRC is valid.
 */
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3
5827
5828 static int
5829 bnx2_test_nvram(struct bnx2 *bp)
5830 {
5831         __be32 buf[NVRAM_SIZE / 4];
5832         u8 *data = (u8 *) buf;
5833         int rc = 0;
5834         u32 magic, csum;
5835
5836         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5837                 goto test_nvram_done;
5838
5839         magic = be32_to_cpu(buf[0]);
5840         if (magic != 0x669955aa) {
5841                 rc = -ENODEV;
5842                 goto test_nvram_done;
5843         }
5844
5845         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5846                 goto test_nvram_done;
5847
5848         csum = ether_crc_le(0x100, data);
5849         if (csum != CRC32_RESIDUAL) {
5850                 rc = -ENODEV;
5851                 goto test_nvram_done;
5852         }
5853
5854         csum = ether_crc_le(0x100, data + 0x100);
5855         if (csum != CRC32_RESIDUAL) {
5856                 rc = -ENODEV;
5857         }
5858
5859 test_nvram_done:
5860         return rc;
5861 }
5862
5863 static int
5864 bnx2_test_link(struct bnx2 *bp)
5865 {
5866         u32 bmsr;
5867
5868         if (!netif_running(bp->dev))
5869                 return -ENODEV;
5870
5871         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5872                 if (bp->link_up)
5873                         return 0;
5874                 return -ENODEV;
5875         }
5876         spin_lock_bh(&bp->phy_lock);
5877         bnx2_enable_bmsr1(bp);
5878         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5879         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5880         bnx2_disable_bmsr1(bp);
5881         spin_unlock_bh(&bp->phy_lock);
5882
5883         if (bmsr & BMSR_LSTATUS) {
5884                 return 0;
5885         }
5886         return -ENODEV;
5887 }
5888
5889 static int
5890 bnx2_test_intr(struct bnx2 *bp)
5891 {
5892         int i;
5893         u16 status_idx;
5894
5895         if (!netif_running(bp->dev))
5896                 return -ENODEV;
5897
5898         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5899
5900         /* This register is not touched during run-time. */
5901         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5902         REG_RD(bp, BNX2_HC_COMMAND);
5903
5904         for (i = 0; i < 10; i++) {
5905                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5906                         status_idx) {
5907
5908                         break;
5909                 }
5910
5911                 msleep_interruptible(10);
5912         }
5913         if (i < 10)
5914                 return 0;
5915
5916         return -ENODEV;
5917 }
5918
/* Determining link for parallel detection.
 *
 * Returns 1 when the 5706 SerDes sees a plausible non-autoneg link
 * partner (signal detected, receiver in sync, not receiving CONFIG
 * code words), 0 otherwise.  Called with bp->phy_lock held by the
 * serdes timer path.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Parallel detection is disabled on this board. */
	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	/* No optical/electrical signal at all -> no link. */
	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read the AN debug shadow twice; presumably the register latches
	 * events and the second read reflects current state -- TODO confirm.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5950
/* Periodic SerDes state machine for the 5706, run from bnx2_timer().
 *
 * Handles parallel detection: when autoneg fails to bring the link up
 * against a non-autoneg partner, force 1G full duplex; when a forced
 * link later sees an autoneg-capable partner, re-enable autoneg.  Also
 * polls for loss of sync and forces the link down / re-evaluates it.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* An autoneg attempt is still settling; count down and
		 * skip the link check this tick.
		 */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg enabled but no link: if the partner looks
			 * like a non-autoneg device, force 1G full duplex
			 * (parallel detection).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link was forced via parallel detection; if register 0x15
		 * bit 5 is now set (presumably the partner advertises
		 * autoneg -- TODO confirm), switch back to autoneg.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read of the latched AN debug shadow register;
		 * second read reflects current sync state.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link says up but receiver lost sync: force the
			 * link down once, then let bnx2_set_link()
			 * re-evaluate on the next tick.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6012
/* Periodic SerDes state machine for the 5708, run from bnx2_timer().
 *
 * While autonegotiating without link, alternates between forced-2.5G
 * and autoneg modes each timer interval to find a partner that only
 * supports one of them.  No-op for remote-PHY or non-2.5G-capable parts.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Give a previous autoneg attempt time to complete. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg found nothing; try forced 2.5G. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode found nothing; go back to autoneg and
			 * wait two ticks before giving up on it.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6045
/* Periodic maintenance timer (re-armed every bp->current_interval).
 *
 * Sends the firmware heartbeat, refreshes the firmware RX drop counter,
 * works around broken statistics hardware, checks for missed MSIs, and
 * drives the SerDes link state machines.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are blocked (intr_sem raised); skip work, re-arm. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Missed-MSI check only applies to non-one-shot MSI mode. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6081
6082 static int
6083 bnx2_request_irq(struct bnx2 *bp)
6084 {
6085         unsigned long flags;
6086         struct bnx2_irq *irq;
6087         int rc = 0, i;
6088
6089         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6090                 flags = 0;
6091         else
6092                 flags = IRQF_SHARED;
6093
6094         for (i = 0; i < bp->irq_nvecs; i++) {
6095                 irq = &bp->irq_tbl[i];
6096                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6097                                  &bp->bnx2_napi[i]);
6098                 if (rc)
6099                         break;
6100                 irq->requested = 1;
6101         }
6102         return rc;
6103 }
6104
6105 static void
6106 bnx2_free_irq(struct bnx2 *bp)
6107 {
6108         struct bnx2_irq *irq;
6109         int i;
6110
6111         for (i = 0; i < bp->irq_nvecs; i++) {
6112                 irq = &bp->irq_tbl[i];
6113                 if (irq->requested)
6114                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6115                 irq->requested = 0;
6116         }
6117         if (bp->flags & BNX2_FLAG_USING_MSI)
6118                 pci_disable_msi(bp->pdev);
6119         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6120                 pci_disable_msix(bp->pdev);
6121
6122         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6123 }
6124
/* Try to switch the device to MSI-X with msix_vecs usable vectors.
 *
 * Programs the chip's MSI-X table/PBA windows, then asks the PCI core
 * for all BNX2_MAX_MSIX_VEC vectors (all-or-nothing).  On success sets
 * BNX2_FLAG_USING_MSIX; on failure returns silently and the caller
 * falls back to MSI or INTx.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/*  Need to flush the previous three writes to ensure MSI-X
	 *  is setup properly */
	REG_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	/* Non-zero means the full vector set was not granted; give up. */
	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	/* Fill the whole table even though only irq_nvecs entries are used. */
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6159
/* Pick the interrupt mode (MSI-X, MSI, or INTx) and size the rings.
 *
 * Defaults to a single INTx vector, then tries MSI-X (when capable,
 * not disabled, and more than one CPU is online), then MSI.  Sets
 * bp->irq_nvecs and derives the TX/RX ring counts from it.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	/* Baseline: one shared INTx vector. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
		bnx2_enable_msix(bp, msix_vecs);

	/* Fall back to MSI only if MSI-X did not stick. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* 5709 supports one-shot MSI. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* TX queue count must be a power of two. */
	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	bp->dev->real_num_tx_queues = bp->num_tx_rings;

	bp->num_rx_rings = bp->irq_nvecs;
}
6193
/* Called with rtnl_lock */
/* ndo_open: bring the device up.
 *
 * Powers the chip on, selects the interrupt mode, allocates rings,
 * requests IRQs, initializes the NIC, and starts the TX queues.  When
 * running in MSI mode, validates that MSI actually works and falls
 * back to INTx if not.  Returns 0 on success or a negative errno,
 * undoing all partial setup on failure.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Re-select interrupt mode with MSI disabled. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	/* Unwind everything acquired above; the free routines tolerate
	 * resources that were never allocated.
	 */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
6269
/* Deferred reset worker, scheduled from bnx2_tx_timeout().
 *
 * Stops the netif under rtnl, re-initializes the NIC, and restarts.
 * Raising intr_sem before restart keeps interrupts blocked until the
 * restart path re-enables them.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	rtnl_lock();
	if (!netif_running(bp->dev)) {
		/* Device was closed before the work ran; nothing to do. */
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	bnx2_init_nic(bp, 1);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
6289
/* Dump key hardware/driver state to the kernel log for debugging,
 * used from the TX timeout path.  Read-only except for log output.
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;

	netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	/* Pending-bit array is only meaningful in MSI-X mode. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6308
/* ndo_tx_timeout: log diagnostic state and schedule a deferred reset
 * (the reset itself must run in process context under rtnl).
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_state(bp);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
6319
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* VLAN acceleration hook: record the new vlan_group (NULL disables),
 * then refresh the RX mode and notify firmware so it keeps/strips VLAN
 * tags accordingly.  Traffic is quiesced around the change when the
 * device is running.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnx2_netif_stop(bp, false);

	bp->vlgrp = vlgrp;

	if (!netif_running(dev))
		return;

	bnx2_set_rx_mode(dev);
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp, false);
}
#endif
6342
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* ndo_start_xmit: map the skb for DMA and post it to the TX ring.
 *
 * Builds one BD for the linear head plus one per page fragment, with
 * checksum/VLAN/TSO flags encoded in vlan_tag_flags and mss.  On a DMA
 * mapping failure the already-mapped pieces are unmapped and the skb is
 * dropped (NETDEV_TX_OK).  Stops the queue when the ring nears full.
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* Should not happen: the queue is stopped before the ring can
	 * run out of room (see the threshold check at the end).
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#ifdef BCM_VLAN
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		/* VLAN tag goes in the upper 16 bits of the flags word. */
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#endif
	if ((mss = skb_shinfo(skb)->gso_size)) {
		/* TSO: encode header-length and (for IPv6) transport-offset
		 * adjustments into the flags/mss fields.
		 */
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				/* Spread the 7-bit offset (in 8-byte units)
				 * across the OFF0/OFF2/OFF4 fields.
				 */
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				/* Extra IP/TCP header words beyond minimum. */
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	/* First BD: linear head, carries the START flag. */
	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(bp->pdev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Last BD of the packet carries the END flag. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Ring the doorbell: new producer index and byte sequence. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);
		/* Re-check after stopping: bnx2_tx_int() may have freed
		 * descriptors in the meantime.
		 */
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
6509
/* Called with rtnl_lock */
/* ndo_stop: bring the device down.
 *
 * Cancels the deferred reset work, quiesces interrupts/NAPI/timer,
 * shuts the chip down, releases IRQs and memory, and drops the chip
 * to D3hot.  Always returns 0.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Must run before teardown so a pending reset can't race us. */
	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6530
6531 static void
6532 bnx2_save_stats(struct bnx2 *bp)
6533 {
6534         u32 *hw_stats = (u32 *) bp->stats_blk;
6535         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6536         int i;
6537
6538         /* The 1st 10 counters are 64-bit counters */
6539         for (i = 0; i < 20; i += 2) {
6540                 u32 hi;
6541                 u64 lo;
6542
6543                 hi = temp_stats[i] + hw_stats[i];
6544                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6545                 if (lo > 0xffffffff)
6546                         hi++;
6547                 temp_stats[i] = hi;
6548                 temp_stats[i + 1] = lo & 0xffffffff;
6549         }
6550
6551         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6552                 temp_stats[i] += hw_stats[i];
6553 }
6554
/* Helpers for bnx2_get_stats(): fold a counter from the live hardware
 * stats block and the saved temp stats block into one unsigned long.
 * 64-bit counters are stored as _hi/_lo u32 pairs; on 32-bit hosts
 * only the low 32 bits are reported (unsigned long cannot hold more).
 */
#define GET_64BIT_NET_STATS64(ctr)                              \
        (unsigned long) ((unsigned long) (ctr##_hi) << 32) +    \
        (unsigned long) (ctr##_lo)

#define GET_64BIT_NET_STATS32(ctr)                              \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_64BIT_NET_STATS(ctr)                                \
        GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +             \
        GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
#else
#define GET_64BIT_NET_STATS(ctr)                                \
        GET_64BIT_NET_STATS32(bp->stats_blk->ctr) +             \
        GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
#endif

/* 32-bit counters are summed directly from both blocks. */
#define GET_32BIT_NET_STATS(ctr)                                \
        (unsigned long) (bp->stats_blk->ctr +                   \
                         bp->temp_stats_blk->ctr)
6575
/* ndo_get_stats: translate the chip's statistics block (plus the saved
 * temp stats) into struct net_device_stats.  Returns the (possibly
 * unmodified) dev->stats pointer; a NULL stats_blk means the device
 * has not been initialized yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct net_device_stats *net_stats = &dev->stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	/* rx_errors is derived from the fields computed above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* Carrier-sense counter is not usable on 5706 and 5708 A0. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
6649
6650 /* All ethtool functions called with rtnl_lock */
6651
6652 static int
6653 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6654 {
6655         struct bnx2 *bp = netdev_priv(dev);
6656         int support_serdes = 0, support_copper = 0;
6657
6658         cmd->supported = SUPPORTED_Autoneg;
6659         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6660                 support_serdes = 1;
6661                 support_copper = 1;
6662         } else if (bp->phy_port == PORT_FIBRE)
6663                 support_serdes = 1;
6664         else
6665                 support_copper = 1;
6666
6667         if (support_serdes) {
6668                 cmd->supported |= SUPPORTED_1000baseT_Full |
6669                         SUPPORTED_FIBRE;
6670                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6671                         cmd->supported |= SUPPORTED_2500baseX_Full;
6672
6673         }
6674         if (support_copper) {
6675                 cmd->supported |= SUPPORTED_10baseT_Half |
6676                         SUPPORTED_10baseT_Full |
6677                         SUPPORTED_100baseT_Half |
6678                         SUPPORTED_100baseT_Full |
6679                         SUPPORTED_1000baseT_Full |
6680                         SUPPORTED_TP;
6681
6682         }
6683
6684         spin_lock_bh(&bp->phy_lock);
6685         cmd->port = bp->phy_port;
6686         cmd->advertising = bp->advertising;
6687
6688         if (bp->autoneg & AUTONEG_SPEED) {
6689                 cmd->autoneg = AUTONEG_ENABLE;
6690         }
6691         else {
6692                 cmd->autoneg = AUTONEG_DISABLE;
6693         }
6694
6695         if (netif_carrier_ok(dev)) {
6696                 cmd->speed = bp->line_speed;
6697                 cmd->duplex = bp->duplex;
6698         }
6699         else {
6700                 cmd->speed = -1;
6701                 cmd->duplex = -1;
6702         }
6703         spin_unlock_bh(&bp->phy_lock);
6704
6705         cmd->transceiver = XCVR_INTERNAL;
6706         cmd->phy_address = bp->phy_addr;
6707
6708         return 0;
6709 }
6710
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so a failed validation leaves bp untouched. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	/* Only copper (TP) and fibre ports exist on this hardware. */
	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching ports is only possible with remote-PHY firmware. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		/* Restrict the advertised modes to the selected medium;
		 * an empty mask means "advertise everything supported".
		 */
		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed: fibre supports only 1G/2.5G full duplex
		 * (2.5G only on capable PHYs); copper must not force a
		 * gigabit speed.
		 */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit the new settings. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6788
6789 static void
6790 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6791 {
6792         struct bnx2 *bp = netdev_priv(dev);
6793
6794         strcpy(info->driver, DRV_MODULE_NAME);
6795         strcpy(info->version, DRV_MODULE_VERSION);
6796         strcpy(info->bus_info, pci_name(bp->pdev));
6797         strcpy(info->fw_version, bp->fw_version);
6798 }
6799
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len handler: the dump buffer size is fixed. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6807
6808 static void
6809 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6810 {
6811         u32 *p = _p, i, offset;
6812         u8 *orig_p = _p;
6813         struct bnx2 *bp = netdev_priv(dev);
6814         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6815                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6816                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6817                                  0x1040, 0x1048, 0x1080, 0x10a4,
6818                                  0x1400, 0x1490, 0x1498, 0x14f0,
6819                                  0x1500, 0x155c, 0x1580, 0x15dc,
6820                                  0x1600, 0x1658, 0x1680, 0x16d8,
6821                                  0x1800, 0x1820, 0x1840, 0x1854,
6822                                  0x1880, 0x1894, 0x1900, 0x1984,
6823                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6824                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6825                                  0x2000, 0x2030, 0x23c0, 0x2400,
6826                                  0x2800, 0x2820, 0x2830, 0x2850,
6827                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6828                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6829                                  0x4080, 0x4090, 0x43c0, 0x4458,
6830                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6831                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6832                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6833                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6834                                  0x6800, 0x6848, 0x684c, 0x6860,
6835                                  0x6888, 0x6910, 0x8000 };
6836
6837         regs->version = 0;
6838
6839         memset(p, 0, BNX2_REGDUMP_LEN);
6840
6841         if (!netif_running(bp->dev))
6842                 return;
6843
6844         i = 0;
6845         offset = reg_boundaries[0];
6846         p += offset;
6847         while (offset < BNX2_REGDUMP_LEN) {
6848                 *p++ = REG_RD(bp, offset);
6849                 offset += 4;
6850                 if (offset == reg_boundaries[i + 1]) {
6851                         offset = reg_boundaries[i + 2];
6852                         p = (u32 *) (orig_p + offset);
6853                         i += 2;
6854                 }
6855         }
6856 }
6857
6858 static void
6859 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6860 {
6861         struct bnx2 *bp = netdev_priv(dev);
6862
6863         if (bp->flags & BNX2_FLAG_NO_WOL) {
6864                 wol->supported = 0;
6865                 wol->wolopts = 0;
6866         }
6867         else {
6868                 wol->supported = WAKE_MAGIC;
6869                 if (bp->wol)
6870                         wol->wolopts = WAKE_MAGIC;
6871                 else
6872                         wol->wolopts = 0;
6873         }
6874         memset(&wol->sopass, 0, sizeof(wol->sopass));
6875 }
6876
6877 static int
6878 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6879 {
6880         struct bnx2 *bp = netdev_priv(dev);
6881
6882         if (wol->wolopts & ~WAKE_MAGIC)
6883                 return -EINVAL;
6884
6885         if (wol->wolopts & WAKE_MAGIC) {
6886                 if (bp->flags & BNX2_FLAG_NO_WOL)
6887                         return -EINVAL;
6888
6889                 bp->wol = 1;
6890         }
6891         else {
6892                 bp->wol = 0;
6893         }
6894         return 0;
6895 }
6896
/* ethtool nway_reset handler: restart autonegotiation on the link. */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	/* Restarting autoneg only makes sense when autoneg is enabled. */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* With a remote PHY, the firmware performs the renegotiation. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock across the sleep; reacquired below. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout handling in the timer. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6942
6943 static u32
6944 bnx2_get_link(struct net_device *dev)
6945 {
6946         struct bnx2 *bp = netdev_priv(dev);
6947
6948         return bp->link_up;
6949 }
6950
6951 static int
6952 bnx2_get_eeprom_len(struct net_device *dev)
6953 {
6954         struct bnx2 *bp = netdev_priv(dev);
6955
6956         if (bp->flash_info == NULL)
6957                 return 0;
6958
6959         return (int) bp->flash_size;
6960 }
6961
6962 static int
6963 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6964                 u8 *eebuf)
6965 {
6966         struct bnx2 *bp = netdev_priv(dev);
6967         int rc;
6968
6969         if (!netif_running(dev))
6970                 return -EAGAIN;
6971
6972         /* parameters already validated in ethtool_get_eeprom */
6973
6974         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6975
6976         return rc;
6977 }
6978
6979 static int
6980 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6981                 u8 *eebuf)
6982 {
6983         struct bnx2 *bp = netdev_priv(dev);
6984         int rc;
6985
6986         if (!netif_running(dev))
6987                 return -EAGAIN;
6988
6989         /* parameters already validated in ethtool_set_eeprom */
6990
6991         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6992
6993         return rc;
6994 }
6995
6996 static int
6997 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6998 {
6999         struct bnx2 *bp = netdev_priv(dev);
7000
7001         memset(coal, 0, sizeof(struct ethtool_coalesce));
7002
7003         coal->rx_coalesce_usecs = bp->rx_ticks;
7004         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7005         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7006         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7007
7008         coal->tx_coalesce_usecs = bp->tx_ticks;
7009         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7010         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7011         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7012
7013         coal->stats_block_coalesce_usecs = bp->stats_ticks;
7014
7015         return 0;
7016 }
7017
7018 static int
7019 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7020 {
7021         struct bnx2 *bp = netdev_priv(dev);
7022
7023         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7024         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7025
7026         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7027         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7028
7029         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7030         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7031
7032         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7033         if (bp->rx_quick_cons_trip_int > 0xff)
7034                 bp->rx_quick_cons_trip_int = 0xff;
7035
7036         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7037         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7038
7039         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7040         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7041
7042         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7043         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7044
7045         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7046         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7047                 0xff;
7048
7049         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7050         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7051                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7052                         bp->stats_ticks = USEC_PER_SEC;
7053         }
7054         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7055                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7056         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7057
7058         if (netif_running(bp->dev)) {
7059                 bnx2_netif_stop(bp, true);
7060                 bnx2_init_nic(bp, 0);
7061                 bnx2_netif_start(bp, true);
7062         }
7063
7064         return 0;
7065 }
7066
7067 static void
7068 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7069 {
7070         struct bnx2 *bp = netdev_priv(dev);
7071
7072         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7073         ering->rx_mini_max_pending = 0;
7074         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7075
7076         ering->rx_pending = bp->rx_ring_size;
7077         ering->rx_mini_pending = 0;
7078         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7079
7080         ering->tx_max_pending = MAX_TX_DESC_CNT;
7081         ering->tx_pending = bp->tx_ring_size;
7082 }
7083
/* Resize the rx and tx rings.  If the interface is up this requires a
 * full chip reset: traffic is stopped, buffers and ring memory are
 * freed, and everything is reallocated at the new sizes.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* Cannot recover: re-enable NAPI so dev_close()
			 * can run, then take the interface down.
			 */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7123
7124 static int
7125 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7126 {
7127         struct bnx2 *bp = netdev_priv(dev);
7128         int rc;
7129
7130         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7131                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7132                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7133
7134                 return -EINVAL;
7135         }
7136         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7137         return rc;
7138 }
7139
7140 static void
7141 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7142 {
7143         struct bnx2 *bp = netdev_priv(dev);
7144
7145         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7146         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7147         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7148 }
7149
7150 static int
7151 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7152 {
7153         struct bnx2 *bp = netdev_priv(dev);
7154
7155         bp->req_flow_ctrl = 0;
7156         if (epause->rx_pause)
7157                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7158         if (epause->tx_pause)
7159                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7160
7161         if (epause->autoneg) {
7162                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7163         }
7164         else {
7165                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7166         }
7167
7168         if (netif_running(dev)) {
7169                 spin_lock_bh(&bp->phy_lock);
7170                 bnx2_setup_phy(bp, bp->phy_port);
7171                 spin_unlock_bh(&bp->phy_lock);
7172         }
7173
7174         return 0;
7175 }
7176
7177 static u32
7178 bnx2_get_rx_csum(struct net_device *dev)
7179 {
7180         struct bnx2 *bp = netdev_priv(dev);
7181
7182         return bp->rx_csum;
7183 }
7184
7185 static int
7186 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7187 {
7188         struct bnx2 *bp = netdev_priv(dev);
7189
7190         bp->rx_csum = data;
7191         return 0;
7192 }
7193
7194 static int
7195 bnx2_set_tso(struct net_device *dev, u32 data)
7196 {
7197         struct bnx2 *bp = netdev_priv(dev);
7198
7199         if (data) {
7200                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7201                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7202                         dev->features |= NETIF_F_TSO6;
7203         } else
7204                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7205                                    NETIF_F_TSO_ECN);
7206         return 0;
7207 }
7208
/* ethtool statistics names.  The entry order here must match
 * bnx2_stats_offset_arr[] and the per-chip stats length arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7260
/* Number of ethtool statistics, derived from the string table. */
#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
			sizeof(bnx2_stats_str_arr[0]))

/* Offset of a statistics_block member, in 32-bit words. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7265
/* 32-bit word offsets into struct statistics_block for each counter,
 * in the same order as bnx2_stats_str_arr[].  For 64-bit counters the
 * offset is the _hi (upper) word; the _lo word follows it.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7315
/* Per-counter widths in bytes (8 = 64-bit, 4 = 32-bit) for 5706 and
 * 5708-A0 chips.  stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors
 * are skipped (width 0) because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7326
/* Per-counter widths for 5708 (non-A0) and later chips.  Only
 * stat_IfHCInBadOctets (index 1) remains skipped.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7334
/* Number of ethtool self-tests; must match bnx2_tests_str_arr[] below
 * and the buf[] slots written by bnx2_self_test().
 */
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7347
7348 static int
7349 bnx2_get_sset_count(struct net_device *dev, int sset)
7350 {
7351         switch (sset) {
7352         case ETH_SS_TEST:
7353                 return BNX2_NUM_TESTS;
7354         case ETH_SS_STATS:
7355                 return BNX2_NUM_STATS;
7356         default:
7357                 return -EOPNOTSUPP;
7358         }
7359 }
7360
/* ethtool self-test handler.  Offline tests (register, memory, loopback)
 * reset the chip and disrupt traffic; online tests (nvram, interrupt,
 * link) are non-disruptive.  buf[i] is set non-zero when test i fails,
 * in the order of bnx2_tests_str_arr[].
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Stop traffic and put the chip in diagnostic mode. */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation after the diag reset. */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* Drop back to low power if the interface is down. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7419
7420 static void
7421 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7422 {
7423         switch (stringset) {
7424         case ETH_SS_STATS:
7425                 memcpy(buf, bnx2_stats_str_arr,
7426                         sizeof(bnx2_stats_str_arr));
7427                 break;
7428         case ETH_SS_TEST:
7429                 memcpy(buf, bnx2_tests_str_arr,
7430                         sizeof(bnx2_tests_str_arr));
7431                 break;
7432         }
7433 }
7434
/* Fill @buf with BNX2_NUM_STATS counters, summing the live hardware
 * stats block with the values saved across the last reset
 * (temp_stats_blk).  Entry order matches bnx2_stats_offset_arr[].
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	/* No stats block allocated: report all zeros. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early chips have errata making some counters unusable; the
	 * per-chip length arrays mark those with a width of 0.
	 */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter: hi word at the offset, lo word next. */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}
7481
7482 static int
7483 bnx2_phys_id(struct net_device *dev, u32 data)
7484 {
7485         struct bnx2 *bp = netdev_priv(dev);
7486         int i;
7487         u32 save;
7488
7489         bnx2_set_power_state(bp, PCI_D0);
7490
7491         if (data == 0)
7492                 data = 2;
7493
7494         save = REG_RD(bp, BNX2_MISC_CFG);
7495         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7496
7497         for (i = 0; i < (data * 2); i++) {
7498                 if ((i % 2) == 0) {
7499                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7500                 }
7501                 else {
7502                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7503                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
7504                                 BNX2_EMAC_LED_100MB_OVERRIDE |
7505                                 BNX2_EMAC_LED_10MB_OVERRIDE |
7506                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7507                                 BNX2_EMAC_LED_TRAFFIC);
7508                 }
7509                 msleep_interruptible(500);
7510                 if (signal_pending(current))
7511                         break;
7512         }
7513         REG_WR(bp, BNX2_EMAC_LED, 0);
7514         REG_WR(bp, BNX2_MISC_CFG, save);
7515
7516         if (!netif_running(dev))
7517                 bnx2_set_power_state(bp, PCI_D3hot);
7518
7519         return 0;
7520 }
7521
7522 static int
7523 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7524 {
7525         struct bnx2 *bp = netdev_priv(dev);
7526
7527         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7528                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7529         else
7530                 return (ethtool_op_set_tx_csum(dev, data));
7531 }
7532
/* ethtool operations for this driver; callbacks not listed here fall
 * back to the ethtool core defaults. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7563
7564 /* Called with rtnl_lock */
7565 static int
7566 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7567 {
7568         struct mii_ioctl_data *data = if_mii(ifr);
7569         struct bnx2 *bp = netdev_priv(dev);
7570         int err;
7571
7572         switch(cmd) {
7573         case SIOCGMIIPHY:
7574                 data->phy_id = bp->phy_addr;
7575
7576                 /* fallthru */
7577         case SIOCGMIIREG: {
7578                 u32 mii_regval;
7579
7580                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7581                         return -EOPNOTSUPP;
7582
7583                 if (!netif_running(dev))
7584                         return -EAGAIN;
7585
7586                 spin_lock_bh(&bp->phy_lock);
7587                 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7588                 spin_unlock_bh(&bp->phy_lock);
7589
7590                 data->val_out = mii_regval;
7591
7592                 return err;
7593         }
7594
7595         case SIOCSMIIREG:
7596                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7597                         return -EOPNOTSUPP;
7598
7599                 if (!netif_running(dev))
7600                         return -EAGAIN;
7601
7602                 spin_lock_bh(&bp->phy_lock);
7603                 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7604                 spin_unlock_bh(&bp->phy_lock);
7605
7606                 return err;
7607
7608         default:
7609                 /* do nothing */
7610                 break;
7611         }
7612         return -EOPNOTSUPP;
7613 }
7614
7615 /* Called with rtnl_lock */
7616 static int
7617 bnx2_change_mac_addr(struct net_device *dev, void *p)
7618 {
7619         struct sockaddr *addr = p;
7620         struct bnx2 *bp = netdev_priv(dev);
7621
7622         if (!is_valid_ether_addr(addr->sa_data))
7623                 return -EINVAL;
7624
7625         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7626         if (netif_running(dev))
7627                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7628
7629         return 0;
7630 }
7631
7632 /* Called with rtnl_lock */
7633 static int
7634 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7635 {
7636         struct bnx2 *bp = netdev_priv(dev);
7637
7638         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7639                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7640                 return -EINVAL;
7641
7642         dev->mtu = new_mtu;
7643         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7644 }
7645
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: service every IRQ vector's handler with its
 * interrupt line disabled.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int vec;

	for (vec = 0; vec < bp->irq_nvecs; vec++) {
		struct bnx2_irq *irq = &bp->irq_tbl[vec];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[vec]);
		enable_irq(irq->vector);
	}
}
#endif
7662
7663 static void __devinit
7664 bnx2_get_5709_media(struct bnx2 *bp)
7665 {
7666         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7667         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7668         u32 strap;
7669
7670         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7671                 return;
7672         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7673                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7674                 return;
7675         }
7676
7677         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7678                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7679         else
7680                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7681
7682         if (PCI_FUNC(bp->pdev->devfn) == 0) {
7683                 switch (strap) {
7684                 case 0x4:
7685                 case 0x5:
7686                 case 0x6:
7687                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7688                         return;
7689                 }
7690         } else {
7691                 switch (strap) {
7692                 case 0x1:
7693                 case 0x2:
7694                 case 0x4:
7695                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7696                         return;
7697                 }
7698         }
7699 }
7700
/* Detect PCI vs PCI-X mode, bus speed and bus width from the chip's
 * status registers and record them in bp->flags / bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		/* Translate the detected clock-speed field to MHz. */
		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: M66EN distinguishes 66 vs 33 MHz. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7752
/* Read the vendor firmware version string from the VPD area in NVRAM
 * (Dell-branded boards only) and copy it into bp->fw_version.
 * Best-effort: any parse failure leaves bp->fw_version untouched.
 */
static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	/* Buffer layout: upper half holds the raw NVRAM words, lower
	 * half receives the byte-swapped copy that is parsed below. */
	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* Swap each 32-bit NVRAM word into byte order expected by the
	 * PCI VPD helpers. */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	/* Read-only section must fit inside the data we fetched. */
	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	/* Only trust the version keyword if the manufacturer ID field
	 * is "1028" (Dell). */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	/* Trailing space separates this from the bootcode version that
	 * bnx2_init_board appends afterwards. */
	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
7820
7821 static int __devinit
7822 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7823 {
7824         struct bnx2 *bp;
7825         unsigned long mem_len;
7826         int rc, i, j;
7827         u32 reg;
7828         u64 dma_mask, persist_dma_mask;
7829
7830         SET_NETDEV_DEV(dev, &pdev->dev);
7831         bp = netdev_priv(dev);
7832
7833         bp->flags = 0;
7834         bp->phy_flags = 0;
7835
7836         bp->temp_stats_blk =
7837                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7838
7839         if (bp->temp_stats_blk == NULL) {
7840                 rc = -ENOMEM;
7841                 goto err_out;
7842         }
7843
7844         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7845         rc = pci_enable_device(pdev);
7846         if (rc) {
7847                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7848                 goto err_out;
7849         }
7850
7851         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7852                 dev_err(&pdev->dev,
7853                         "Cannot find PCI device base address, aborting\n");
7854                 rc = -ENODEV;
7855                 goto err_out_disable;
7856         }
7857
7858         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7859         if (rc) {
7860                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7861                 goto err_out_disable;
7862         }
7863
7864         pci_set_master(pdev);
7865         pci_save_state(pdev);
7866
7867         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7868         if (bp->pm_cap == 0) {
7869                 dev_err(&pdev->dev,
7870                         "Cannot find power management capability, aborting\n");
7871                 rc = -EIO;
7872                 goto err_out_release;
7873         }
7874
7875         bp->dev = dev;
7876         bp->pdev = pdev;
7877
7878         spin_lock_init(&bp->phy_lock);
7879         spin_lock_init(&bp->indirect_lock);
7880 #ifdef BCM_CNIC
7881         mutex_init(&bp->cnic_lock);
7882 #endif
7883         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7884
7885         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7886         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7887         dev->mem_end = dev->mem_start + mem_len;
7888         dev->irq = pdev->irq;
7889
7890         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7891
7892         if (!bp->regview) {
7893                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7894                 rc = -ENOMEM;
7895                 goto err_out_release;
7896         }
7897
7898         /* Configure byte swap and enable write to the reg_window registers.
7899          * Rely on CPU to do target byte swapping on big endian systems
7900          * The chip's target access swapping will not swap all accesses
7901          */
7902         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7903                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7904                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7905
7906         bnx2_set_power_state(bp, PCI_D0);
7907
7908         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7909
7910         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7911                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7912                         dev_err(&pdev->dev,
7913                                 "Cannot find PCIE capability, aborting\n");
7914                         rc = -EIO;
7915                         goto err_out_unmap;
7916                 }
7917                 bp->flags |= BNX2_FLAG_PCIE;
7918                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7919                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7920         } else {
7921                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7922                 if (bp->pcix_cap == 0) {
7923                         dev_err(&pdev->dev,
7924                                 "Cannot find PCIX capability, aborting\n");
7925                         rc = -EIO;
7926                         goto err_out_unmap;
7927                 }
7928                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7929         }
7930
7931         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7932                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7933                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7934         }
7935
7936         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7937                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7938                         bp->flags |= BNX2_FLAG_MSI_CAP;
7939         }
7940
7941         /* 5708 cannot support DMA addresses > 40-bit.  */
7942         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7943                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7944         else
7945                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7946
7947         /* Configure DMA attributes. */
7948         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7949                 dev->features |= NETIF_F_HIGHDMA;
7950                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7951                 if (rc) {
7952                         dev_err(&pdev->dev,
7953                                 "pci_set_consistent_dma_mask failed, aborting\n");
7954                         goto err_out_unmap;
7955                 }
7956         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7957                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
7958                 goto err_out_unmap;
7959         }
7960
7961         if (!(bp->flags & BNX2_FLAG_PCIE))
7962                 bnx2_get_pci_speed(bp);
7963
7964         /* 5706A0 may falsely detect SERR and PERR. */
7965         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7966                 reg = REG_RD(bp, PCI_COMMAND);
7967                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7968                 REG_WR(bp, PCI_COMMAND, reg);
7969         }
7970         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7971                 !(bp->flags & BNX2_FLAG_PCIX)) {
7972
7973                 dev_err(&pdev->dev,
7974                         "5706 A1 can only be used in a PCIX bus, aborting\n");
7975                 goto err_out_unmap;
7976         }
7977
7978         bnx2_init_nvram(bp);
7979
7980         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7981
7982         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7983             BNX2_SHM_HDR_SIGNATURE_SIG) {
7984                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7985
7986                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7987         } else
7988                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7989
7990         /* Get the permanent MAC address.  First we need to make sure the
7991          * firmware is actually running.
7992          */
7993         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7994
7995         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7996             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7997                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
7998                 rc = -ENODEV;
7999                 goto err_out_unmap;
8000         }
8001
8002         bnx2_read_vpd_fw_ver(bp);
8003
8004         j = strlen(bp->fw_version);
8005         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8006         for (i = 0; i < 3 && j < 24; i++) {
8007                 u8 num, k, skip0;
8008
8009                 if (i == 0) {
8010                         bp->fw_version[j++] = 'b';
8011                         bp->fw_version[j++] = 'c';
8012                         bp->fw_version[j++] = ' ';
8013                 }
8014                 num = (u8) (reg >> (24 - (i * 8)));
8015                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8016                         if (num >= k || !skip0 || k == 1) {
8017                                 bp->fw_version[j++] = (num / k) + '0';
8018                                 skip0 = 0;
8019                         }
8020                 }
8021                 if (i != 2)
8022                         bp->fw_version[j++] = '.';
8023         }
8024         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8025         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8026                 bp->wol = 1;
8027
8028         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8029                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8030
8031                 for (i = 0; i < 30; i++) {
8032                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8033                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8034                                 break;
8035                         msleep(10);
8036                 }
8037         }
8038         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8039         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8040         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8041             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8042                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8043
8044                 if (j < 32)
8045                         bp->fw_version[j++] = ' ';
8046                 for (i = 0; i < 3 && j < 28; i++) {
8047                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8048                         reg = swab32(reg);
8049                         memcpy(&bp->fw_version[j], &reg, 4);
8050                         j += 4;
8051                 }
8052         }
8053
8054         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8055         bp->mac_addr[0] = (u8) (reg >> 8);
8056         bp->mac_addr[1] = (u8) reg;
8057
8058         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8059         bp->mac_addr[2] = (u8) (reg >> 24);
8060         bp->mac_addr[3] = (u8) (reg >> 16);
8061         bp->mac_addr[4] = (u8) (reg >> 8);
8062         bp->mac_addr[5] = (u8) reg;
8063
8064         bp->tx_ring_size = MAX_TX_DESC_CNT;
8065         bnx2_set_rx_ring_size(bp, 255);
8066
8067         bp->rx_csum = 1;
8068
8069         bp->tx_quick_cons_trip_int = 2;
8070         bp->tx_quick_cons_trip = 20;
8071         bp->tx_ticks_int = 18;
8072         bp->tx_ticks = 80;
8073
8074         bp->rx_quick_cons_trip_int = 2;
8075         bp->rx_quick_cons_trip = 12;
8076         bp->rx_ticks_int = 18;
8077         bp->rx_ticks = 18;
8078
8079         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8080
8081         bp->current_interval = BNX2_TIMER_INTERVAL;
8082
8083         bp->phy_addr = 1;
8084
8085         /* Disable WOL support if we are running on a SERDES chip. */
8086         if (CHIP_NUM(bp) == CHIP_NUM_5709)
8087                 bnx2_get_5709_media(bp);
8088         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8089                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8090
8091         bp->phy_port = PORT_TP;
8092         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8093                 bp->phy_port = PORT_FIBRE;
8094                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8095                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8096                         bp->flags |= BNX2_FLAG_NO_WOL;
8097                         bp->wol = 0;
8098                 }
8099                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8100                         /* Don't do parallel detect on this board because of
8101                          * some board problems.  The link will not go down
8102                          * if we do parallel detect.
8103                          */
8104                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8105                             pdev->subsystem_device == 0x310c)
8106                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8107                 } else {
8108                         bp->phy_addr = 2;
8109                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8110                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8111                 }
8112         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8113                    CHIP_NUM(bp) == CHIP_NUM_5708)
8114                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8115         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8116                  (CHIP_REV(bp) == CHIP_REV_Ax ||
8117                   CHIP_REV(bp) == CHIP_REV_Bx))
8118                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8119
8120         bnx2_init_fw_cap(bp);
8121
8122         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8123             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8124             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8125             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8126                 bp->flags |= BNX2_FLAG_NO_WOL;
8127                 bp->wol = 0;
8128         }
8129
8130         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8131                 bp->tx_quick_cons_trip_int =
8132                         bp->tx_quick_cons_trip;
8133                 bp->tx_ticks_int = bp->tx_ticks;
8134                 bp->rx_quick_cons_trip_int =
8135                         bp->rx_quick_cons_trip;
8136                 bp->rx_ticks_int = bp->rx_ticks;
8137                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8138                 bp->com_ticks_int = bp->com_ticks;
8139                 bp->cmd_ticks_int = bp->cmd_ticks;
8140         }
8141
8142         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8143          *
8144          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8145          * with byte enables disabled on the unused 32-bit word.  This is legal
8146          * but causes problems on the AMD 8132 which will eventually stop
8147          * responding after a while.
8148          *
8149          * AMD believes this incompatibility is unique to the 5706, and
8150          * prefers to locally disable MSI rather than globally disabling it.
8151          */
8152         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8153                 struct pci_dev *amd_8132 = NULL;
8154
8155                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8156                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8157                                                   amd_8132))) {
8158
8159                         if (amd_8132->revision >= 0x10 &&
8160                             amd_8132->revision <= 0x13) {
8161                                 disable_msi = 1;
8162                                 pci_dev_put(amd_8132);
8163                                 break;
8164                         }
8165                 }
8166         }
8167
8168         bnx2_set_default_link(bp);
8169         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8170
8171         init_timer(&bp->timer);
8172         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8173         bp->timer.data = (unsigned long) bp;
8174         bp->timer.function = bnx2_timer;
8175
8176         return 0;
8177
8178 err_out_unmap:
8179         if (bp->regview) {
8180                 iounmap(bp->regview);
8181                 bp->regview = NULL;
8182         }
8183
8184 err_out_release:
8185         pci_release_regions(pdev);
8186
8187 err_out_disable:
8188         pci_disable_device(pdev);
8189         pci_set_drvdata(pdev, NULL);
8190
8191 err_out:
8192         return rc;
8193 }
8194
8195 static char * __devinit
8196 bnx2_bus_string(struct bnx2 *bp, char *str)
8197 {
8198         char *s = str;
8199
8200         if (bp->flags & BNX2_FLAG_PCIE) {
8201                 s += sprintf(s, "PCI Express");
8202         } else {
8203                 s += sprintf(s, "PCI");
8204                 if (bp->flags & BNX2_FLAG_PCIX)
8205                         s += sprintf(s, "-X");
8206                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8207                         s += sprintf(s, " 32-bit");
8208                 else
8209                         s += sprintf(s, " 64-bit");
8210                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8211         }
8212         return str;
8213 }
8214
8215 static void __devinit
8216 bnx2_init_napi(struct bnx2 *bp)
8217 {
8218         int i;
8219
8220         for (i = 0; i < bp->irq_nvecs; i++) {
8221                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8222                 int (*poll)(struct napi_struct *, int);
8223
8224                 if (i == 0)
8225                         poll = bnx2_poll;
8226                 else
8227                         poll = bnx2_poll_msix;
8228
8229                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8230                 bnapi->bp = bp;
8231         }
8232 }
8233
/* net_device callbacks; installed on the netdev in bnx2_init_one. */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8252
/* Mirror feature @flags into dev->vlan_features so the offloads stay
 * usable on stacked VLAN devices; compiles to a no-op when VLAN
 * support is not configured.
 *
 * (Fixed the nonstandard "static void inline" keyword order; the
 * storage-class specifier and inline precede the type.)
 */
static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
	dev->vlan_features |= flags;
#endif
}
8259
/* PCI probe entry point: allocate the net_device, run one-time board
 * setup (bnx2_init_board), request firmware, set offload feature
 * flags, and register the device with the networking core.
 * Returns 0 on success or a negative errno.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	/* Print the driver banner once, on the first probed device. */
	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	/* MAC address was read from shared memory by bnx2_init_board. */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	/* IPv4 checksum, SG and GRO on all chips; IPv6 checksum and
	 * TSO6 only on the 5709. */
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
	vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_IPV6_CSUM;
		vlan_features_add(dev, NETIF_F_IPV6_CSUM);
	}
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_TSO6;
		vlan_features_add(dev, NETIF_F_TSO6);
	}
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str),
		    dev->base_addr,
		    bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	/* Unwind everything acquired by bnx2_init_board and above. */
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}
8343
/* PCI remove: unregister the netdev first so no new operations can
 * start, then release firmware, mappings and PCI resources.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure the deferred reset task is no longer queued. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	kfree(bp->temp_stats_blk);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8369
/* bnx2_suspend - legacy PCI power-management suspend hook.
 *
 * Saves PCI config space unconditionally, then (only if the interface
 * is up) quiesces NAPI/tx, resets the chip, frees rx/tx buffers and
 * drops the device into the requested low-power state.
 *
 * Returns 0 (this driver's suspend path cannot fail).
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Make sure no reset task races with the shutdown below. */
	flush_scheduled_work();
	bnx2_netif_stop(bp, true);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8393
/* bnx2_resume - legacy PCI power-management resume hook.
 *
 * Restores PCI config space, and if the interface was up, brings the
 * device back to D0, re-initializes the NIC and restarts the datapath.
 *
 * Returns 0; NOTE(review): bnx2_init_nic()'s return value is not
 * checked here — a failed re-init leaves the device attached but
 * non-functional. Confirm against later upstream fixes.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8410
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 *
 * Returns PCI_ERS_RESULT_DISCONNECT on permanent failure, otherwise
 * PCI_ERS_RESULT_NEED_RESET to request a slot reset.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* rtnl_lock serializes against open/close/reset paths. */
	rtnl_lock();
	netif_device_detach(dev);

	/* Permanent failure: nothing to recover, tell the core to give up. */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8445
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 *
 * Returns PCI_ERS_RESULT_RECOVERED on success, or
 * PCI_ERS_RESULT_DISCONNECT if the device cannot be re-enabled.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	/* Restore config space saved at probe/error time, then re-save so
	 * a subsequent error event starts from a known-good snapshot.
	 */
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		/* NOTE(review): bnx2_init_nic()'s return value is ignored;
		 * a failed re-init still reports RECOVERED. Confirm whether
		 * this should map to PCI_ERS_RESULT_DISCONNECT.
		 */
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}
8476
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	/* Restart the datapath only if the interface was up when the
	 * error was detected.
	 */
	if (netif_running(dev))
		bnx2_netif_start(bp, true);

	netif_device_attach(dev);
	rtnl_unlock();
}
8496
/* PCI error-recovery (AER) callbacks registered with the PCI core. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8502
/* Driver registration: probe/remove, legacy PM hooks, and AER handlers. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
8512
8513 static int __init bnx2_init(void)
8514 {
8515         return pci_register_driver(&bnx2_pci_driver);
8516 }
8517
8518 static void __exit bnx2_cleanup(void)
8519 {
8520         pci_unregister_driver(&bnx2_pci_driver);
8521 }
8522
/* Standard kernel module entry/exit hookup. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8525
8526
8527