77ba13520d87ae539854da167551892655b3ec3d
[cascardo/linux.git] / drivers / net / bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52 #include <linux/stringify.h>
53
54
55 #include "bnx2x.h"
56 #include "bnx2x_init.h"
57 #include "bnx2x_init_ops.h"
58 #include "bnx2x_dump.h"
59
60 #define DRV_MODULE_VERSION      "1.52.1-5"
61 #define DRV_MODULE_RELDATE      "2009/11/09"
62 #define BNX2X_BC_VER            0x040200
63
64 #include <linux/firmware.h>
65 #include "bnx2x_fw_file_hdr.h"
66 /* FW files */
67 #define FW_FILE_VERSION                                 \
68         __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
69         __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
70         __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
71         __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72 #define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
73 #define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"
74
75 /* Time in jiffies before concluding the transmitter is hung */
76 #define TX_TIMEOUT              (5*HZ)
77
78 static char version[] __devinitdata =
79         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
80         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
81
82 MODULE_AUTHOR("Eliezer Tamir");
83 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
84 MODULE_LICENSE("GPL");
85 MODULE_VERSION(DRV_MODULE_VERSION);
86 MODULE_FIRMWARE(FW_FILE_NAME_E1);
87 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
88
89 static int multi_mode = 1;
90 module_param(multi_mode, int, 0);
91 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
92                              "(0 Disable; 1 Enable (default))");
93
94 static int num_queues;
95 module_param(num_queues, int, 0);
96 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
97                                 " (default is as a number of CPUs)");
98
99 static int disable_tpa;
100 module_param(disable_tpa, int, 0);
101 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
102
103 static int int_mode;
104 module_param(int_mode, int, 0);
105 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
106
107 static int dropless_fc;
108 module_param(dropless_fc, int, 0);
109 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
110
111 static int poll;
112 module_param(poll, int, 0);
113 MODULE_PARM_DESC(poll, " Use polling (for debug)");
114
115 static int mrrs = -1;
116 module_param(mrrs, int, 0);
117 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
118
119 static int debug;
120 module_param(debug, int, 0);
121 MODULE_PARM_DESC(debug, " Default debug msglevel");
122
123 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
124
125 static struct workqueue_struct *bnx2x_wq;
126
127 enum bnx2x_board_type {
128         BCM57710 = 0,
129         BCM57711 = 1,
130         BCM57711E = 2,
131 };
132
133 /* indexed by board_type, above */
134 static struct {
135         char *name;
136 } board_info[] __devinitdata = {
137         { "Broadcom NetXtreme II BCM57710 XGb" },
138         { "Broadcom NetXtreme II BCM57711 XGb" },
139         { "Broadcom NetXtreme II BCM57711E XGb" }
140 };
141
142
143 static const struct pci_device_id bnx2x_pci_tbl[] = {
144         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
145         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
146         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
147         { 0 }
148 };
149
150 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
151
152 /****************************************************************************
153 * General service functions
154 ****************************************************************************/
155
156 /* used only at init
157  * locking is done by mcp
158  */
159 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
160 {
161         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
162         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
163         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
164                                PCICFG_VENDOR_ID_OFFSET);
165 }
166
167 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
168 {
169         u32 val;
170
171         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
172         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
173         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
174                                PCICFG_VENDOR_ID_OFFSET);
175
176         return val;
177 }
178
179 static const u32 dmae_reg_go_c[] = {
180         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
181         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
182         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
183         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
184 };
185
186 /* copy command into DMAE command memory and set DMAE command go */
187 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
188                             int idx)
189 {
190         u32 cmd_offset;
191         int i;
192
193         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
194         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
195                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
196
197                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
198                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
199         }
200         REG_WR(bp, dmae_reg_go_c[idx], 1);
201 }
202
203 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
204                       u32 len32)
205 {
206         struct dmae_command dmae;
207         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
208         int cnt = 200;
209
210         if (!bp->dmae_ready) {
211                 u32 *data = bnx2x_sp(bp, wb_data[0]);
212
213                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
214                    "  using indirect\n", dst_addr, len32);
215                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
216                 return;
217         }
218
219         memset(&dmae, 0, sizeof(struct dmae_command));
220
221         dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
222                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
223                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
224 #ifdef __BIG_ENDIAN
225                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
226 #else
227                        DMAE_CMD_ENDIANITY_DW_SWAP |
228 #endif
229                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
230                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
231         dmae.src_addr_lo = U64_LO(dma_addr);
232         dmae.src_addr_hi = U64_HI(dma_addr);
233         dmae.dst_addr_lo = dst_addr >> 2;
234         dmae.dst_addr_hi = 0;
235         dmae.len = len32;
236         dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
237         dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
238         dmae.comp_val = DMAE_COMP_VAL;
239
240         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
241            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
242                     "dst_addr [%x:%08x (%08x)]\n"
243            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
244            dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
245            dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
246            dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
247         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
248            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
249            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
250
251         mutex_lock(&bp->dmae_mutex);
252
253         *wb_comp = 0;
254
255         bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
256
257         udelay(5);
258
259         while (*wb_comp != DMAE_COMP_VAL) {
260                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
261
262                 if (!cnt) {
263                         BNX2X_ERR("DMAE timeout!\n");
264                         break;
265                 }
266                 cnt--;
267                 /* adjust delay for emulation/FPGA */
268                 if (CHIP_REV_IS_SLOW(bp))
269                         msleep(100);
270                 else
271                         udelay(5);
272         }
273
274         mutex_unlock(&bp->dmae_mutex);
275 }
276
277 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
278 {
279         struct dmae_command dmae;
280         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
281         int cnt = 200;
282
283         if (!bp->dmae_ready) {
284                 u32 *data = bnx2x_sp(bp, wb_data[0]);
285                 int i;
286
287                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
288                    "  using indirect\n", src_addr, len32);
289                 for (i = 0; i < len32; i++)
290                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
291                 return;
292         }
293
294         memset(&dmae, 0, sizeof(struct dmae_command));
295
296         dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
297                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
298                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
299 #ifdef __BIG_ENDIAN
300                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
301 #else
302                        DMAE_CMD_ENDIANITY_DW_SWAP |
303 #endif
304                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
305                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
306         dmae.src_addr_lo = src_addr >> 2;
307         dmae.src_addr_hi = 0;
308         dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
309         dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
310         dmae.len = len32;
311         dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
312         dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
313         dmae.comp_val = DMAE_COMP_VAL;
314
315         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
316            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
317                     "dst_addr [%x:%08x (%08x)]\n"
318            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
319            dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
320            dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
321            dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
322
323         mutex_lock(&bp->dmae_mutex);
324
325         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
326         *wb_comp = 0;
327
328         bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
329
330         udelay(5);
331
332         while (*wb_comp != DMAE_COMP_VAL) {
333
334                 if (!cnt) {
335                         BNX2X_ERR("DMAE timeout!\n");
336                         break;
337                 }
338                 cnt--;
339                 /* adjust delay for emulation/FPGA */
340                 if (CHIP_REV_IS_SLOW(bp))
341                         msleep(100);
342                 else
343                         udelay(5);
344         }
345         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
346            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
347            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
348
349         mutex_unlock(&bp->dmae_mutex);
350 }
351
352 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
353                                u32 addr, u32 len)
354 {
355         int offset = 0;
356
357         while (len > DMAE_LEN32_WR_MAX) {
358                 bnx2x_write_dmae(bp, phys_addr + offset,
359                                  addr + offset, DMAE_LEN32_WR_MAX);
360                 offset += DMAE_LEN32_WR_MAX * 4;
361                 len -= DMAE_LEN32_WR_MAX;
362         }
363
364         bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
365 }
366
367 /* used only for slowpath so not inlined */
368 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
369 {
370         u32 wb_write[2];
371
372         wb_write[0] = val_hi;
373         wb_write[1] = val_lo;
374         REG_WR_DMAE(bp, reg, wb_write, 2);
375 }
376
377 #ifdef USE_WB_RD
378 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
379 {
380         u32 wb_data[2];
381
382         REG_RD_DMAE(bp, reg, wb_data, 2);
383
384         return HILO_U64(wb_data[0], wb_data[1]);
385 }
386 #endif
387
388 static int bnx2x_mc_assert(struct bnx2x *bp)
389 {
390         char last_idx;
391         int i, rc = 0;
392         u32 row0, row1, row2, row3;
393
394         /* XSTORM */
395         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
396                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
397         if (last_idx)
398                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
399
400         /* print the asserts */
401         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
402
403                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
404                               XSTORM_ASSERT_LIST_OFFSET(i));
405                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
407                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
408                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
409                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
410                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
411
412                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
413                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
414                                   " 0x%08x 0x%08x 0x%08x\n",
415                                   i, row3, row2, row1, row0);
416                         rc++;
417                 } else {
418                         break;
419                 }
420         }
421
422         /* TSTORM */
423         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
424                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
425         if (last_idx)
426                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
427
428         /* print the asserts */
429         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
430
431                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
432                               TSTORM_ASSERT_LIST_OFFSET(i));
433                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
435                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
436                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
437                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
438                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
439
440                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
441                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
442                                   " 0x%08x 0x%08x 0x%08x\n",
443                                   i, row3, row2, row1, row0);
444                         rc++;
445                 } else {
446                         break;
447                 }
448         }
449
450         /* CSTORM */
451         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
452                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
453         if (last_idx)
454                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
455
456         /* print the asserts */
457         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
458
459                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
460                               CSTORM_ASSERT_LIST_OFFSET(i));
461                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
463                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
464                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
465                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
466                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
467
468                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
469                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
470                                   " 0x%08x 0x%08x 0x%08x\n",
471                                   i, row3, row2, row1, row0);
472                         rc++;
473                 } else {
474                         break;
475                 }
476         }
477
478         /* USTORM */
479         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
480                            USTORM_ASSERT_LIST_INDEX_OFFSET);
481         if (last_idx)
482                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
483
484         /* print the asserts */
485         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
486
487                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
488                               USTORM_ASSERT_LIST_OFFSET(i));
489                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
490                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
491                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
492                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
493                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
494                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
495
496                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
497                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
498                                   " 0x%08x 0x%08x 0x%08x\n",
499                                   i, row3, row2, row1, row0);
500                         rc++;
501                 } else {
502                         break;
503                 }
504         }
505
506         return rc;
507 }
508
509 static void bnx2x_fw_dump(struct bnx2x *bp)
510 {
511         u32 mark, offset;
512         __be32 data[9];
513         int word;
514
515         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
516         mark = ((mark + 0x3) & ~0x3);
517         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
518
519         printk(KERN_ERR PFX);
520         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
521                 for (word = 0; word < 8; word++)
522                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
523                                                   offset + 4*word));
524                 data[8] = 0x0;
525                 printk(KERN_CONT "%s", (char *)data);
526         }
527         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
528                 for (word = 0; word < 8; word++)
529                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
530                                                   offset + 4*word));
531                 data[8] = 0x0;
532                 printk(KERN_CONT "%s", (char *)data);
533         }
534         printk(KERN_ERR PFX "end of fw dump\n");
535 }
536
537 static void bnx2x_panic_dump(struct bnx2x *bp)
538 {
539         int i;
540         u16 j, start, end;
541
542         bp->stats_state = STATS_STATE_DISABLED;
543         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
544
545         BNX2X_ERR("begin crash dump -----------------\n");
546
547         /* Indices */
548         /* Common */
549         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
550                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
551                   "  spq_prod_idx(%u)\n",
552                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
553                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
554
555         /* Rx */
556         for_each_queue(bp, i) {
557                 struct bnx2x_fastpath *fp = &bp->fp[i];
558
559                 BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
560                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
561                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
562                           i, fp->rx_bd_prod, fp->rx_bd_cons,
563                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
564                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
565                 BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
566                           "  fp_u_idx(%x) *sb_u_idx(%x)\n",
567                           fp->rx_sge_prod, fp->last_max_sge,
568                           le16_to_cpu(fp->fp_u_idx),
569                           fp->status_blk->u_status_block.status_block_index);
570         }
571
572         /* Tx */
573         for_each_queue(bp, i) {
574                 struct bnx2x_fastpath *fp = &bp->fp[i];
575
576                 BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
577                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
578                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
579                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
580                 BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
581                           "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
582                           fp->status_blk->c_status_block.status_block_index,
583                           fp->tx_db.data.prod);
584         }
585
586         /* Rings */
587         /* Rx */
588         for_each_queue(bp, i) {
589                 struct bnx2x_fastpath *fp = &bp->fp[i];
590
591                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
592                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
593                 for (j = start; j != end; j = RX_BD(j + 1)) {
594                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
595                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
596
597                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
598                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
599                 }
600
601                 start = RX_SGE(fp->rx_sge_prod);
602                 end = RX_SGE(fp->last_max_sge);
603                 for (j = start; j != end; j = RX_SGE(j + 1)) {
604                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
605                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
606
607                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
608                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
609                 }
610
611                 start = RCQ_BD(fp->rx_comp_cons - 10);
612                 end = RCQ_BD(fp->rx_comp_cons + 503);
613                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
614                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
615
616                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
617                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
618                 }
619         }
620
621         /* Tx */
622         for_each_queue(bp, i) {
623                 struct bnx2x_fastpath *fp = &bp->fp[i];
624
625                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
626                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
627                 for (j = start; j != end; j = TX_BD(j + 1)) {
628                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
629
630                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
631                                   i, j, sw_bd->skb, sw_bd->first_bd);
632                 }
633
634                 start = TX_BD(fp->tx_bd_cons - 10);
635                 end = TX_BD(fp->tx_bd_cons + 254);
636                 for (j = start; j != end; j = TX_BD(j + 1)) {
637                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
638
639                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
640                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
641                 }
642         }
643
644         bnx2x_fw_dump(bp);
645         bnx2x_mc_assert(bp);
646         BNX2X_ERR("end crash dump -----------------\n");
647 }
648
649 static void bnx2x_int_enable(struct bnx2x *bp)
650 {
651         int port = BP_PORT(bp);
652         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
653         u32 val = REG_RD(bp, addr);
654         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
655         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
656
657         if (msix) {
658                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
659                          HC_CONFIG_0_REG_INT_LINE_EN_0);
660                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
661                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
662         } else if (msi) {
663                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
664                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
665                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
666                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
667         } else {
668                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
669                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
670                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
671                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
672
673                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
674                    val, port, addr);
675
676                 REG_WR(bp, addr, val);
677
678                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
679         }
680
681         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
682            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
683
684         REG_WR(bp, addr, val);
685         /*
686          * Ensure that HC_CONFIG is written before leading/trailing edge config
687          */
688         mmiowb();
689         barrier();
690
691         if (CHIP_IS_E1H(bp)) {
692                 /* init leading/trailing edge */
693                 if (IS_E1HMF(bp)) {
694                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
695                         if (bp->port.pmf)
696                                 /* enable nig and gpio3 attention */
697                                 val |= 0x1100;
698                 } else
699                         val = 0xffff;
700
701                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
702                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
703         }
704
705         /* Make sure that interrupts are indeed enabled from here on */
706         mmiowb();
707 }
708
709 static void bnx2x_int_disable(struct bnx2x *bp)
710 {
711         int port = BP_PORT(bp);
712         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
713         u32 val = REG_RD(bp, addr);
714
715         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
716                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
717                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
718                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
719
720         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
721            val, port, addr);
722
723         /* flush all outstanding writes */
724         mmiowb();
725
726         REG_WR(bp, addr, val);
727         if (REG_RD(bp, addr) != val)
728                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
729 }
730
731 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
732 {
733         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
734         int i, offset;
735
736         /* disable interrupt handling */
737         atomic_inc(&bp->intr_sem);
738         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
739
740         if (disable_hw)
741                 /* prevent the HW from sending interrupts */
742                 bnx2x_int_disable(bp);
743
744         /* make sure all ISRs are done */
745         if (msix) {
746                 synchronize_irq(bp->msix_table[0].vector);
747                 offset = 1;
748 #ifdef BCM_CNIC
749                 offset++;
750 #endif
751                 for_each_queue(bp, i)
752                         synchronize_irq(bp->msix_table[i + offset].vector);
753         } else
754                 synchronize_irq(bp->pdev->irq);
755
756         /* make sure sp_task is not running */
757         cancel_delayed_work(&bp->sp_task);
758         flush_workqueue(bnx2x_wq);
759 }
760
761 /* fast path */
762
763 /*
764  * General service functions
765  */
766
767 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
768                                 u8 storm, u16 index, u8 op, u8 update)
769 {
770         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
771                        COMMAND_REG_INT_ACK);
772         struct igu_ack_register igu_ack;
773
774         igu_ack.status_block_index = index;
775         igu_ack.sb_id_and_flags =
776                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
777                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
778                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
779                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
780
781         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
782            (*(u32 *)&igu_ack), hc_addr);
783         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
784
785         /* Make sure that ACK is written */
786         mmiowb();
787         barrier();
788 }
789
790 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
791 {
792         struct host_status_block *fpsb = fp->status_blk;
793
794         barrier(); /* status block is written to by the chip */
795         fp->fp_c_idx = fpsb->c_status_block.status_block_index;
796         fp->fp_u_idx = fpsb->u_status_block.status_block_index;
797 }
798
799 static u16 bnx2x_ack_int(struct bnx2x *bp)
800 {
801         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
802                        COMMAND_REG_SIMD_MASK);
803         u32 result = REG_RD(bp, hc_addr);
804
805         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
806            result, hc_addr);
807
808         return result;
809 }
810
811
812 /*
813  * fast path service functions
814  */
815
816 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
817 {
818         /* Tell compiler that consumer and producer can change */
819         barrier();
820         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
821 }
822
823 /* free skb in the packet ring at pos idx
824  * return idx of last bd freed
825  */
826 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
827                              u16 idx)
828 {
829         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
830         struct eth_tx_start_bd *tx_start_bd;
831         struct eth_tx_bd *tx_data_bd;
832         struct sk_buff *skb = tx_buf->skb;
833         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
834         int nbd;
835
836         /* prefetch skb end pointer to speedup dev_kfree_skb() */
837         prefetch(&skb->end);
838
839         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
840            idx, tx_buf, skb);
841
842         /* unmap first bd */
843         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
844         tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
845         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
846                          BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
847
848         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
849 #ifdef BNX2X_STOP_ON_ERROR
850         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
851                 BNX2X_ERR("BAD nbd!\n");
852                 bnx2x_panic();
853         }
854 #endif
855         new_cons = nbd + tx_buf->first_bd;
856
857         /* Get the next bd */
858         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
859
860         /* Skip a parse bd... */
861         --nbd;
862         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
863
864         /* ...and the TSO split header bd since they have no mapping */
865         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
866                 --nbd;
867                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
868         }
869
870         /* now free frags */
871         while (nbd > 0) {
872
873                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
874                 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
875                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
876                                BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
877                 if (--nbd)
878                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
879         }
880
881         /* release skb */
882         WARN_ON(!skb);
883         dev_kfree_skb(skb);
884         tx_buf->first_bd = 0;
885         tx_buf->skb = NULL;
886
887         return new_cons;
888 }
889
890 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
891 {
892         s16 used;
893         u16 prod;
894         u16 cons;
895
896         barrier(); /* Tell compiler that prod and cons can change */
897         prod = fp->tx_bd_prod;
898         cons = fp->tx_bd_cons;
899
900         /* NUM_TX_RINGS = number of "next-page" entries
901            It will be used as a threshold */
902         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
903
904 #ifdef BNX2X_STOP_ON_ERROR
905         WARN_ON(used < 0);
906         WARN_ON(used > fp->bp->tx_ring_size);
907         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
908 #endif
909
910         return (s16)(fp->bp->tx_ring_size) - used;
911 }
912
913 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
914 {
915         u16 hw_cons;
916
917         /* Tell compiler that status block fields can change */
918         barrier();
919         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
920         return hw_cons != fp->tx_pkt_cons;
921 }
922
923 static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
924 {
925         struct bnx2x *bp = fp->bp;
926         struct netdev_queue *txq;
927         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
928
929 #ifdef BNX2X_STOP_ON_ERROR
930         if (unlikely(bp->panic))
931                 return -1;
932 #endif
933
934         txq = netdev_get_tx_queue(bp->dev, fp->index);
935         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
936         sw_cons = fp->tx_pkt_cons;
937
938         while (sw_cons != hw_cons) {
939                 u16 pkt_cons;
940
941                 pkt_cons = TX_BD(sw_cons);
942
943                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
944
945                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
946                    hw_cons, sw_cons, pkt_cons);
947
948 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
949                         rmb();
950                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
951                 }
952 */
953                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
954                 sw_cons++;
955         }
956
957         fp->tx_pkt_cons = sw_cons;
958         fp->tx_bd_cons = bd_cons;
959
960         /* TBD need a thresh? */
961         if (unlikely(netif_tx_queue_stopped(txq))) {
962
963                 /* Need to make the tx_bd_cons update visible to start_xmit()
964                  * before checking for netif_tx_queue_stopped().  Without the
965                  * memory barrier, there is a small possibility that
966                  * start_xmit() will miss it and cause the queue to be stopped
967                  * forever.
968                  */
969                 smp_mb();
970
971                 if ((netif_tx_queue_stopped(txq)) &&
972                     (bp->state == BNX2X_STATE_OPEN) &&
973                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
974                         netif_tx_wake_queue(txq);
975         }
976         return 0;
977 }
978
979 #ifdef BCM_CNIC
980 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
981 #endif
982
983 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
984                            union eth_rx_cqe *rr_cqe)
985 {
986         struct bnx2x *bp = fp->bp;
987         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
988         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
989
990         DP(BNX2X_MSG_SP,
991            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
992            fp->index, cid, command, bp->state,
993            rr_cqe->ramrod_cqe.ramrod_type);
994
995         bp->spq_left++;
996
997         if (fp->index) {
998                 switch (command | fp->state) {
999                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
1000                                                 BNX2X_FP_STATE_OPENING):
1001                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
1002                            cid);
1003                         fp->state = BNX2X_FP_STATE_OPEN;
1004                         break;
1005
1006                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1007                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1008                            cid);
1009                         fp->state = BNX2X_FP_STATE_HALTED;
1010                         break;
1011
1012                 default:
1013                         BNX2X_ERR("unexpected MC reply (%d)  "
1014                                   "fp->state is %x\n", command, fp->state);
1015                         break;
1016                 }
1017                 mb(); /* force bnx2x_wait_ramrod() to see the change */
1018                 return;
1019         }
1020
1021         switch (command | bp->state) {
1022         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1023                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1024                 bp->state = BNX2X_STATE_OPEN;
1025                 break;
1026
1027         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1028                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1029                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1030                 fp->state = BNX2X_FP_STATE_HALTED;
1031                 break;
1032
1033         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1034                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1035                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1036                 break;
1037
1038 #ifdef BCM_CNIC
1039         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1040                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1041                 bnx2x_cnic_cfc_comp(bp, cid);
1042                 break;
1043 #endif
1044
1045         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1046         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1047                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1048                 bp->set_mac_pending--;
1049                 smp_wmb();
1050                 break;
1051
1052         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1053                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1054                 bp->set_mac_pending--;
1055                 smp_wmb();
1056                 break;
1057
1058         default:
1059                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
1060                           command, bp->state);
1061                 break;
1062         }
1063         mb(); /* force bnx2x_wait_ramrod() to see the change */
1064 }
1065
1066 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1067                                      struct bnx2x_fastpath *fp, u16 index)
1068 {
1069         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1070         struct page *page = sw_buf->page;
1071         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1072
1073         /* Skip "next page" elements */
1074         if (!page)
1075                 return;
1076
1077         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1078                        SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1079         __free_pages(page, PAGES_PER_SGE_SHIFT);
1080
1081         sw_buf->page = NULL;
1082         sge->addr_hi = 0;
1083         sge->addr_lo = 0;
1084 }
1085
1086 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1087                                            struct bnx2x_fastpath *fp, int last)
1088 {
1089         int i;
1090
1091         for (i = 0; i < last; i++)
1092                 bnx2x_free_rx_sge(bp, fp, i);
1093 }
1094
1095 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1096                                      struct bnx2x_fastpath *fp, u16 index)
1097 {
1098         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1099         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1100         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1101         dma_addr_t mapping;
1102
1103         if (unlikely(page == NULL))
1104                 return -ENOMEM;
1105
1106         mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1107                                PCI_DMA_FROMDEVICE);
1108         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1109                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1110                 return -ENOMEM;
1111         }
1112
1113         sw_buf->page = page;
1114         pci_unmap_addr_set(sw_buf, mapping, mapping);
1115
1116         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1117         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1118
1119         return 0;
1120 }
1121
1122 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1123                                      struct bnx2x_fastpath *fp, u16 index)
1124 {
1125         struct sk_buff *skb;
1126         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1127         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1128         dma_addr_t mapping;
1129
1130         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1131         if (unlikely(skb == NULL))
1132                 return -ENOMEM;
1133
1134         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1135                                  PCI_DMA_FROMDEVICE);
1136         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1137                 dev_kfree_skb(skb);
1138                 return -ENOMEM;
1139         }
1140
1141         rx_buf->skb = skb;
1142         pci_unmap_addr_set(rx_buf, mapping, mapping);
1143
1144         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1145         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1146
1147         return 0;
1148 }
1149
1150 /* note that we are not allocating a new skb,
1151  * we are just moving one from cons to prod
1152  * we are not creating a new mapping,
1153  * so there is no need to check for dma_mapping_error().
1154  */
1155 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1156                                struct sk_buff *skb, u16 cons, u16 prod)
1157 {
1158         struct bnx2x *bp = fp->bp;
1159         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1160         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1161         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1162         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1163
1164         pci_dma_sync_single_for_device(bp->pdev,
1165                                        pci_unmap_addr(cons_rx_buf, mapping),
1166                                        RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1167
1168         prod_rx_buf->skb = cons_rx_buf->skb;
1169         pci_unmap_addr_set(prod_rx_buf, mapping,
1170                            pci_unmap_addr(cons_rx_buf, mapping));
1171         *prod_bd = *cons_bd;
1172 }
1173
1174 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1175                                              u16 idx)
1176 {
1177         u16 last_max = fp->last_max_sge;
1178
1179         if (SUB_S16(idx, last_max) > 0)
1180                 fp->last_max_sge = idx;
1181 }
1182
1183 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1184 {
1185         int i, j;
1186
1187         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1188                 int idx = RX_SGE_CNT * i - 1;
1189
1190                 for (j = 0; j < 2; j++) {
1191                         SGE_MASK_CLEAR_BIT(fp, idx);
1192                         idx--;
1193                 }
1194         }
1195 }
1196
1197 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1198                                   struct eth_fast_path_rx_cqe *fp_cqe)
1199 {
1200         struct bnx2x *bp = fp->bp;
1201         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1202                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1203                       SGE_PAGE_SHIFT;
1204         u16 last_max, last_elem, first_elem;
1205         u16 delta = 0;
1206         u16 i;
1207
1208         if (!sge_len)
1209                 return;
1210
1211         /* First mark all used pages */
1212         for (i = 0; i < sge_len; i++)
1213                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1214
1215         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1216            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1217
1218         /* Here we assume that the last SGE index is the biggest */
1219         prefetch((void *)(fp->sge_mask));
1220         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1221
1222         last_max = RX_SGE(fp->last_max_sge);
1223         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1224         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1225
1226         /* If ring is not full */
1227         if (last_elem + 1 != first_elem)
1228                 last_elem++;
1229
1230         /* Now update the prod */
1231         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1232                 if (likely(fp->sge_mask[i]))
1233                         break;
1234
1235                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1236                 delta += RX_SGE_MASK_ELEM_SZ;
1237         }
1238
1239         if (delta > 0) {
1240                 fp->rx_sge_prod += delta;
1241                 /* clear page-end entries */
1242                 bnx2x_clear_sge_mask_next_elems(fp);
1243         }
1244
1245         DP(NETIF_MSG_RX_STATUS,
1246            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1247            fp->last_max_sge, fp->rx_sge_prod);
1248 }
1249
1250 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1251 {
1252         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1253         memset(fp->sge_mask, 0xff,
1254                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1255
1256         /* Clear the two last indices in the page to 1:
1257            these are the indices that correspond to the "next" element,
1258            hence will never be indicated and should be removed from
1259            the calculations. */
1260         bnx2x_clear_sge_mask_next_elems(fp);
1261 }
1262
1263 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1264                             struct sk_buff *skb, u16 cons, u16 prod)
1265 {
1266         struct bnx2x *bp = fp->bp;
1267         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1268         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1269         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1270         dma_addr_t mapping;
1271
1272         /* move empty skb from pool to prod and map it */
1273         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1274         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1275                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1276         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1277
1278         /* move partial skb from cons to pool (don't unmap yet) */
1279         fp->tpa_pool[queue] = *cons_rx_buf;
1280
1281         /* mark bin state as start - print error if current state != stop */
1282         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1283                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1284
1285         fp->tpa_state[queue] = BNX2X_TPA_START;
1286
1287         /* point prod_bd to new skb */
1288         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1289         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1290
1291 #ifdef BNX2X_STOP_ON_ERROR
1292         fp->tpa_queue_used |= (1 << queue);
1293 #ifdef __powerpc64__
1294         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1295 #else
1296         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1297 #endif
1298            fp->tpa_queue_used);
1299 #endif
1300 }
1301
1302 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1303                                struct sk_buff *skb,
1304                                struct eth_fast_path_rx_cqe *fp_cqe,
1305                                u16 cqe_idx)
1306 {
1307         struct sw_rx_page *rx_pg, old_rx_pg;
1308         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1309         u32 i, frag_len, frag_size, pages;
1310         int err;
1311         int j;
1312
1313         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1314         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1315
1316         /* This is needed in order to enable forwarding support */
1317         if (frag_size)
1318                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1319                                                max(frag_size, (u32)len_on_bd));
1320
1321 #ifdef BNX2X_STOP_ON_ERROR
1322         if (pages >
1323             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1324                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1325                           pages, cqe_idx);
1326                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1327                           fp_cqe->pkt_len, len_on_bd);
1328                 bnx2x_panic();
1329                 return -EINVAL;
1330         }
1331 #endif
1332
1333         /* Run through the SGL and compose the fragmented skb */
1334         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1335                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1336
1337                 /* FW gives the indices of the SGE as if the ring is an array
1338                    (meaning that "next" element will consume 2 indices) */
1339                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1340                 rx_pg = &fp->rx_page_ring[sge_idx];
1341                 old_rx_pg = *rx_pg;
1342
1343                 /* If we fail to allocate a substitute page, we simply stop
1344                    where we are and drop the whole packet */
1345                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1346                 if (unlikely(err)) {
1347                         fp->eth_q_stats.rx_skb_alloc_failed++;
1348                         return err;
1349                 }
1350
1351                 /* Unmap the page as we r going to pass it to the stack */
1352                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1353                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1354
1355                 /* Add one frag and update the appropriate fields in the skb */
1356                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1357
1358                 skb->data_len += frag_len;
1359                 skb->truesize += frag_len;
1360                 skb->len += frag_len;
1361
1362                 frag_size -= frag_len;
1363         }
1364
1365         return 0;
1366 }
1367
1368 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1369                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1370                            u16 cqe_idx)
1371 {
1372         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1373         struct sk_buff *skb = rx_buf->skb;
1374         /* alloc new skb */
1375         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1376
1377         /* Unmap skb in the pool anyway, as we are going to change
1378            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1379            fails. */
1380         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1381                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1382
1383         if (likely(new_skb)) {
1384                 /* fix ip xsum and give it to the stack */
1385                 /* (no need to map the new skb) */
1386 #ifdef BCM_VLAN
1387                 int is_vlan_cqe =
1388                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1389                          PARSING_FLAGS_VLAN);
1390                 int is_not_hwaccel_vlan_cqe =
1391                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1392 #endif
1393
1394                 prefetch(skb);
1395                 prefetch(((char *)(skb)) + 128);
1396
1397 #ifdef BNX2X_STOP_ON_ERROR
1398                 if (pad + len > bp->rx_buf_size) {
1399                         BNX2X_ERR("skb_put is about to fail...  "
1400                                   "pad %d  len %d  rx_buf_size %d\n",
1401                                   pad, len, bp->rx_buf_size);
1402                         bnx2x_panic();
1403                         return;
1404                 }
1405 #endif
1406
1407                 skb_reserve(skb, pad);
1408                 skb_put(skb, len);
1409
1410                 skb->protocol = eth_type_trans(skb, bp->dev);
1411                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1412
1413                 {
1414                         struct iphdr *iph;
1415
1416                         iph = (struct iphdr *)skb->data;
1417 #ifdef BCM_VLAN
1418                         /* If there is no Rx VLAN offloading -
1419                            take VLAN tag into an account */
1420                         if (unlikely(is_not_hwaccel_vlan_cqe))
1421                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1422 #endif
1423                         iph->check = 0;
1424                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1425                 }
1426
1427                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1428                                          &cqe->fast_path_cqe, cqe_idx)) {
1429 #ifdef BCM_VLAN
1430                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1431                             (!is_not_hwaccel_vlan_cqe))
1432                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1433                                                 le16_to_cpu(cqe->fast_path_cqe.
1434                                                             vlan_tag));
1435                         else
1436 #endif
1437                                 netif_receive_skb(skb);
1438                 } else {
1439                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1440                            " - dropping packet!\n");
1441                         dev_kfree_skb(skb);
1442                 }
1443
1444
1445                 /* put new skb in bin */
1446                 fp->tpa_pool[queue].skb = new_skb;
1447
1448         } else {
1449                 /* else drop the packet and keep the buffer in the bin */
1450                 DP(NETIF_MSG_RX_STATUS,
1451                    "Failed to allocate new skb - dropping packet!\n");
1452                 fp->eth_q_stats.rx_skb_alloc_failed++;
1453         }
1454
1455         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1456 }
1457
1458 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1459                                         struct bnx2x_fastpath *fp,
1460                                         u16 bd_prod, u16 rx_comp_prod,
1461                                         u16 rx_sge_prod)
1462 {
1463         struct ustorm_eth_rx_producers rx_prods = {0};
1464         int i;
1465
1466         /* Update producers */
1467         rx_prods.bd_prod = bd_prod;
1468         rx_prods.cqe_prod = rx_comp_prod;
1469         rx_prods.sge_prod = rx_sge_prod;
1470
1471         /*
1472          * Make sure that the BD and SGE data is updated before updating the
1473          * producers since FW might read the BD/SGE right after the producer
1474          * is updated.
1475          * This is only applicable for weak-ordered memory model archs such
1476          * as IA-64. The following barrier is also mandatory since FW will
1477          * assumes BDs must have buffers.
1478          */
1479         wmb();
1480
1481         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1482                 REG_WR(bp, BAR_USTRORM_INTMEM +
1483                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1484                        ((u32 *)&rx_prods)[i]);
1485
1486         mmiowb(); /* keep prod updates ordered */
1487
1488         DP(NETIF_MSG_RX_STATUS,
1489            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1490            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1491 }
1492
1493 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1494 {
1495         struct bnx2x *bp = fp->bp;
1496         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1497         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1498         int rx_pkt = 0;
1499
1500 #ifdef BNX2X_STOP_ON_ERROR
1501         if (unlikely(bp->panic))
1502                 return 0;
1503 #endif
1504
1505         /* CQ "next element" is of the size of the regular element,
1506            that's why it's ok here */
1507         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1508         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1509                 hw_comp_cons++;
1510
1511         bd_cons = fp->rx_bd_cons;
1512         bd_prod = fp->rx_bd_prod;
1513         bd_prod_fw = bd_prod;
1514         sw_comp_cons = fp->rx_comp_cons;
1515         sw_comp_prod = fp->rx_comp_prod;
1516
1517         /* Memory barrier necessary as speculative reads of the rx
1518          * buffer can be ahead of the index in the status block
1519          */
1520         rmb();
1521
1522         DP(NETIF_MSG_RX_STATUS,
1523            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1524            fp->index, hw_comp_cons, sw_comp_cons);
1525
1526         while (sw_comp_cons != hw_comp_cons) {
1527                 struct sw_rx_bd *rx_buf = NULL;
1528                 struct sk_buff *skb;
1529                 union eth_rx_cqe *cqe;
1530                 u8 cqe_fp_flags;
1531                 u16 len, pad;
1532
1533                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1534                 bd_prod = RX_BD(bd_prod);
1535                 bd_cons = RX_BD(bd_cons);
1536
1537                 /* Prefetch the page containing the BD descriptor
1538                    at producer's index. It will be needed when new skb is
1539                    allocated */
1540                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1541                                              (&fp->rx_desc_ring[bd_prod])) -
1542                                   PAGE_SIZE + 1));
1543
1544                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1545                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1546
1547                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1548                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1549                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1550                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1551                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1552                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1553
1554                 /* is this a slowpath msg? */
1555                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1556                         bnx2x_sp_event(fp, cqe);
1557                         goto next_cqe;
1558
1559                 /* this is an rx packet */
1560                 } else {
1561                         rx_buf = &fp->rx_buf_ring[bd_cons];
1562                         skb = rx_buf->skb;
1563                         prefetch(skb);
1564                         prefetch((u8 *)skb + 256);
1565                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1566                         pad = cqe->fast_path_cqe.placement_offset;
1567
1568                         /* If CQE is marked both TPA_START and TPA_END
1569                            it is a non-TPA CQE */
1570                         if ((!fp->disable_tpa) &&
1571                             (TPA_TYPE(cqe_fp_flags) !=
1572                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1573                                 u16 queue = cqe->fast_path_cqe.queue_index;
1574
1575                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1576                                         DP(NETIF_MSG_RX_STATUS,
1577                                            "calling tpa_start on queue %d\n",
1578                                            queue);
1579
1580                                         bnx2x_tpa_start(fp, queue, skb,
1581                                                         bd_cons, bd_prod);
1582                                         goto next_rx;
1583                                 }
1584
1585                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1586                                         DP(NETIF_MSG_RX_STATUS,
1587                                            "calling tpa_stop on queue %d\n",
1588                                            queue);
1589
1590                                         if (!BNX2X_RX_SUM_FIX(cqe))
1591                                                 BNX2X_ERR("STOP on none TCP "
1592                                                           "data\n");
1593
1594                                         /* This is a size of the linear data
1595                                            on this skb */
1596                                         len = le16_to_cpu(cqe->fast_path_cqe.
1597                                                                 len_on_bd);
1598                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1599                                                     len, cqe, comp_ring_cons);
1600 #ifdef BNX2X_STOP_ON_ERROR
1601                                         if (bp->panic)
1602                                                 return 0;
1603 #endif
1604
1605                                         bnx2x_update_sge_prod(fp,
1606                                                         &cqe->fast_path_cqe);
1607                                         goto next_cqe;
1608                                 }
1609                         }
1610
1611                         pci_dma_sync_single_for_device(bp->pdev,
1612                                         pci_unmap_addr(rx_buf, mapping),
1613                                                        pad + RX_COPY_THRESH,
1614                                                        PCI_DMA_FROMDEVICE);
1615                         prefetch(skb);
1616                         prefetch(((char *)(skb)) + 128);
1617
1618                         /* is this an error packet? */
1619                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1620                                 DP(NETIF_MSG_RX_ERR,
1621                                    "ERROR  flags %x  rx packet %u\n",
1622                                    cqe_fp_flags, sw_comp_cons);
1623                                 fp->eth_q_stats.rx_err_discard_pkt++;
1624                                 goto reuse_rx;
1625                         }
1626
1627                         /* Since we don't have a jumbo ring
1628                          * copy small packets if mtu > 1500
1629                          */
1630                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1631                             (len <= RX_COPY_THRESH)) {
1632                                 struct sk_buff *new_skb;
1633
1634                                 new_skb = netdev_alloc_skb(bp->dev,
1635                                                            len + pad);
1636                                 if (new_skb == NULL) {
1637                                         DP(NETIF_MSG_RX_ERR,
1638                                            "ERROR  packet dropped "
1639                                            "because of alloc failure\n");
1640                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1641                                         goto reuse_rx;
1642                                 }
1643
1644                                 /* aligned copy */
1645                                 skb_copy_from_linear_data_offset(skb, pad,
1646                                                     new_skb->data + pad, len);
1647                                 skb_reserve(new_skb, pad);
1648                                 skb_put(new_skb, len);
1649
1650                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1651
1652                                 skb = new_skb;
1653
1654                         } else
1655                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1656                                 pci_unmap_single(bp->pdev,
1657                                         pci_unmap_addr(rx_buf, mapping),
1658                                                  bp->rx_buf_size,
1659                                                  PCI_DMA_FROMDEVICE);
1660                                 skb_reserve(skb, pad);
1661                                 skb_put(skb, len);
1662
1663                         } else {
1664                                 DP(NETIF_MSG_RX_ERR,
1665                                    "ERROR  packet dropped because "
1666                                    "of alloc failure\n");
1667                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1668 reuse_rx:
1669                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1670                                 goto next_rx;
1671                         }
1672
1673                         skb->protocol = eth_type_trans(skb, bp->dev);
1674
1675                         skb->ip_summed = CHECKSUM_NONE;
1676                         if (bp->rx_csum) {
1677                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1678                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1679                                 else
1680                                         fp->eth_q_stats.hw_csum_err++;
1681                         }
1682                 }
1683
1684                 skb_record_rx_queue(skb, fp->index);
1685
1686 #ifdef BCM_VLAN
1687                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1688                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1689                      PARSING_FLAGS_VLAN))
1690                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1691                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1692                 else
1693 #endif
1694                         netif_receive_skb(skb);
1695
1696
1697 next_rx:
1698                 rx_buf->skb = NULL;
1699
1700                 bd_cons = NEXT_RX_IDX(bd_cons);
1701                 bd_prod = NEXT_RX_IDX(bd_prod);
1702                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1703                 rx_pkt++;
1704 next_cqe:
1705                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1706                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1707
1708                 if (rx_pkt == budget)
1709                         break;
1710         } /* while */
1711
1712         fp->rx_bd_cons = bd_cons;
1713         fp->rx_bd_prod = bd_prod_fw;
1714         fp->rx_comp_cons = sw_comp_cons;
1715         fp->rx_comp_prod = sw_comp_prod;
1716
1717         /* Update producers */
1718         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1719                              fp->rx_sge_prod);
1720
1721         fp->rx_pkt += rx_pkt;
1722         fp->rx_calls++;
1723
1724         return rx_pkt;
1725 }
1726
1727 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1728 {
1729         struct bnx2x_fastpath *fp = fp_cookie;
1730         struct bnx2x *bp = fp->bp;
1731
1732         /* Return here if interrupt is disabled */
1733         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1734                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1735                 return IRQ_HANDLED;
1736         }
1737
1738         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1739            fp->index, fp->sb_id);
1740         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1741
1742 #ifdef BNX2X_STOP_ON_ERROR
1743         if (unlikely(bp->panic))
1744                 return IRQ_HANDLED;
1745 #endif
1746
1747         /* Handle Rx and Tx according to MSI-X vector */
1748         prefetch(fp->rx_cons_sb);
1749         prefetch(fp->tx_cons_sb);
1750         prefetch(&fp->status_blk->u_status_block.status_block_index);
1751         prefetch(&fp->status_blk->c_status_block.status_block_index);
1752         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1753
1754         return IRQ_HANDLED;
1755 }
1756
1757 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1758 {
1759         struct bnx2x *bp = netdev_priv(dev_instance);
1760         u16 status = bnx2x_ack_int(bp);
1761         u16 mask;
1762         int i;
1763
1764         /* Return here if interrupt is shared and it's not for us */
1765         if (unlikely(status == 0)) {
1766                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1767                 return IRQ_NONE;
1768         }
1769         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1770
1771         /* Return here if interrupt is disabled */
1772         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1773                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1774                 return IRQ_HANDLED;
1775         }
1776
1777 #ifdef BNX2X_STOP_ON_ERROR
1778         if (unlikely(bp->panic))
1779                 return IRQ_HANDLED;
1780 #endif
1781
1782         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1783                 struct bnx2x_fastpath *fp = &bp->fp[i];
1784
1785                 mask = 0x2 << fp->sb_id;
1786                 if (status & mask) {
1787                         /* Handle Rx and Tx according to SB id */
1788                         prefetch(fp->rx_cons_sb);
1789                         prefetch(&fp->status_blk->u_status_block.
1790                                                 status_block_index);
1791                         prefetch(fp->tx_cons_sb);
1792                         prefetch(&fp->status_blk->c_status_block.
1793                                                 status_block_index);
1794                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1795                         status &= ~mask;
1796                 }
1797         }
1798
1799 #ifdef BCM_CNIC
1800         mask = 0x2 << CNIC_SB_ID(bp);
1801         if (status & (mask | 0x1)) {
1802                 struct cnic_ops *c_ops = NULL;
1803
1804                 rcu_read_lock();
1805                 c_ops = rcu_dereference(bp->cnic_ops);
1806                 if (c_ops)
1807                         c_ops->cnic_handler(bp->cnic_data, NULL);
1808                 rcu_read_unlock();
1809
1810                 status &= ~mask;
1811         }
1812 #endif
1813
1814         if (unlikely(status & 0x1)) {
1815                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1816
1817                 status &= ~0x1;
1818                 if (!status)
1819                         return IRQ_HANDLED;
1820         }
1821
1822         if (status)
1823                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1824                    status);
1825
1826         return IRQ_HANDLED;
1827 }
1828
1829 /* end of fast path */
1830
1831 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1832
1833 /* Link */
1834
1835 /*
1836  * General service functions
1837  */
1838
1839 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1840 {
1841         u32 lock_status;
1842         u32 resource_bit = (1 << resource);
1843         int func = BP_FUNC(bp);
1844         u32 hw_lock_control_reg;
1845         int cnt;
1846
1847         /* Validating that the resource is within range */
1848         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1849                 DP(NETIF_MSG_HW,
1850                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1851                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1852                 return -EINVAL;
1853         }
1854
1855         if (func <= 5) {
1856                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1857         } else {
1858                 hw_lock_control_reg =
1859                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1860         }
1861
1862         /* Validating that the resource is not already taken */
1863         lock_status = REG_RD(bp, hw_lock_control_reg);
1864         if (lock_status & resource_bit) {
1865                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1866                    lock_status, resource_bit);
1867                 return -EEXIST;
1868         }
1869
1870         /* Try for 5 second every 5ms */
1871         for (cnt = 0; cnt < 1000; cnt++) {
1872                 /* Try to acquire the lock */
1873                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1874                 lock_status = REG_RD(bp, hw_lock_control_reg);
1875                 if (lock_status & resource_bit)
1876                         return 0;
1877
1878                 msleep(5);
1879         }
1880         DP(NETIF_MSG_HW, "Timeout\n");
1881         return -EAGAIN;
1882 }
1883
1884 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1885 {
1886         u32 lock_status;
1887         u32 resource_bit = (1 << resource);
1888         int func = BP_FUNC(bp);
1889         u32 hw_lock_control_reg;
1890
1891         /* Validating that the resource is within range */
1892         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1893                 DP(NETIF_MSG_HW,
1894                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1895                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1896                 return -EINVAL;
1897         }
1898
1899         if (func <= 5) {
1900                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1901         } else {
1902                 hw_lock_control_reg =
1903                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1904         }
1905
1906         /* Validating that the resource is currently taken */
1907         lock_status = REG_RD(bp, hw_lock_control_reg);
1908         if (!(lock_status & resource_bit)) {
1909                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1910                    lock_status, resource_bit);
1911                 return -EFAULT;
1912         }
1913
1914         REG_WR(bp, hw_lock_control_reg, resource_bit);
1915         return 0;
1916 }
1917
1918 /* HW Lock for shared dual port PHYs */
1919 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1920 {
1921         mutex_lock(&bp->port.phy_mutex);
1922
1923         if (bp->port.need_hw_lock)
1924                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1925 }
1926
1927 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1928 {
1929         if (bp->port.need_hw_lock)
1930                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1931
1932         mutex_unlock(&bp->port.phy_mutex);
1933 }
1934
1935 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1936 {
1937         /* The GPIO should be swapped if swap register is set and active */
1938         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940         int gpio_shift = gpio_num +
1941                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942         u32 gpio_mask = (1 << gpio_shift);
1943         u32 gpio_reg;
1944         int value;
1945
1946         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1947                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1948                 return -EINVAL;
1949         }
1950
1951         /* read GPIO value */
1952         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1953
1954         /* get the requested pin value */
1955         if ((gpio_reg & gpio_mask) == gpio_mask)
1956                 value = 1;
1957         else
1958                 value = 0;
1959
1960         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1961
1962         return value;
1963 }
1964
1965 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1966 {
1967         /* The GPIO should be swapped if swap register is set and active */
1968         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1969                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1970         int gpio_shift = gpio_num +
1971                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1972         u32 gpio_mask = (1 << gpio_shift);
1973         u32 gpio_reg;
1974
1975         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1976                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1977                 return -EINVAL;
1978         }
1979
1980         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1981         /* read GPIO and mask except the float bits */
1982         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1983
1984         switch (mode) {
1985         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1986                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1987                    gpio_num, gpio_shift);
1988                 /* clear FLOAT and set CLR */
1989                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1990                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1991                 break;
1992
1993         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1994                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1995                    gpio_num, gpio_shift);
1996                 /* clear FLOAT and set SET */
1997                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1998                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1999                 break;
2000
2001         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2002                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2003                    gpio_num, gpio_shift);
2004                 /* set FLOAT */
2005                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2006                 break;
2007
2008         default:
2009                 break;
2010         }
2011
2012         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2013         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2014
2015         return 0;
2016 }
2017
2018 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2019 {
2020         /* The GPIO should be swapped if swap register is set and active */
2021         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2022                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2023         int gpio_shift = gpio_num +
2024                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2025         u32 gpio_mask = (1 << gpio_shift);
2026         u32 gpio_reg;
2027
2028         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2029                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2030                 return -EINVAL;
2031         }
2032
2033         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2034         /* read GPIO int */
2035         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2036
2037         switch (mode) {
2038         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2039                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2040                                    "output low\n", gpio_num, gpio_shift);
2041                 /* clear SET and set CLR */
2042                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2043                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2044                 break;
2045
2046         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2047                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2048                                    "output high\n", gpio_num, gpio_shift);
2049                 /* clear CLR and set SET */
2050                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2051                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2052                 break;
2053
2054         default:
2055                 break;
2056         }
2057
2058         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2059         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2060
2061         return 0;
2062 }
2063
2064 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2065 {
2066         u32 spio_mask = (1 << spio_num);
2067         u32 spio_reg;
2068
2069         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2070             (spio_num > MISC_REGISTERS_SPIO_7)) {
2071                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2072                 return -EINVAL;
2073         }
2074
2075         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2076         /* read SPIO and mask except the float bits */
2077         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2078
2079         switch (mode) {
2080         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2081                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2082                 /* clear FLOAT and set CLR */
2083                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2084                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2085                 break;
2086
2087         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2088                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2089                 /* clear FLOAT and set SET */
2090                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2091                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2092                 break;
2093
2094         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2095                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2096                 /* set FLOAT */
2097                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2098                 break;
2099
2100         default:
2101                 break;
2102         }
2103
2104         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2105         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2106
2107         return 0;
2108 }
2109
2110 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2111 {
2112         switch (bp->link_vars.ieee_fc &
2113                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2114         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2115                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2116                                           ADVERTISED_Pause);
2117                 break;
2118
2119         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2120                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2121                                          ADVERTISED_Pause);
2122                 break;
2123
2124         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2125                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2126                 break;
2127
2128         default:
2129                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2130                                           ADVERTISED_Pause);
2131                 break;
2132         }
2133 }
2134
2135 static void bnx2x_link_report(struct bnx2x *bp)
2136 {
2137         if (bp->flags & MF_FUNC_DIS) {
2138                 netif_carrier_off(bp->dev);
2139                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2140                 return;
2141         }
2142
2143         if (bp->link_vars.link_up) {
2144                 u16 line_speed;
2145
2146                 if (bp->state == BNX2X_STATE_OPEN)
2147                         netif_carrier_on(bp->dev);
2148                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2149
2150                 line_speed = bp->link_vars.line_speed;
2151                 if (IS_E1HMF(bp)) {
2152                         u16 vn_max_rate;
2153
2154                         vn_max_rate =
2155                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2156                                  FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2157                         if (vn_max_rate < line_speed)
2158                                 line_speed = vn_max_rate;
2159                 }
2160                 printk("%d Mbps ", line_speed);
2161
2162                 if (bp->link_vars.duplex == DUPLEX_FULL)
2163                         printk("full duplex");
2164                 else
2165                         printk("half duplex");
2166
2167                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2168                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2169                                 printk(", receive ");
2170                                 if (bp->link_vars.flow_ctrl &
2171                                     BNX2X_FLOW_CTRL_TX)
2172                                         printk("& transmit ");
2173                         } else {
2174                                 printk(", transmit ");
2175                         }
2176                         printk("flow control ON");
2177                 }
2178                 printk("\n");
2179
2180         } else { /* link_down */
2181                 netif_carrier_off(bp->dev);
2182                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2183         }
2184 }
2185
2186 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2187 {
2188         if (!BP_NOMCP(bp)) {
2189                 u8 rc;
2190
2191                 /* Initialize link parameters structure variables */
2192                 /* It is recommended to turn off RX FC for jumbo frames
2193                    for better performance */
2194                 if (bp->dev->mtu > 5000)
2195                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2196                 else
2197                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2198
2199                 bnx2x_acquire_phy_lock(bp);
2200
2201                 if (load_mode == LOAD_DIAG)
2202                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2203
2204                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2205
2206                 bnx2x_release_phy_lock(bp);
2207
2208                 bnx2x_calc_fc_adv(bp);
2209
2210                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2211                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2212                         bnx2x_link_report(bp);
2213                 }
2214
2215                 return rc;
2216         }
2217         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2218         return -EINVAL;
2219 }
2220
2221 static void bnx2x_link_set(struct bnx2x *bp)
2222 {
2223         if (!BP_NOMCP(bp)) {
2224                 bnx2x_acquire_phy_lock(bp);
2225                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2226                 bnx2x_release_phy_lock(bp);
2227
2228                 bnx2x_calc_fc_adv(bp);
2229         } else
2230                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2231 }
2232
2233 static void bnx2x__link_reset(struct bnx2x *bp)
2234 {
2235         if (!BP_NOMCP(bp)) {
2236                 bnx2x_acquire_phy_lock(bp);
2237                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2238                 bnx2x_release_phy_lock(bp);
2239         } else
2240                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2241 }
2242
2243 static u8 bnx2x_link_test(struct bnx2x *bp)
2244 {
2245         u8 rc;
2246
2247         bnx2x_acquire_phy_lock(bp);
2248         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2249         bnx2x_release_phy_lock(bp);
2250
2251         return rc;
2252 }
2253
2254 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2255 {
2256         u32 r_param = bp->link_vars.line_speed / 8;
2257         u32 fair_periodic_timeout_usec;
2258         u32 t_fair;
2259
2260         memset(&(bp->cmng.rs_vars), 0,
2261                sizeof(struct rate_shaping_vars_per_port));
2262         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2263
2264         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2265         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2266
2267         /* this is the threshold below which no timer arming will occur
2268            1.25 coefficient is for the threshold to be a little bigger
2269            than the real time, to compensate for timer in-accuracy */
2270         bp->cmng.rs_vars.rs_threshold =
2271                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2272
2273         /* resolution of fairness timer */
2274         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2275         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2276         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2277
2278         /* this is the threshold below which we won't arm the timer anymore */
2279         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2280
2281         /* we multiply by 1e3/8 to get bytes/msec.
2282            We don't want the credits to pass a credit
2283            of the t_fair*FAIR_MEM (algorithm resolution) */
2284         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2285         /* since each tick is 4 usec */
2286         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2287 }
2288
2289 /* Calculates the sum of vn_min_rates.
2290    It's needed for further normalizing of the min_rates.
2291    Returns:
2292      sum of vn_min_rates.
2293        or
2294      0 - if all the min_rates are 0.
2295      In the later case fainess algorithm should be deactivated.
2296      If not all min_rates are zero then those that are zeroes will be set to 1.
2297  */
2298 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2299 {
2300         int all_zero = 1;
2301         int port = BP_PORT(bp);
2302         int vn;
2303
2304         bp->vn_weight_sum = 0;
2305         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2306                 int func = 2*vn + port;
2307                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2308                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2309                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2310
2311                 /* Skip hidden vns */
2312                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2313                         continue;
2314
2315                 /* If min rate is zero - set it to 1 */
2316                 if (!vn_min_rate)
2317                         vn_min_rate = DEF_MIN_RATE;
2318                 else
2319                         all_zero = 0;
2320
2321                 bp->vn_weight_sum += vn_min_rate;
2322         }
2323
2324         /* ... only if all min rates are zeros - disable fairness */
2325         if (all_zero) {
2326                 bp->cmng.flags.cmng_enables &=
2327                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2328                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2329                    "  fairness will be disabled\n");
2330         } else
2331                 bp->cmng.flags.cmng_enables |=
2332                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2333 }
2334
2335 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2336 {
2337         struct rate_shaping_vars_per_vn m_rs_vn;
2338         struct fairness_vars_per_vn m_fair_vn;
2339         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2340         u16 vn_min_rate, vn_max_rate;
2341         int i;
2342
2343         /* If function is hidden - set min and max to zeroes */
2344         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2345                 vn_min_rate = 0;
2346                 vn_max_rate = 0;
2347
2348         } else {
2349                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2350                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2351                 /* If min rate is zero - set it to 1 */
2352                 if (!vn_min_rate)
2353                         vn_min_rate = DEF_MIN_RATE;
2354                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2355                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2356         }
2357         DP(NETIF_MSG_IFUP,
2358            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2359            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2360
2361         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2362         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2363
2364         /* global vn counter - maximal Mbps for this vn */
2365         m_rs_vn.vn_counter.rate = vn_max_rate;
2366
2367         /* quota - number of bytes transmitted in this period */
2368         m_rs_vn.vn_counter.quota =
2369                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2370
2371         if (bp->vn_weight_sum) {
2372                 /* credit for each period of the fairness algorithm:
2373                    number of bytes in T_FAIR (the vn share the port rate).
2374                    vn_weight_sum should not be larger than 10000, thus
2375                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2376                    than zero */
2377                 m_fair_vn.vn_credit_delta =
2378                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2379                                                  (8 * bp->vn_weight_sum))),
2380                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2381                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2382                    m_fair_vn.vn_credit_delta);
2383         }
2384
2385         /* Store it to internal memory */
2386         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2387                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2388                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2389                        ((u32 *)(&m_rs_vn))[i]);
2390
2391         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2392                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2393                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2394                        ((u32 *)(&m_fair_vn))[i]);
2395 }
2396
2397
2398 /* This function is called upon link interrupt */
2399 static void bnx2x_link_attn(struct bnx2x *bp)
2400 {
2401         /* Make sure that we are synced with the current statistics */
2402         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2403
2404         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2405
2406         if (bp->link_vars.link_up) {
2407
2408                 /* dropless flow control */
2409                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2410                         int port = BP_PORT(bp);
2411                         u32 pause_enabled = 0;
2412
2413                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2414                                 pause_enabled = 1;
2415
2416                         REG_WR(bp, BAR_USTRORM_INTMEM +
2417                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2418                                pause_enabled);
2419                 }
2420
2421                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2422                         struct host_port_stats *pstats;
2423
2424                         pstats = bnx2x_sp(bp, port_stats);
2425                         /* reset old bmac stats */
2426                         memset(&(pstats->mac_stx[0]), 0,
2427                                sizeof(struct mac_stx));
2428                 }
2429                 if (bp->state == BNX2X_STATE_OPEN)
2430                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2431         }
2432
2433         /* indicate link status */
2434         bnx2x_link_report(bp);
2435
2436         if (IS_E1HMF(bp)) {
2437                 int port = BP_PORT(bp);
2438                 int func;
2439                 int vn;
2440
2441                 /* Set the attention towards other drivers on the same port */
2442                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2443                         if (vn == BP_E1HVN(bp))
2444                                 continue;
2445
2446                         func = ((vn << 1) | port);
2447                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2448                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2449                 }
2450
2451                 if (bp->link_vars.link_up) {
2452                         int i;
2453
2454                         /* Init rate shaping and fairness contexts */
2455                         bnx2x_init_port_minmax(bp);
2456
2457                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2458                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2459
2460                         /* Store it to internal memory */
2461                         for (i = 0;
2462                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2463                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2464                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2465                                        ((u32 *)(&bp->cmng))[i]);
2466                 }
2467         }
2468 }
2469
2470 static void bnx2x__link_status_update(struct bnx2x *bp)
2471 {
2472         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2473                 return;
2474
2475         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2476
2477         if (bp->link_vars.link_up)
2478                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2479         else
2480                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2481
2482         bnx2x_calc_vn_weight_sum(bp);
2483
2484         /* indicate link status */
2485         bnx2x_link_report(bp);
2486 }
2487
2488 static void bnx2x_pmf_update(struct bnx2x *bp)
2489 {
2490         int port = BP_PORT(bp);
2491         u32 val;
2492
2493         bp->port.pmf = 1;
2494         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2495
2496         /* enable nig attention */
2497         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2498         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2499         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2500
2501         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2502 }
2503
2504 /* end of Link */
2505
2506 /* slow path */
2507
2508 /*
2509  * General service functions
2510  */
2511
2512 /* send the MCP a request, block until there is a reply */
2513 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2514 {
2515         int func = BP_FUNC(bp);
2516         u32 seq = ++bp->fw_seq;
2517         u32 rc = 0;
2518         u32 cnt = 1;
2519         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2520
2521         mutex_lock(&bp->fw_mb_mutex);
2522         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2523         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2524
2525         do {
2526                 /* let the FW do it's magic ... */
2527                 msleep(delay);
2528
2529                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2530
2531                 /* Give the FW up to 5 second (500*10ms) */
2532         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2533
2534         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2535            cnt*delay, rc, seq);
2536
2537         /* is this a reply to our command? */
2538         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2539                 rc &= FW_MSG_CODE_MASK;
2540         else {
2541                 /* FW BUG! */
2542                 BNX2X_ERR("FW failed to respond!\n");
2543                 bnx2x_fw_dump(bp);
2544                 rc = 0;
2545         }
2546         mutex_unlock(&bp->fw_mb_mutex);
2547
2548         return rc;
2549 }
2550
2551 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2552 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2553 static void bnx2x_set_rx_mode(struct net_device *dev);
2554
2555 static void bnx2x_e1h_disable(struct bnx2x *bp)
2556 {
2557         int port = BP_PORT(bp);
2558
2559         netif_tx_disable(bp->dev);
2560
2561         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2562
2563         netif_carrier_off(bp->dev);
2564 }
2565
2566 static void bnx2x_e1h_enable(struct bnx2x *bp)
2567 {
2568         int port = BP_PORT(bp);
2569
2570         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2571
2572         /* Tx queue should be only reenabled */
2573         netif_tx_wake_all_queues(bp->dev);
2574
2575         /*
2576          * Should not call netif_carrier_on since it will be called if the link
2577          * is up when checking for link state
2578          */
2579 }
2580
2581 static void bnx2x_update_min_max(struct bnx2x *bp)
2582 {
2583         int port = BP_PORT(bp);
2584         int vn, i;
2585
2586         /* Init rate shaping and fairness contexts */
2587         bnx2x_init_port_minmax(bp);
2588
2589         bnx2x_calc_vn_weight_sum(bp);
2590
2591         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2592                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2593
2594         if (bp->port.pmf) {
2595                 int func;
2596
2597                 /* Set the attention towards other drivers on the same port */
2598                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2599                         if (vn == BP_E1HVN(bp))
2600                                 continue;
2601
2602                         func = ((vn << 1) | port);
2603                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2604                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2605                 }
2606
2607                 /* Store it to internal memory */
2608                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2609                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2610                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2611                                ((u32 *)(&bp->cmng))[i]);
2612         }
2613 }
2614
2615 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2616 {
2617         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2618
2619         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2620
2621                 /*
2622                  * This is the only place besides the function initialization
2623                  * where the bp->flags can change so it is done without any
2624                  * locks
2625                  */
2626                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2627                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2628                         bp->flags |= MF_FUNC_DIS;
2629
2630                         bnx2x_e1h_disable(bp);
2631                 } else {
2632                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2633                         bp->flags &= ~MF_FUNC_DIS;
2634
2635                         bnx2x_e1h_enable(bp);
2636                 }
2637                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2638         }
2639         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2640
2641                 bnx2x_update_min_max(bp);
2642                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2643         }
2644
2645         /* Report results to MCP */
2646         if (dcc_event)
2647                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2648         else
2649                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2650 }
2651
2652 /* must be called under the spq lock */
2653 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2654 {
2655         struct eth_spe *next_spe = bp->spq_prod_bd;
2656
2657         if (bp->spq_prod_bd == bp->spq_last_bd) {
2658                 bp->spq_prod_bd = bp->spq;
2659                 bp->spq_prod_idx = 0;
2660                 DP(NETIF_MSG_TIMER, "end of spq\n");
2661         } else {
2662                 bp->spq_prod_bd++;
2663                 bp->spq_prod_idx++;
2664         }
2665         return next_spe;
2666 }
2667
2668 /* must be called under the spq lock */
2669 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2670 {
2671         int func = BP_FUNC(bp);
2672
2673         /* Make sure that BD data is updated before writing the producer */
2674         wmb();
2675
2676         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2677                bp->spq_prod_idx);
2678         mmiowb();
2679 }
2680
2681 /* the slow path queue is odd since completions arrive on the fastpath ring */
2682 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2683                          u32 data_hi, u32 data_lo, int common)
2684 {
2685         struct eth_spe *spe;
2686
2687         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2688            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2689            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2690            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2691            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2692
2693 #ifdef BNX2X_STOP_ON_ERROR
2694         if (unlikely(bp->panic))
2695                 return -EIO;
2696 #endif
2697
2698         spin_lock_bh(&bp->spq_lock);
2699
2700         if (!bp->spq_left) {
2701                 BNX2X_ERR("BUG! SPQ ring full!\n");
2702                 spin_unlock_bh(&bp->spq_lock);
2703                 bnx2x_panic();
2704                 return -EBUSY;
2705         }
2706
2707         spe = bnx2x_sp_get_next(bp);
2708
2709         /* CID needs port number to be encoded int it */
2710         spe->hdr.conn_and_cmd_data =
2711                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2712                                      HW_CID(bp, cid)));
2713         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2714         if (common)
2715                 spe->hdr.type |=
2716                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2717
2718         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2719         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2720
2721         bp->spq_left--;
2722
2723         bnx2x_sp_prod_update(bp);
2724         spin_unlock_bh(&bp->spq_lock);
2725         return 0;
2726 }
2727
2728 /* acquire split MCP access lock register */
2729 static int bnx2x_acquire_alr(struct bnx2x *bp)
2730 {
2731         u32 i, j, val;
2732         int rc = 0;
2733
2734         might_sleep();
2735         i = 100;
2736         for (j = 0; j < i*10; j++) {
2737                 val = (1UL << 31);
2738                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2739                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2740                 if (val & (1L << 31))
2741                         break;
2742
2743                 msleep(5);
2744         }
2745         if (!(val & (1L << 31))) {
2746                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2747                 rc = -EBUSY;
2748         }
2749
2750         return rc;
2751 }
2752
2753 /* release split MCP access lock register */
2754 static void bnx2x_release_alr(struct bnx2x *bp)
2755 {
2756         u32 val = 0;
2757
2758         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2759 }
2760
2761 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2762 {
2763         struct host_def_status_block *def_sb = bp->def_status_blk;
2764         u16 rc = 0;
2765
2766         barrier(); /* status block is written to by the chip */
2767         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2768                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2769                 rc |= 1;
2770         }
2771         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2772                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2773                 rc |= 2;
2774         }
2775         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2776                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2777                 rc |= 4;
2778         }
2779         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2780                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2781                 rc |= 8;
2782         }
2783         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2784                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2785                 rc |= 16;
2786         }
2787         return rc;
2788 }
2789
2790 /*
2791  * slow path service functions
2792  */
2793
2794 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2795 {
2796         int port = BP_PORT(bp);
2797         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2798                        COMMAND_REG_ATTN_BITS_SET);
2799         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2800                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2801         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2802                                        NIG_REG_MASK_INTERRUPT_PORT0;
2803         u32 aeu_mask;
2804         u32 nig_mask = 0;
2805
2806         if (bp->attn_state & asserted)
2807                 BNX2X_ERR("IGU ERROR\n");
2808
2809         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2810         aeu_mask = REG_RD(bp, aeu_addr);
2811
2812         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2813            aeu_mask, asserted);
2814         aeu_mask &= ~(asserted & 0xff);
2815         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2816
2817         REG_WR(bp, aeu_addr, aeu_mask);
2818         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2819
2820         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2821         bp->attn_state |= asserted;
2822         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2823
2824         if (asserted & ATTN_HARD_WIRED_MASK) {
2825                 if (asserted & ATTN_NIG_FOR_FUNC) {
2826
2827                         bnx2x_acquire_phy_lock(bp);
2828
2829                         /* save nig interrupt mask */
2830                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2831                         REG_WR(bp, nig_int_mask_addr, 0);
2832
2833                         bnx2x_link_attn(bp);
2834
2835                         /* handle unicore attn? */
2836                 }
2837                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2838                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2839
2840                 if (asserted & GPIO_2_FUNC)
2841                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2842
2843                 if (asserted & GPIO_3_FUNC)
2844                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2845
2846                 if (asserted & GPIO_4_FUNC)
2847                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2848
2849                 if (port == 0) {
2850                         if (asserted & ATTN_GENERAL_ATTN_1) {
2851                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2852                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2853                         }
2854                         if (asserted & ATTN_GENERAL_ATTN_2) {
2855                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2856                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2857                         }
2858                         if (asserted & ATTN_GENERAL_ATTN_3) {
2859                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2860                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2861                         }
2862                 } else {
2863                         if (asserted & ATTN_GENERAL_ATTN_4) {
2864                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2865                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2866                         }
2867                         if (asserted & ATTN_GENERAL_ATTN_5) {
2868                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2869                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2870                         }
2871                         if (asserted & ATTN_GENERAL_ATTN_6) {
2872                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2873                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2874                         }
2875                 }
2876
2877         } /* if hardwired */
2878
2879         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2880            asserted, hc_addr);
2881         REG_WR(bp, hc_addr, asserted);
2882
2883         /* now set back the mask */
2884         if (asserted & ATTN_NIG_FOR_FUNC) {
2885                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2886                 bnx2x_release_phy_lock(bp);
2887         }
2888 }
2889
2890 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2891 {
2892         int port = BP_PORT(bp);
2893
2894         /* mark the failure */
2895         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2896         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2897         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2898                  bp->link_params.ext_phy_config);
2899
2900         /* log the failure */
2901         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2902                " the driver to shutdown the card to prevent permanent"
2903                " damage.  Please contact Dell Support for assistance\n",
2904                bp->dev->name);
2905 }
2906
2907 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2908 {
2909         int port = BP_PORT(bp);
2910         int reg_offset;
2911         u32 val, swap_val, swap_override;
2912
2913         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2914                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2915
2916         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2917
2918                 val = REG_RD(bp, reg_offset);
2919                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2920                 REG_WR(bp, reg_offset, val);
2921
2922                 BNX2X_ERR("SPIO5 hw attention\n");
2923
2924                 /* Fan failure attention */
2925                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2926                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2927                         /* Low power mode is controlled by GPIO 2 */
2928                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2929                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2930                         /* The PHY reset is controlled by GPIO 1 */
2931                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2932                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2933                         break;
2934
2935                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2936                         /* The PHY reset is controlled by GPIO 1 */
2937                         /* fake the port number to cancel the swap done in
2938                            set_gpio() */
2939                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2940                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2941                         port = (swap_val && swap_override) ^ 1;
2942                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2943                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2944                         break;
2945
2946                 default:
2947                         break;
2948                 }
2949                 bnx2x_fan_failure(bp);
2950         }
2951
2952         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2953                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2954                 bnx2x_acquire_phy_lock(bp);
2955                 bnx2x_handle_module_detect_int(&bp->link_params);
2956                 bnx2x_release_phy_lock(bp);
2957         }
2958
2959         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2960
2961                 val = REG_RD(bp, reg_offset);
2962                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2963                 REG_WR(bp, reg_offset, val);
2964
2965                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2966                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2967                 bnx2x_panic();
2968         }
2969 }
2970
2971 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2972 {
2973         u32 val;
2974
2975         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2976
2977                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2978                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2979                 /* DORQ discard attention */
2980                 if (val & 0x2)
2981                         BNX2X_ERR("FATAL error from DORQ\n");
2982         }
2983
2984         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2985
2986                 int port = BP_PORT(bp);
2987                 int reg_offset;
2988
2989                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2990                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2991
2992                 val = REG_RD(bp, reg_offset);
2993                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2994                 REG_WR(bp, reg_offset, val);
2995
2996                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2997                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2998                 bnx2x_panic();
2999         }
3000 }
3001
3002 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3003 {
3004         u32 val;
3005
3006         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3007
3008                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3009                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3010                 /* CFC error attention */
3011                 if (val & 0x2)
3012                         BNX2X_ERR("FATAL error from CFC\n");
3013         }
3014
3015         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3016
3017                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3018                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3019                 /* RQ_USDMDP_FIFO_OVERFLOW */
3020                 if (val & 0x18000)
3021                         BNX2X_ERR("FATAL error from PXP\n");
3022         }
3023
3024         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3025
3026                 int port = BP_PORT(bp);
3027                 int reg_offset;
3028
3029                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3030                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3031
3032                 val = REG_RD(bp, reg_offset);
3033                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3034                 REG_WR(bp, reg_offset, val);
3035
3036                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3037                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3038                 bnx2x_panic();
3039         }
3040 }
3041
3042 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3043 {
3044         u32 val;
3045
3046         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3047
3048                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3049                         int func = BP_FUNC(bp);
3050
3051                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3052                         bp->mf_config = SHMEM_RD(bp,
3053                                            mf_cfg.func_mf_config[func].config);
3054                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3055                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3056                                 bnx2x_dcc_event(bp,
3057                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3058                         bnx2x__link_status_update(bp);
3059                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3060                                 bnx2x_pmf_update(bp);
3061
3062                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3063
3064                         BNX2X_ERR("MC assert!\n");
3065                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3066                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3067                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3068                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3069                         bnx2x_panic();
3070
3071                 } else if (attn & BNX2X_MCP_ASSERT) {
3072
3073                         BNX2X_ERR("MCP assert!\n");
3074                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3075                         bnx2x_fw_dump(bp);
3076
3077                 } else
3078                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3079         }
3080
3081         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3082                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3083                 if (attn & BNX2X_GRC_TIMEOUT) {
3084                         val = CHIP_IS_E1H(bp) ?
3085                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3086                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3087                 }
3088                 if (attn & BNX2X_GRC_RSV) {
3089                         val = CHIP_IS_E1H(bp) ?
3090                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3091                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3092                 }
3093                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3094         }
3095 }
3096
3097 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3098 {
3099         struct attn_route attn;
3100         struct attn_route group_mask;
3101         int port = BP_PORT(bp);
3102         int index;
3103         u32 reg_addr;
3104         u32 val;
3105         u32 aeu_mask;
3106
3107         /* need to take HW lock because MCP or other port might also
3108            try to handle this event */
3109         bnx2x_acquire_alr(bp);
3110
3111         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3112         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3113         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3114         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3115         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3116            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3117
3118         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3119                 if (deasserted & (1 << index)) {
3120                         group_mask = bp->attn_group[index];
3121
3122                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3123                            index, group_mask.sig[0], group_mask.sig[1],
3124                            group_mask.sig[2], group_mask.sig[3]);
3125
3126                         bnx2x_attn_int_deasserted3(bp,
3127                                         attn.sig[3] & group_mask.sig[3]);
3128                         bnx2x_attn_int_deasserted1(bp,
3129                                         attn.sig[1] & group_mask.sig[1]);
3130                         bnx2x_attn_int_deasserted2(bp,
3131                                         attn.sig[2] & group_mask.sig[2]);
3132                         bnx2x_attn_int_deasserted0(bp,
3133                                         attn.sig[0] & group_mask.sig[0]);
3134
3135                         if ((attn.sig[0] & group_mask.sig[0] &
3136                                                 HW_PRTY_ASSERT_SET_0) ||
3137                             (attn.sig[1] & group_mask.sig[1] &
3138                                                 HW_PRTY_ASSERT_SET_1) ||
3139                             (attn.sig[2] & group_mask.sig[2] &
3140                                                 HW_PRTY_ASSERT_SET_2))
3141                                 BNX2X_ERR("FATAL HW block parity attention\n");
3142                 }
3143         }
3144
3145         bnx2x_release_alr(bp);
3146
3147         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3148
3149         val = ~deasserted;
3150         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3151            val, reg_addr);
3152         REG_WR(bp, reg_addr, val);
3153
3154         if (~bp->attn_state & deasserted)
3155                 BNX2X_ERR("IGU ERROR\n");
3156
3157         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3158                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3159
3160         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3161         aeu_mask = REG_RD(bp, reg_addr);
3162
3163         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3164            aeu_mask, deasserted);
3165         aeu_mask |= (deasserted & 0xff);
3166         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3167
3168         REG_WR(bp, reg_addr, aeu_mask);
3169         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3170
3171         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3172         bp->attn_state &= ~deasserted;
3173         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3174 }
3175
3176 static void bnx2x_attn_int(struct bnx2x *bp)
3177 {
3178         /* read local copy of bits */
3179         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3180                                                                 attn_bits);
3181         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3182                                                                 attn_bits_ack);
3183         u32 attn_state = bp->attn_state;
3184
3185         /* look for changed bits */
3186         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3187         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3188
3189         DP(NETIF_MSG_HW,
3190            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3191            attn_bits, attn_ack, asserted, deasserted);
3192
3193         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3194                 BNX2X_ERR("BAD attention state\n");
3195
3196         /* handle bits that were raised */
3197         if (asserted)
3198                 bnx2x_attn_int_asserted(bp, asserted);
3199
3200         if (deasserted)
3201                 bnx2x_attn_int_deasserted(bp, deasserted);
3202 }
3203
3204 static void bnx2x_sp_task(struct work_struct *work)
3205 {
3206         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3207         u16 status;
3208
3209
3210         /* Return here if interrupt is disabled */
3211         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3212                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3213                 return;
3214         }
3215
3216         status = bnx2x_update_dsb_idx(bp);
3217 /*      if (status == 0)                                     */
3218 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3219
3220         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3221
3222         /* HW attentions */
3223         if (status & 0x1)
3224                 bnx2x_attn_int(bp);
3225
3226         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3227                      IGU_INT_NOP, 1);
3228         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3229                      IGU_INT_NOP, 1);
3230         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3231                      IGU_INT_NOP, 1);
3232         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3233                      IGU_INT_NOP, 1);
3234         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3235                      IGU_INT_ENABLE, 1);
3236
3237 }
3238
3239 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3240 {
3241         struct net_device *dev = dev_instance;
3242         struct bnx2x *bp = netdev_priv(dev);
3243
3244         /* Return here if interrupt is disabled */
3245         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3246                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3247                 return IRQ_HANDLED;
3248         }
3249
3250         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3251
3252 #ifdef BNX2X_STOP_ON_ERROR
3253         if (unlikely(bp->panic))
3254                 return IRQ_HANDLED;
3255 #endif
3256
3257 #ifdef BCM_CNIC
3258         {
3259                 struct cnic_ops *c_ops;
3260
3261                 rcu_read_lock();
3262                 c_ops = rcu_dereference(bp->cnic_ops);
3263                 if (c_ops)
3264                         c_ops->cnic_handler(bp->cnic_data, NULL);
3265                 rcu_read_unlock();
3266         }
3267 #endif
3268         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3269
3270         return IRQ_HANDLED;
3271 }
3272
3273 /* end of slow path */
3274
3275 /* Statistics */
3276
3277 /****************************************************************************
3278 * Macros
3279 ****************************************************************************/
3280
3281 /* sum[hi:lo] += add[hi:lo] */
3282 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3283         do { \
3284                 s_lo += a_lo; \
3285                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3286         } while (0)
3287
3288 /* difference = minuend - subtrahend */
3289 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3290         do { \
3291                 if (m_lo < s_lo) { \
3292                         /* underflow */ \
3293                         d_hi = m_hi - s_hi; \
3294                         if (d_hi > 0) { \
3295                                 /* we can 'loan' 1 */ \
3296                                 d_hi--; \
3297                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3298                         } else { \
3299                                 /* m_hi <= s_hi */ \
3300                                 d_hi = 0; \
3301                                 d_lo = 0; \
3302                         } \
3303                 } else { \
3304                         /* m_lo >= s_lo */ \
3305                         if (m_hi < s_hi) { \
3306                                 d_hi = 0; \
3307                                 d_lo = 0; \
3308                         } else { \
3309                                 /* m_hi >= s_hi */ \
3310                                 d_hi = m_hi - s_hi; \
3311                                 d_lo = m_lo - s_lo; \
3312                         } \
3313                 } \
3314         } while (0)
3315
3316 #define UPDATE_STAT64(s, t) \
3317         do { \
3318                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3319                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3320                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3321                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3322                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3323                        pstats->mac_stx[1].t##_lo, diff.lo); \
3324         } while (0)
3325
3326 #define UPDATE_STAT64_NIG(s, t) \
3327         do { \
3328                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3329                         diff.lo, new->s##_lo, old->s##_lo); \
3330                 ADD_64(estats->t##_hi, diff.hi, \
3331                        estats->t##_lo, diff.lo); \
3332         } while (0)
3333
3334 /* sum[hi:lo] += add */
3335 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3336         do { \
3337                 s_lo += a; \
3338                 s_hi += (s_lo < a) ? 1 : 0; \
3339         } while (0)
3340
3341 #define UPDATE_EXTEND_STAT(s) \
3342         do { \
3343                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3344                               pstats->mac_stx[1].s##_lo, \
3345                               new->s); \
3346         } while (0)
3347
3348 #define UPDATE_EXTEND_TSTAT(s, t) \
3349         do { \
3350                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3351                 old_tclient->s = tclient->s; \
3352                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3353         } while (0)
3354
3355 #define UPDATE_EXTEND_USTAT(s, t) \
3356         do { \
3357                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3358                 old_uclient->s = uclient->s; \
3359                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3360         } while (0)
3361
3362 #define UPDATE_EXTEND_XSTAT(s, t) \
3363         do { \
3364                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3365                 old_xclient->s = xclient->s; \
3366                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3367         } while (0)
3368
3369 /* minuend -= subtrahend */
3370 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3371         do { \
3372                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3373         } while (0)
3374
3375 /* minuend[hi:lo] -= subtrahend */
3376 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3377         do { \
3378                 SUB_64(m_hi, 0, m_lo, s); \
3379         } while (0)
3380
3381 #define SUB_EXTEND_USTAT(s, t) \
3382         do { \
3383                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3384                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3385         } while (0)
3386
3387 /*
3388  * General service functions
3389  */
3390
3391 static inline long bnx2x_hilo(u32 *hiref)
3392 {
3393         u32 lo = *(hiref + 1);
3394 #if (BITS_PER_LONG == 64)
3395         u32 hi = *hiref;
3396
3397         return HILO_U64(hi, lo);
3398 #else
3399         return lo;
3400 #endif
3401 }
3402
3403 /*
3404  * Init service functions
3405  */
3406
3407 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3408 {
3409         if (!bp->stats_pending) {
3410                 struct eth_query_ramrod_data ramrod_data = {0};
3411                 int i, rc;
3412
3413                 ramrod_data.drv_counter = bp->stats_counter++;
3414                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3415                 for_each_queue(bp, i)
3416                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3417
3418                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3419                                    ((u32 *)&ramrod_data)[1],
3420                                    ((u32 *)&ramrod_data)[0], 0);
3421                 if (rc == 0) {
3422                         /* stats ramrod has it's own slot on the spq */
3423                         bp->spq_left++;
3424                         bp->stats_pending = 1;
3425                 }
3426         }
3427 }
3428
3429 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3430 {
3431         struct dmae_command *dmae = &bp->stats_dmae;
3432         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3433
3434         *stats_comp = DMAE_COMP_VAL;
3435         if (CHIP_REV_IS_SLOW(bp))
3436                 return;
3437
3438         /* loader */
3439         if (bp->executer_idx) {
3440                 int loader_idx = PMF_DMAE_C(bp);
3441
3442                 memset(dmae, 0, sizeof(struct dmae_command));
3443
3444                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3445                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3446                                 DMAE_CMD_DST_RESET |
3447 #ifdef __BIG_ENDIAN
3448                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3449 #else
3450                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3451 #endif
3452                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3453                                                DMAE_CMD_PORT_0) |
3454                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3455                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3456                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3457                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3458                                      sizeof(struct dmae_command) *
3459                                      (loader_idx + 1)) >> 2;
3460                 dmae->dst_addr_hi = 0;
3461                 dmae->len = sizeof(struct dmae_command) >> 2;
3462                 if (CHIP_IS_E1(bp))
3463                         dmae->len--;
3464                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3465                 dmae->comp_addr_hi = 0;
3466                 dmae->comp_val = 1;
3467
3468                 *stats_comp = 0;
3469                 bnx2x_post_dmae(bp, dmae, loader_idx);
3470
3471         } else if (bp->func_stx) {
3472                 *stats_comp = 0;
3473                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3474         }
3475 }
3476
3477 static int bnx2x_stats_comp(struct bnx2x *bp)
3478 {
3479         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3480         int cnt = 10;
3481
3482         might_sleep();
3483         while (*stats_comp != DMAE_COMP_VAL) {
3484                 if (!cnt) {
3485                         BNX2X_ERR("timeout waiting for stats finished\n");
3486                         break;
3487                 }
3488                 cnt--;
3489                 msleep(1);
3490         }
3491         return 1;
3492 }
3493
3494 /*
3495  * Statistics service functions
3496  */
3497
3498 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3499 {
3500         struct dmae_command *dmae;
3501         u32 opcode;
3502         int loader_idx = PMF_DMAE_C(bp);
3503         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3504
3505         /* sanity */
3506         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3507                 BNX2X_ERR("BUG!\n");
3508                 return;
3509         }
3510
3511         bp->executer_idx = 0;
3512
3513         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3514                   DMAE_CMD_C_ENABLE |
3515                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3516 #ifdef __BIG_ENDIAN
3517                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3518 #else
3519                   DMAE_CMD_ENDIANITY_DW_SWAP |
3520 #endif
3521                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3522                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3523
3524         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3525         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3526         dmae->src_addr_lo = bp->port.port_stx >> 2;
3527         dmae->src_addr_hi = 0;
3528         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3529         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3530         dmae->len = DMAE_LEN32_RD_MAX;
3531         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3532         dmae->comp_addr_hi = 0;
3533         dmae->comp_val = 1;
3534
3535         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3536         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3537         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3538         dmae->src_addr_hi = 0;
3539         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3540                                    DMAE_LEN32_RD_MAX * 4);
3541         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3542                                    DMAE_LEN32_RD_MAX * 4);
3543         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3544         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3545         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3546         dmae->comp_val = DMAE_COMP_VAL;
3547
3548         *stats_comp = 0;
3549         bnx2x_hw_stats_post(bp);
3550         bnx2x_stats_comp(bp);
3551 }
3552
3553 static void bnx2x_port_stats_init(struct bnx2x *bp)
3554 {
3555         struct dmae_command *dmae;
3556         int port = BP_PORT(bp);
3557         int vn = BP_E1HVN(bp);
3558         u32 opcode;
3559         int loader_idx = PMF_DMAE_C(bp);
3560         u32 mac_addr;
3561         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3562
3563         /* sanity */
3564         if (!bp->link_vars.link_up || !bp->port.pmf) {
3565                 BNX2X_ERR("BUG!\n");
3566                 return;
3567         }
3568
3569         bp->executer_idx = 0;
3570
3571         /* MCP */
3572         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3573                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3574                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3575 #ifdef __BIG_ENDIAN
3576                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3577 #else
3578                   DMAE_CMD_ENDIANITY_DW_SWAP |
3579 #endif
3580                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3581                   (vn << DMAE_CMD_E1HVN_SHIFT));
3582
3583         if (bp->port.port_stx) {
3584
3585                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3586                 dmae->opcode = opcode;
3587                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3588                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3589                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3590                 dmae->dst_addr_hi = 0;
3591                 dmae->len = sizeof(struct host_port_stats) >> 2;
3592                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3593                 dmae->comp_addr_hi = 0;
3594                 dmae->comp_val = 1;
3595         }
3596
3597         if (bp->func_stx) {
3598
3599                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3600                 dmae->opcode = opcode;
3601                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3602                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3603                 dmae->dst_addr_lo = bp->func_stx >> 2;
3604                 dmae->dst_addr_hi = 0;
3605                 dmae->len = sizeof(struct host_func_stats) >> 2;
3606                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3607                 dmae->comp_addr_hi = 0;
3608                 dmae->comp_val = 1;
3609         }
3610
3611         /* MAC */
3612         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3613                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3614                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3615 #ifdef __BIG_ENDIAN
3616                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3617 #else
3618                   DMAE_CMD_ENDIANITY_DW_SWAP |
3619 #endif
3620                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3621                   (vn << DMAE_CMD_E1HVN_SHIFT));
3622
3623         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3624
3625                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3626                                    NIG_REG_INGRESS_BMAC0_MEM);
3627
3628                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3629                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3630                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3631                 dmae->opcode = opcode;
3632                 dmae->src_addr_lo = (mac_addr +
3633                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3634                 dmae->src_addr_hi = 0;
3635                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3636                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3637                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3638                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3639                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3640                 dmae->comp_addr_hi = 0;
3641                 dmae->comp_val = 1;
3642
3643                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3644                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3645                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3646                 dmae->opcode = opcode;
3647                 dmae->src_addr_lo = (mac_addr +
3648                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3649                 dmae->src_addr_hi = 0;
3650                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3651                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3652                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3653                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3654                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3655                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3656                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3657                 dmae->comp_addr_hi = 0;
3658                 dmae->comp_val = 1;
3659
3660         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3661
3662                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3663
3664                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3665                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3666                 dmae->opcode = opcode;
3667                 dmae->src_addr_lo = (mac_addr +
3668                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3669                 dmae->src_addr_hi = 0;
3670                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3671                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3672                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3673                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3674                 dmae->comp_addr_hi = 0;
3675                 dmae->comp_val = 1;
3676
3677                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3678                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3679                 dmae->opcode = opcode;
3680                 dmae->src_addr_lo = (mac_addr +
3681                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3682                 dmae->src_addr_hi = 0;
3683                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3684                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3685                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3686                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3687                 dmae->len = 1;
3688                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3689                 dmae->comp_addr_hi = 0;
3690                 dmae->comp_val = 1;
3691
3692                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3693                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3694                 dmae->opcode = opcode;
3695                 dmae->src_addr_lo = (mac_addr +
3696                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3697                 dmae->src_addr_hi = 0;
3698                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3699                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3700                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3701                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3702                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3703                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3704                 dmae->comp_addr_hi = 0;
3705                 dmae->comp_val = 1;
3706         }
3707
3708         /* NIG */
3709         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3710         dmae->opcode = opcode;
3711         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3712                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3713         dmae->src_addr_hi = 0;
3714         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3715         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3716         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3717         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3718         dmae->comp_addr_hi = 0;
3719         dmae->comp_val = 1;
3720
3721         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3722         dmae->opcode = opcode;
3723         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3724                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3725         dmae->src_addr_hi = 0;
3726         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3727                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3728         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3729                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3730         dmae->len = (2*sizeof(u32)) >> 2;
3731         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3732         dmae->comp_addr_hi = 0;
3733         dmae->comp_val = 1;
3734
3735         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3736         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3737                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3738                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3739 #ifdef __BIG_ENDIAN
3740                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3741 #else
3742                         DMAE_CMD_ENDIANITY_DW_SWAP |
3743 #endif
3744                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3745                         (vn << DMAE_CMD_E1HVN_SHIFT));
3746         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3747                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3748         dmae->src_addr_hi = 0;
3749         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3750                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3751         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3752                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3753         dmae->len = (2*sizeof(u32)) >> 2;
3754         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3755         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3756         dmae->comp_val = DMAE_COMP_VAL;
3757
3758         *stats_comp = 0;
3759 }
3760
3761 static void bnx2x_func_stats_init(struct bnx2x *bp)
3762 {
3763         struct dmae_command *dmae = &bp->stats_dmae;
3764         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3765
3766         /* sanity */
3767         if (!bp->func_stx) {
3768                 BNX2X_ERR("BUG!\n");
3769                 return;
3770         }
3771
3772         bp->executer_idx = 0;
3773         memset(dmae, 0, sizeof(struct dmae_command));
3774
3775         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3776                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3777                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3778 #ifdef __BIG_ENDIAN
3779                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3780 #else
3781                         DMAE_CMD_ENDIANITY_DW_SWAP |
3782 #endif
3783                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3784                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3785         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3786         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3787         dmae->dst_addr_lo = bp->func_stx >> 2;
3788         dmae->dst_addr_hi = 0;
3789         dmae->len = sizeof(struct host_func_stats) >> 2;
3790         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3791         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3792         dmae->comp_val = DMAE_COMP_VAL;
3793
3794         *stats_comp = 0;
3795 }
3796
3797 static void bnx2x_stats_start(struct bnx2x *bp)
3798 {
3799         if (bp->port.pmf)
3800                 bnx2x_port_stats_init(bp);
3801
3802         else if (bp->func_stx)
3803                 bnx2x_func_stats_init(bp);
3804
3805         bnx2x_hw_stats_post(bp);
3806         bnx2x_storm_stats_post(bp);
3807 }
3808
3809 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3810 {
3811         bnx2x_stats_comp(bp);
3812         bnx2x_stats_pmf_update(bp);
3813         bnx2x_stats_start(bp);
3814 }
3815
3816 static void bnx2x_stats_restart(struct bnx2x *bp)
3817 {
3818         bnx2x_stats_comp(bp);
3819         bnx2x_stats_start(bp);
3820 }
3821
3822 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3823 {
3824         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3825         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3826         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3827         struct {
3828                 u32 lo;
3829                 u32 hi;
3830         } diff;
3831
3832         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3833         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3834         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3835         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3836         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3837         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3838         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3839         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3840         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3841         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3842         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3843         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3844         UPDATE_STAT64(tx_stat_gt127,
3845                                 tx_stat_etherstatspkts65octetsto127octets);
3846         UPDATE_STAT64(tx_stat_gt255,
3847                                 tx_stat_etherstatspkts128octetsto255octets);
3848         UPDATE_STAT64(tx_stat_gt511,
3849                                 tx_stat_etherstatspkts256octetsto511octets);
3850         UPDATE_STAT64(tx_stat_gt1023,
3851                                 tx_stat_etherstatspkts512octetsto1023octets);
3852         UPDATE_STAT64(tx_stat_gt1518,
3853                                 tx_stat_etherstatspkts1024octetsto1522octets);
3854         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3855         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3856         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3857         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3858         UPDATE_STAT64(tx_stat_gterr,
3859                                 tx_stat_dot3statsinternalmactransmiterrors);
3860         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3861
3862         estats->pause_frames_received_hi =
3863                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3864         estats->pause_frames_received_lo =
3865                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3866
3867         estats->pause_frames_sent_hi =
3868                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3869         estats->pause_frames_sent_lo =
3870                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3871 }
3872
3873 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3874 {
3875         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3876         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3877         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3878
3879         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3880         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3881         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3882         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3883         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3884         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3885         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3886         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3887         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3888         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3889         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3890         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3891         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3892         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3893         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3894         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3895         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3896         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3897         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3898         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3899         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3900         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3901         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3902         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3903         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3904         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3905         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3906         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3907         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3908         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3909         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3910
3911         estats->pause_frames_received_hi =
3912                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3913         estats->pause_frames_received_lo =
3914                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3915         ADD_64(estats->pause_frames_received_hi,
3916                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3917                estats->pause_frames_received_lo,
3918                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3919
3920         estats->pause_frames_sent_hi =
3921                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3922         estats->pause_frames_sent_lo =
3923                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3924         ADD_64(estats->pause_frames_sent_hi,
3925                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3926                estats->pause_frames_sent_lo,
3927                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3928 }
3929
3930 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3931 {
3932         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3933         struct nig_stats *old = &(bp->port.old_nig_stats);
3934         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3935         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3936         struct {
3937                 u32 lo;
3938                 u32 hi;
3939         } diff;
3940         u32 nig_timer_max;
3941
3942         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3943                 bnx2x_bmac_stats_update(bp);
3944
3945         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3946                 bnx2x_emac_stats_update(bp);
3947
3948         else { /* unreached */
3949                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3950                 return -1;
3951         }
3952
3953         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3954                       new->brb_discard - old->brb_discard);
3955         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3956                       new->brb_truncate - old->brb_truncate);
3957
3958         UPDATE_STAT64_NIG(egress_mac_pkt0,
3959                                         etherstatspkts1024octetsto1522octets);
3960         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3961
3962         memcpy(old, new, sizeof(struct nig_stats));
3963
3964         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3965                sizeof(struct mac_stx));
3966         estats->brb_drop_hi = pstats->brb_drop_hi;
3967         estats->brb_drop_lo = pstats->brb_drop_lo;
3968
3969         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3970
3971         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3972         if (nig_timer_max != estats->nig_timer_max) {
3973                 estats->nig_timer_max = nig_timer_max;
3974                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3975         }
3976
3977         return 0;
3978 }
3979
3980 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3981 {
3982         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3983         struct tstorm_per_port_stats *tport =
3984                                         &stats->tstorm_common.port_statistics;
3985         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3986         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3987         int i;
3988
3989         memcpy(&(fstats->total_bytes_received_hi),
3990                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3991                sizeof(struct host_func_stats) - 2*sizeof(u32));
3992         estats->error_bytes_received_hi = 0;
3993         estats->error_bytes_received_lo = 0;
3994         estats->etherstatsoverrsizepkts_hi = 0;
3995         estats->etherstatsoverrsizepkts_lo = 0;
3996         estats->no_buff_discard_hi = 0;
3997         estats->no_buff_discard_lo = 0;
3998
3999         for_each_queue(bp, i) {
4000                 struct bnx2x_fastpath *fp = &bp->fp[i];
4001                 int cl_id = fp->cl_id;
4002                 struct tstorm_per_client_stats *tclient =
4003                                 &stats->tstorm_common.client_statistics[cl_id];
4004                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4005                 struct ustorm_per_client_stats *uclient =
4006                                 &stats->ustorm_common.client_statistics[cl_id];
4007                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4008                 struct xstorm_per_client_stats *xclient =
4009                                 &stats->xstorm_common.client_statistics[cl_id];
4010                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4011                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4012                 u32 diff;
4013
4014                 /* are storm stats valid? */
4015                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4016                                                         bp->stats_counter) {
4017                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4018                            "  xstorm counter (%d) != stats_counter (%d)\n",
4019                            i, xclient->stats_counter, bp->stats_counter);
4020                         return -1;
4021                 }
4022                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4023                                                         bp->stats_counter) {
4024                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4025                            "  tstorm counter (%d) != stats_counter (%d)\n",
4026                            i, tclient->stats_counter, bp->stats_counter);
4027                         return -2;
4028                 }
4029                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4030                                                         bp->stats_counter) {
4031                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4032                            "  ustorm counter (%d) != stats_counter (%d)\n",
4033                            i, uclient->stats_counter, bp->stats_counter);
4034                         return -4;
4035                 }
4036
4037                 qstats->total_bytes_received_hi =
4038                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4039                 qstats->total_bytes_received_lo =
4040                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4041
4042                 ADD_64(qstats->total_bytes_received_hi,
4043                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4044                        qstats->total_bytes_received_lo,
4045                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4046
4047                 ADD_64(qstats->total_bytes_received_hi,
4048                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4049                        qstats->total_bytes_received_lo,
4050                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4051
4052                 qstats->valid_bytes_received_hi =
4053                                         qstats->total_bytes_received_hi;
4054                 qstats->valid_bytes_received_lo =
4055                                         qstats->total_bytes_received_lo;
4056
4057                 qstats->error_bytes_received_hi =
4058                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4059                 qstats->error_bytes_received_lo =
4060                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4061
4062                 ADD_64(qstats->total_bytes_received_hi,
4063                        qstats->error_bytes_received_hi,
4064                        qstats->total_bytes_received_lo,
4065                        qstats->error_bytes_received_lo);
4066
4067                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4068                                         total_unicast_packets_received);
4069                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4070                                         total_multicast_packets_received);
4071                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4072                                         total_broadcast_packets_received);
4073                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4074                                         etherstatsoverrsizepkts);
4075                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4076
4077                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4078                                         total_unicast_packets_received);
4079                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4080                                         total_multicast_packets_received);
4081                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4082                                         total_broadcast_packets_received);
4083                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4084                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4085                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4086
4087                 qstats->total_bytes_transmitted_hi =
4088                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4089                 qstats->total_bytes_transmitted_lo =
4090                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4091
4092                 ADD_64(qstats->total_bytes_transmitted_hi,
4093                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4094                        qstats->total_bytes_transmitted_lo,
4095                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4096
4097                 ADD_64(qstats->total_bytes_transmitted_hi,
4098                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4099                        qstats->total_bytes_transmitted_lo,
4100                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4101
4102                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4103                                         total_unicast_packets_transmitted);
4104                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4105                                         total_multicast_packets_transmitted);
4106                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4107                                         total_broadcast_packets_transmitted);
4108
4109                 old_tclient->checksum_discard = tclient->checksum_discard;
4110                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4111
4112                 ADD_64(fstats->total_bytes_received_hi,
4113                        qstats->total_bytes_received_hi,
4114                        fstats->total_bytes_received_lo,
4115                        qstats->total_bytes_received_lo);
4116                 ADD_64(fstats->total_bytes_transmitted_hi,
4117                        qstats->total_bytes_transmitted_hi,
4118                        fstats->total_bytes_transmitted_lo,
4119                        qstats->total_bytes_transmitted_lo);
4120                 ADD_64(fstats->total_unicast_packets_received_hi,
4121                        qstats->total_unicast_packets_received_hi,
4122                        fstats->total_unicast_packets_received_lo,
4123                        qstats->total_unicast_packets_received_lo);
4124                 ADD_64(fstats->total_multicast_packets_received_hi,
4125                        qstats->total_multicast_packets_received_hi,
4126                        fstats->total_multicast_packets_received_lo,
4127                        qstats->total_multicast_packets_received_lo);
4128                 ADD_64(fstats->total_broadcast_packets_received_hi,
4129                        qstats->total_broadcast_packets_received_hi,
4130                        fstats->total_broadcast_packets_received_lo,
4131                        qstats->total_broadcast_packets_received_lo);
4132                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4133                        qstats->total_unicast_packets_transmitted_hi,
4134                        fstats->total_unicast_packets_transmitted_lo,
4135                        qstats->total_unicast_packets_transmitted_lo);
4136                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4137                        qstats->total_multicast_packets_transmitted_hi,
4138                        fstats->total_multicast_packets_transmitted_lo,
4139                        qstats->total_multicast_packets_transmitted_lo);
4140                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4141                        qstats->total_broadcast_packets_transmitted_hi,
4142                        fstats->total_broadcast_packets_transmitted_lo,
4143                        qstats->total_broadcast_packets_transmitted_lo);
4144                 ADD_64(fstats->valid_bytes_received_hi,
4145                        qstats->valid_bytes_received_hi,
4146                        fstats->valid_bytes_received_lo,
4147                        qstats->valid_bytes_received_lo);
4148
4149                 ADD_64(estats->error_bytes_received_hi,
4150                        qstats->error_bytes_received_hi,
4151                        estats->error_bytes_received_lo,
4152                        qstats->error_bytes_received_lo);
4153                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4154                        qstats->etherstatsoverrsizepkts_hi,
4155                        estats->etherstatsoverrsizepkts_lo,
4156                        qstats->etherstatsoverrsizepkts_lo);
4157                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4158                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4159         }
4160
4161         ADD_64(fstats->total_bytes_received_hi,
4162                estats->rx_stat_ifhcinbadoctets_hi,
4163                fstats->total_bytes_received_lo,
4164                estats->rx_stat_ifhcinbadoctets_lo);
4165
4166         memcpy(estats, &(fstats->total_bytes_received_hi),
4167                sizeof(struct host_func_stats) - 2*sizeof(u32));
4168
4169         ADD_64(estats->etherstatsoverrsizepkts_hi,
4170                estats->rx_stat_dot3statsframestoolong_hi,
4171                estats->etherstatsoverrsizepkts_lo,
4172                estats->rx_stat_dot3statsframestoolong_lo);
4173         ADD_64(estats->error_bytes_received_hi,
4174                estats->rx_stat_ifhcinbadoctets_hi,
4175                estats->error_bytes_received_lo,
4176                estats->rx_stat_ifhcinbadoctets_lo);
4177
4178         if (bp->port.pmf) {
4179                 estats->mac_filter_discard =
4180                                 le32_to_cpu(tport->mac_filter_discard);
4181                 estats->xxoverflow_discard =
4182                                 le32_to_cpu(tport->xxoverflow_discard);
4183                 estats->brb_truncate_discard =
4184                                 le32_to_cpu(tport->brb_truncate_discard);
4185                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4186         }
4187
4188         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4189
4190         bp->stats_pending = 0;
4191
4192         return 0;
4193 }
4194
4195 static void bnx2x_net_stats_update(struct bnx2x *bp)
4196 {
4197         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4198         struct net_device_stats *nstats = &bp->dev->stats;
4199         int i;
4200
4201         nstats->rx_packets =
4202                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4203                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4204                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4205
4206         nstats->tx_packets =
4207                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4208                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4209                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4210
4211         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4212
4213         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4214
4215         nstats->rx_dropped = estats->mac_discard;
4216         for_each_queue(bp, i)
4217                 nstats->rx_dropped +=
4218                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4219
4220         nstats->tx_dropped = 0;
4221
4222         nstats->multicast =
4223                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4224
4225         nstats->collisions =
4226                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4227
4228         nstats->rx_length_errors =
4229                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4230                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4231         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4232                                  bnx2x_hilo(&estats->brb_truncate_hi);
4233         nstats->rx_crc_errors =
4234                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4235         nstats->rx_frame_errors =
4236                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4237         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4238         nstats->rx_missed_errors = estats->xxoverflow_discard;
4239
4240         nstats->rx_errors = nstats->rx_length_errors +
4241                             nstats->rx_over_errors +
4242                             nstats->rx_crc_errors +
4243                             nstats->rx_frame_errors +
4244                             nstats->rx_fifo_errors +
4245                             nstats->rx_missed_errors;
4246
4247         nstats->tx_aborted_errors =
4248                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4249                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4250         nstats->tx_carrier_errors =
4251                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4252         nstats->tx_fifo_errors = 0;
4253         nstats->tx_heartbeat_errors = 0;
4254         nstats->tx_window_errors = 0;
4255
4256         nstats->tx_errors = nstats->tx_aborted_errors +
4257                             nstats->tx_carrier_errors +
4258             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4259 }
4260
4261 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4262 {
4263         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4264         int i;
4265
4266         estats->driver_xoff = 0;
4267         estats->rx_err_discard_pkt = 0;
4268         estats->rx_skb_alloc_failed = 0;
4269         estats->hw_csum_err = 0;
4270         for_each_queue(bp, i) {
4271                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4272
4273                 estats->driver_xoff += qstats->driver_xoff;
4274                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4275                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4276                 estats->hw_csum_err += qstats->hw_csum_err;
4277         }
4278 }
4279
4280 static void bnx2x_stats_update(struct bnx2x *bp)
4281 {
4282         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4283
4284         if (*stats_comp != DMAE_COMP_VAL)
4285                 return;
4286
4287         if (bp->port.pmf)
4288                 bnx2x_hw_stats_update(bp);
4289
4290         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4291                 BNX2X_ERR("storm stats were not updated for 3 times\n");
4292                 bnx2x_panic();
4293                 return;
4294         }
4295
4296         bnx2x_net_stats_update(bp);
4297         bnx2x_drv_stats_update(bp);
4298
4299         if (bp->msglevel & NETIF_MSG_TIMER) {
4300                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4301                 struct bnx2x_fastpath *fp0_tx = bp->fp;
4302                 struct tstorm_per_client_stats *old_tclient =
4303                                                         &bp->fp->old_tclient;
4304                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4305                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4306                 struct net_device_stats *nstats = &bp->dev->stats;
4307                 int i;
4308
4309                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4310                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4311                                   "  tx pkt (%lx)\n",
4312                        bnx2x_tx_avail(fp0_tx),
4313                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4314                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4315                                   "  rx pkt (%lx)\n",
4316                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4317                              fp0_rx->rx_comp_cons),
4318                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4319                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4320                                   "brb truncate %u\n",
4321                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4322                        qstats->driver_xoff,
4323                        estats->brb_drop_lo, estats->brb_truncate_lo);
4324                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4325                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4326                         "mac_discard %u  mac_filter_discard %u  "
4327                         "xxovrflow_discard %u  brb_truncate_discard %u  "
4328                         "ttl0_discard %u\n",
4329                        le32_to_cpu(old_tclient->checksum_discard),
4330                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4331                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4332                        estats->mac_discard, estats->mac_filter_discard,
4333                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4334                        le32_to_cpu(old_tclient->ttl0_discard));
4335
4336                 for_each_queue(bp, i) {
4337                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4338                                bnx2x_fp(bp, i, tx_pkt),
4339                                bnx2x_fp(bp, i, rx_pkt),
4340                                bnx2x_fp(bp, i, rx_calls));
4341                 }
4342         }
4343
4344         bnx2x_hw_stats_post(bp);
4345         bnx2x_storm_stats_post(bp);
4346 }
4347
4348 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4349 {
4350         struct dmae_command *dmae;
4351         u32 opcode;
4352         int loader_idx = PMF_DMAE_C(bp);
4353         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4354
4355         bp->executer_idx = 0;
4356
4357         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4358                   DMAE_CMD_C_ENABLE |
4359                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4360 #ifdef __BIG_ENDIAN
4361                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4362 #else
4363                   DMAE_CMD_ENDIANITY_DW_SWAP |
4364 #endif
4365                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4366                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4367
4368         if (bp->port.port_stx) {
4369
4370                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4371                 if (bp->func_stx)
4372                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4373                 else
4374                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4375                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4376                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4377                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4378                 dmae->dst_addr_hi = 0;
4379                 dmae->len = sizeof(struct host_port_stats) >> 2;
4380                 if (bp->func_stx) {
4381                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4382                         dmae->comp_addr_hi = 0;
4383                         dmae->comp_val = 1;
4384                 } else {
4385                         dmae->comp_addr_lo =
4386                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4387                         dmae->comp_addr_hi =
4388                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4389                         dmae->comp_val = DMAE_COMP_VAL;
4390
4391                         *stats_comp = 0;
4392                 }
4393         }
4394
4395         if (bp->func_stx) {
4396
4397                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4398                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4399                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4400                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4401                 dmae->dst_addr_lo = bp->func_stx >> 2;
4402                 dmae->dst_addr_hi = 0;
4403                 dmae->len = sizeof(struct host_func_stats) >> 2;
4404                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4405                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4406                 dmae->comp_val = DMAE_COMP_VAL;
4407
4408                 *stats_comp = 0;
4409         }
4410 }
4411
4412 static void bnx2x_stats_stop(struct bnx2x *bp)
4413 {
4414         int update = 0;
4415
4416         bnx2x_stats_comp(bp);
4417
4418         if (bp->port.pmf)
4419                 update = (bnx2x_hw_stats_update(bp) == 0);
4420
4421         update |= (bnx2x_storm_stats_update(bp) == 0);
4422
4423         if (update) {
4424                 bnx2x_net_stats_update(bp);
4425
4426                 if (bp->port.pmf)
4427                         bnx2x_port_stats_stop(bp);
4428
4429                 bnx2x_hw_stats_post(bp);
4430                 bnx2x_stats_comp(bp);
4431         }
4432 }
4433
4434 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4435 {
4436 }
4437
4438 static const struct {
4439         void (*action)(struct bnx2x *bp);
4440         enum bnx2x_stats_state next_state;
4441 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4442 /* state        event   */
4443 {
4444 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4445 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4446 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4447 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4448 },
4449 {
4450 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4451 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4452 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4453 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4454 }
4455 };
4456
4457 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4458 {
4459         enum bnx2x_stats_state state = bp->stats_state;
4460
4461         bnx2x_stats_stm[state][event].action(bp);
4462         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4463
4464         /* Make sure the state has been "changed" */
4465         smp_wmb();
4466
4467         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4468                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4469                    state, event, bp->stats_state);
4470 }
4471
4472 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4473 {
4474         struct dmae_command *dmae;
4475         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4476
4477         /* sanity */
4478         if (!bp->port.pmf || !bp->port.port_stx) {
4479                 BNX2X_ERR("BUG!\n");
4480                 return;
4481         }
4482
4483         bp->executer_idx = 0;
4484
4485         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4486         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4487                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4488                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4489 #ifdef __BIG_ENDIAN
4490                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4491 #else
4492                         DMAE_CMD_ENDIANITY_DW_SWAP |
4493 #endif
4494                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4495                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4496         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4497         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4498         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4499         dmae->dst_addr_hi = 0;
4500         dmae->len = sizeof(struct host_port_stats) >> 2;
4501         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4502         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4503         dmae->comp_val = DMAE_COMP_VAL;
4504
4505         *stats_comp = 0;
4506         bnx2x_hw_stats_post(bp);
4507         bnx2x_stats_comp(bp);
4508 }
4509
4510 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4511 {
4512         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4513         int port = BP_PORT(bp);
4514         int func;
4515         u32 func_stx;
4516
4517         /* sanity */
4518         if (!bp->port.pmf || !bp->func_stx) {
4519                 BNX2X_ERR("BUG!\n");
4520                 return;
4521         }
4522
4523         /* save our func_stx */
4524         func_stx = bp->func_stx;
4525
4526         for (vn = VN_0; vn < vn_max; vn++) {
4527                 func = 2*vn + port;
4528
4529                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4530                 bnx2x_func_stats_init(bp);
4531                 bnx2x_hw_stats_post(bp);
4532                 bnx2x_stats_comp(bp);
4533         }
4534
4535         /* restore our func_stx */
4536         bp->func_stx = func_stx;
4537 }
4538
4539 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4540 {
4541         struct dmae_command *dmae = &bp->stats_dmae;
4542         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4543
4544         /* sanity */
4545         if (!bp->func_stx) {
4546                 BNX2X_ERR("BUG!\n");
4547                 return;
4548         }
4549
4550         bp->executer_idx = 0;
4551         memset(dmae, 0, sizeof(struct dmae_command));
4552
4553         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4554                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4555                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4556 #ifdef __BIG_ENDIAN
4557                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4558 #else
4559                         DMAE_CMD_ENDIANITY_DW_SWAP |
4560 #endif
4561                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4562                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4563         dmae->src_addr_lo = bp->func_stx >> 2;
4564         dmae->src_addr_hi = 0;
4565         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4566         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4567         dmae->len = sizeof(struct host_func_stats) >> 2;
4568         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4569         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4570         dmae->comp_val = DMAE_COMP_VAL;
4571
4572         *stats_comp = 0;
4573         bnx2x_hw_stats_post(bp);
4574         bnx2x_stats_comp(bp);
4575 }
4576
4577 static void bnx2x_stats_init(struct bnx2x *bp)
4578 {
4579         int port = BP_PORT(bp);
4580         int func = BP_FUNC(bp);
4581         int i;
4582
4583         bp->stats_pending = 0;
4584         bp->executer_idx = 0;
4585         bp->stats_counter = 0;
4586
4587         /* port and func stats for management */
4588         if (!BP_NOMCP(bp)) {
4589                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4590                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4591
4592         } else {
4593                 bp->port.port_stx = 0;
4594                 bp->func_stx = 0;
4595         }
4596         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4597            bp->port.port_stx, bp->func_stx);
4598
4599         /* port stats */
4600         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4601         bp->port.old_nig_stats.brb_discard =
4602                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4603         bp->port.old_nig_stats.brb_truncate =
4604                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4605         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4606                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4607         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4608                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4609
4610         /* function stats */
4611         for_each_queue(bp, i) {
4612                 struct bnx2x_fastpath *fp = &bp->fp[i];
4613
4614                 memset(&fp->old_tclient, 0,
4615                        sizeof(struct tstorm_per_client_stats));
4616                 memset(&fp->old_uclient, 0,
4617                        sizeof(struct ustorm_per_client_stats));
4618                 memset(&fp->old_xclient, 0,
4619                        sizeof(struct xstorm_per_client_stats));
4620                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4621         }
4622
4623         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4624         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4625
4626         bp->stats_state = STATS_STATE_DISABLED;
4627
4628         if (bp->port.pmf) {
4629                 if (bp->port.port_stx)
4630                         bnx2x_port_stats_base_init(bp);
4631
4632                 if (bp->func_stx)
4633                         bnx2x_func_stats_base_init(bp);
4634
4635         } else if (bp->func_stx)
4636                 bnx2x_func_stats_base_update(bp);
4637 }
4638
4639 static void bnx2x_timer(unsigned long data)
4640 {
4641         struct bnx2x *bp = (struct bnx2x *) data;
4642
4643         if (!netif_running(bp->dev))
4644                 return;
4645
4646         if (atomic_read(&bp->intr_sem) != 0)
4647                 goto timer_restart;
4648
4649         if (poll) {
4650                 struct bnx2x_fastpath *fp = &bp->fp[0];
4651                 int rc;
4652
4653                 bnx2x_tx_int(fp);
4654                 rc = bnx2x_rx_int(fp, 1000);
4655         }
4656
4657         if (!BP_NOMCP(bp)) {
4658                 int func = BP_FUNC(bp);
4659                 u32 drv_pulse;
4660                 u32 mcp_pulse;
4661
4662                 ++bp->fw_drv_pulse_wr_seq;
4663                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4664                 /* TBD - add SYSTEM_TIME */
4665                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4666                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4667
4668                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4669                              MCP_PULSE_SEQ_MASK);
4670                 /* The delta between driver pulse and mcp response
4671                  * should be 1 (before mcp response) or 0 (after mcp response)
4672                  */
4673                 if ((drv_pulse != mcp_pulse) &&
4674                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4675                         /* someone lost a heartbeat... */
4676                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4677                                   drv_pulse, mcp_pulse);
4678                 }
4679         }
4680
4681         if (bp->state == BNX2X_STATE_OPEN)
4682                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4683
4684 timer_restart:
4685         mod_timer(&bp->timer, jiffies + bp->current_interval);
4686 }
4687
4688 /* end of Statistics */
4689
4690 /* nic init */
4691
4692 /*
4693  * nic init service functions
4694  */
4695
4696 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4697 {
4698         int port = BP_PORT(bp);
4699
4700         /* "CSTORM" */
4701         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4702                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4703                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4704         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4705                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4706                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4707 }
4708
4709 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4710                           dma_addr_t mapping, int sb_id)
4711 {
4712         int port = BP_PORT(bp);
4713         int func = BP_FUNC(bp);
4714         int index;
4715         u64 section;
4716
4717         /* USTORM */
4718         section = ((u64)mapping) + offsetof(struct host_status_block,
4719                                             u_status_block);
4720         sb->u_status_block.status_block_id = sb_id;
4721
4722         REG_WR(bp, BAR_CSTRORM_INTMEM +
4723                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4724         REG_WR(bp, BAR_CSTRORM_INTMEM +
4725                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4726                U64_HI(section));
4727         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4728                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4729
4730         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4731                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4732                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4733
4734         /* CSTORM */
4735         section = ((u64)mapping) + offsetof(struct host_status_block,
4736                                             c_status_block);
4737         sb->c_status_block.status_block_id = sb_id;
4738
4739         REG_WR(bp, BAR_CSTRORM_INTMEM +
4740                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4741         REG_WR(bp, BAR_CSTRORM_INTMEM +
4742                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4743                U64_HI(section));
4744         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4745                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4746
4747         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4748                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4749                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4750
4751         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4752 }
4753
4754 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4755 {
4756         int func = BP_FUNC(bp);
4757
4758         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4759                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4760                         sizeof(struct tstorm_def_status_block)/4);
4761         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4762                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4763                         sizeof(struct cstorm_def_status_block_u)/4);
4764         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4765                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4766                         sizeof(struct cstorm_def_status_block_c)/4);
4767         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4768                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4769                         sizeof(struct xstorm_def_status_block)/4);
4770 }
4771
4772 static void bnx2x_init_def_sb(struct bnx2x *bp,
4773                               struct host_def_status_block *def_sb,
4774                               dma_addr_t mapping, int sb_id)
4775 {
4776         int port = BP_PORT(bp);
4777         int func = BP_FUNC(bp);
4778         int index, val, reg_offset;
4779         u64 section;
4780
4781         /* ATTN */
4782         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4783                                             atten_status_block);
4784         def_sb->atten_status_block.status_block_id = sb_id;
4785
4786         bp->attn_state = 0;
4787
4788         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4789                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4790
4791         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4792                 bp->attn_group[index].sig[0] = REG_RD(bp,
4793                                                      reg_offset + 0x10*index);
4794                 bp->attn_group[index].sig[1] = REG_RD(bp,
4795                                                reg_offset + 0x4 + 0x10*index);
4796                 bp->attn_group[index].sig[2] = REG_RD(bp,
4797                                                reg_offset + 0x8 + 0x10*index);
4798                 bp->attn_group[index].sig[3] = REG_RD(bp,
4799                                                reg_offset + 0xc + 0x10*index);
4800         }
4801
4802         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4803                              HC_REG_ATTN_MSG0_ADDR_L);
4804
4805         REG_WR(bp, reg_offset, U64_LO(section));
4806         REG_WR(bp, reg_offset + 4, U64_HI(section));
4807
4808         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4809
4810         val = REG_RD(bp, reg_offset);
4811         val |= sb_id;
4812         REG_WR(bp, reg_offset, val);
4813
4814         /* USTORM */
4815         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4816                                             u_def_status_block);
4817         def_sb->u_def_status_block.status_block_id = sb_id;
4818
4819         REG_WR(bp, BAR_CSTRORM_INTMEM +
4820                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4821         REG_WR(bp, BAR_CSTRORM_INTMEM +
4822                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4823                U64_HI(section));
4824         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4825                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4826
4827         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4828                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4829                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4830
4831         /* CSTORM */
4832         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4833                                             c_def_status_block);
4834         def_sb->c_def_status_block.status_block_id = sb_id;
4835
4836         REG_WR(bp, BAR_CSTRORM_INTMEM +
4837                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4838         REG_WR(bp, BAR_CSTRORM_INTMEM +
4839                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4840                U64_HI(section));
4841         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4842                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4843
4844         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4845                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4846                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4847
4848         /* TSTORM */
4849         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4850                                             t_def_status_block);
4851         def_sb->t_def_status_block.status_block_id = sb_id;
4852
4853         REG_WR(bp, BAR_TSTRORM_INTMEM +
4854                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4855         REG_WR(bp, BAR_TSTRORM_INTMEM +
4856                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4857                U64_HI(section));
4858         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4859                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4860
4861         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4862                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4863                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4864
4865         /* XSTORM */
4866         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4867                                             x_def_status_block);
4868         def_sb->x_def_status_block.status_block_id = sb_id;
4869
4870         REG_WR(bp, BAR_XSTRORM_INTMEM +
4871                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4872         REG_WR(bp, BAR_XSTRORM_INTMEM +
4873                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4874                U64_HI(section));
4875         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4876                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4877
4878         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4879                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4880                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4881
4882         bp->stats_pending = 0;
4883         bp->set_mac_pending = 0;
4884
4885         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4886 }
4887
4888 static void bnx2x_update_coalesce(struct bnx2x *bp)
4889 {
4890         int port = BP_PORT(bp);
4891         int i;
4892
4893         for_each_queue(bp, i) {
4894                 int sb_id = bp->fp[i].sb_id;
4895
4896                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4897                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4898                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4899                                                       U_SB_ETH_RX_CQ_INDEX),
4900                         bp->rx_ticks/(4 * BNX2X_BTR));
4901                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4902                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4903                                                        U_SB_ETH_RX_CQ_INDEX),
4904                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4905
4906                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4907                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4908                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4909                                                       C_SB_ETH_TX_CQ_INDEX),
4910                         bp->tx_ticks/(4 * BNX2X_BTR));
4911                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4912                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4913                                                        C_SB_ETH_TX_CQ_INDEX),
4914                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4915         }
4916 }
4917
4918 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4919                                        struct bnx2x_fastpath *fp, int last)
4920 {
4921         int i;
4922
4923         for (i = 0; i < last; i++) {
4924                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4925                 struct sk_buff *skb = rx_buf->skb;
4926
4927                 if (skb == NULL) {
4928                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4929                         continue;
4930                 }
4931
4932                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4933                         pci_unmap_single(bp->pdev,
4934                                          pci_unmap_addr(rx_buf, mapping),
4935                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4936
4937                 dev_kfree_skb(skb);
4938                 rx_buf->skb = NULL;
4939         }
4940 }
4941
4942 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4943 {
4944         int func = BP_FUNC(bp);
4945         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4946                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4947         u16 ring_prod, cqe_ring_prod;
4948         int i, j;
4949
4950         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4951         DP(NETIF_MSG_IFUP,
4952            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4953
4954         if (bp->flags & TPA_ENABLE_FLAG) {
4955
4956                 for_each_queue(bp, j) {
4957                         struct bnx2x_fastpath *fp = &bp->fp[j];
4958
4959                         for (i = 0; i < max_agg_queues; i++) {
4960                                 fp->tpa_pool[i].skb =
4961                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4962                                 if (!fp->tpa_pool[i].skb) {
4963                                         BNX2X_ERR("Failed to allocate TPA "
4964                                                   "skb pool for queue[%d] - "
4965                                                   "disabling TPA on this "
4966                                                   "queue!\n", j);
4967                                         bnx2x_free_tpa_pool(bp, fp, i);
4968                                         fp->disable_tpa = 1;
4969                                         break;
4970                                 }
4971                                 pci_unmap_addr_set((struct sw_rx_bd *)
4972                                                         &bp->fp->tpa_pool[i],
4973                                                    mapping, 0);
4974                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4975                         }
4976                 }
4977         }
4978
4979         for_each_queue(bp, j) {
4980                 struct bnx2x_fastpath *fp = &bp->fp[j];
4981
4982                 fp->rx_bd_cons = 0;
4983                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4984                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4985
4986                 /* "next page" elements initialization */
4987                 /* SGE ring */
4988                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4989                         struct eth_rx_sge *sge;
4990
4991                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4992                         sge->addr_hi =
4993                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4994                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4995                         sge->addr_lo =
4996                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4997                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4998                 }
4999
5000                 bnx2x_init_sge_ring_bit_mask(fp);
5001
5002                 /* RX BD ring */
5003                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5004                         struct eth_rx_bd *rx_bd;
5005
5006                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5007                         rx_bd->addr_hi =
5008                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5009                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5010                         rx_bd->addr_lo =
5011                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5012                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5013                 }
5014
5015                 /* CQ ring */
5016                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5017                         struct eth_rx_cqe_next_page *nextpg;
5018
5019                         nextpg = (struct eth_rx_cqe_next_page *)
5020                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5021                         nextpg->addr_hi =
5022                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5023                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5024                         nextpg->addr_lo =
5025                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5026                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5027                 }
5028
5029                 /* Allocate SGEs and initialize the ring elements */
5030                 for (i = 0, ring_prod = 0;
5031                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5032
5033                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5034                                 BNX2X_ERR("was only able to allocate "
5035                                           "%d rx sges\n", i);
5036                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5037                                 /* Cleanup already allocated elements */
5038                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5039                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5040                                 fp->disable_tpa = 1;
5041                                 ring_prod = 0;
5042                                 break;
5043                         }
5044                         ring_prod = NEXT_SGE_IDX(ring_prod);
5045                 }
5046                 fp->rx_sge_prod = ring_prod;
5047
5048                 /* Allocate BDs and initialize BD ring */
5049                 fp->rx_comp_cons = 0;
5050                 cqe_ring_prod = ring_prod = 0;
5051                 for (i = 0; i < bp->rx_ring_size; i++) {
5052                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5053                                 BNX2X_ERR("was only able to allocate "
5054                                           "%d rx skbs on queue[%d]\n", i, j);
5055                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5056                                 break;
5057                         }
5058                         ring_prod = NEXT_RX_IDX(ring_prod);
5059                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5060                         WARN_ON(ring_prod <= i);
5061                 }
5062
5063                 fp->rx_bd_prod = ring_prod;
5064                 /* must not have more available CQEs than BDs */
5065                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5066                                        cqe_ring_prod);
5067                 fp->rx_pkt = fp->rx_calls = 0;
5068
5069                 /* Warning!
5070                  * this will generate an interrupt (to the TSTORM)
5071                  * must only be done after chip is initialized
5072                  */
5073                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5074                                      fp->rx_sge_prod);
5075                 if (j != 0)
5076                         continue;
5077
5078                 REG_WR(bp, BAR_USTRORM_INTMEM +
5079                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5080                        U64_LO(fp->rx_comp_mapping));
5081                 REG_WR(bp, BAR_USTRORM_INTMEM +
5082                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5083                        U64_HI(fp->rx_comp_mapping));
5084         }
5085 }
5086
5087 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5088 {
5089         int i, j;
5090
5091         for_each_queue(bp, j) {
5092                 struct bnx2x_fastpath *fp = &bp->fp[j];
5093
5094                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5095                         struct eth_tx_next_bd *tx_next_bd =
5096                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5097
5098                         tx_next_bd->addr_hi =
5099                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5100                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5101                         tx_next_bd->addr_lo =
5102                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5103                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5104                 }
5105
5106                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5107                 fp->tx_db.data.zero_fill1 = 0;
5108                 fp->tx_db.data.prod = 0;
5109
5110                 fp->tx_pkt_prod = 0;
5111                 fp->tx_pkt_cons = 0;
5112                 fp->tx_bd_prod = 0;
5113                 fp->tx_bd_cons = 0;
5114                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5115                 fp->tx_pkt = 0;
5116         }
5117 }
5118
5119 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5120 {
5121         int func = BP_FUNC(bp);
5122
5123         spin_lock_init(&bp->spq_lock);
5124
5125         bp->spq_left = MAX_SPQ_PENDING;
5126         bp->spq_prod_idx = 0;
5127         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5128         bp->spq_prod_bd = bp->spq;
5129         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5130
5131         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5132                U64_LO(bp->spq_mapping));
5133         REG_WR(bp,
5134                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5135                U64_HI(bp->spq_mapping));
5136
5137         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5138                bp->spq_prod_idx);
5139 }
5140
5141 static void bnx2x_init_context(struct bnx2x *bp)
5142 {
5143         int i;
5144
5145         /* Rx */
5146         for_each_queue(bp, i) {
5147                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5148                 struct bnx2x_fastpath *fp = &bp->fp[i];
5149                 u8 cl_id = fp->cl_id;
5150
5151                 context->ustorm_st_context.common.sb_index_numbers =
5152                                                 BNX2X_RX_SB_INDEX_NUM;
5153                 context->ustorm_st_context.common.clientId = cl_id;
5154                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5155                 context->ustorm_st_context.common.flags =
5156                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5157                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5158                 context->ustorm_st_context.common.statistics_counter_id =
5159                                                 cl_id;
5160                 context->ustorm_st_context.common.mc_alignment_log_size =
5161                                                 BNX2X_RX_ALIGN_SHIFT;
5162                 context->ustorm_st_context.common.bd_buff_size =
5163                                                 bp->rx_buf_size;
5164                 context->ustorm_st_context.common.bd_page_base_hi =
5165                                                 U64_HI(fp->rx_desc_mapping);
5166                 context->ustorm_st_context.common.bd_page_base_lo =
5167                                                 U64_LO(fp->rx_desc_mapping);
5168                 if (!fp->disable_tpa) {
5169                         context->ustorm_st_context.common.flags |=
5170                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5171                         context->ustorm_st_context.common.sge_buff_size =
5172                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5173                                          (u32)0xffff);
5174                         context->ustorm_st_context.common.sge_page_base_hi =
5175                                                 U64_HI(fp->rx_sge_mapping);
5176                         context->ustorm_st_context.common.sge_page_base_lo =
5177                                                 U64_LO(fp->rx_sge_mapping);
5178
5179                         context->ustorm_st_context.common.max_sges_for_packet =
5180                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5181                         context->ustorm_st_context.common.max_sges_for_packet =
5182                                 ((context->ustorm_st_context.common.
5183                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5184                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5185                 }
5186
5187                 context->ustorm_ag_context.cdu_usage =
5188                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5189                                                CDU_REGION_NUMBER_UCM_AG,
5190                                                ETH_CONNECTION_TYPE);
5191
5192                 context->xstorm_ag_context.cdu_reserved =
5193                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5194                                                CDU_REGION_NUMBER_XCM_AG,
5195                                                ETH_CONNECTION_TYPE);
5196         }
5197
5198         /* Tx */
5199         for_each_queue(bp, i) {
5200                 struct bnx2x_fastpath *fp = &bp->fp[i];
5201                 struct eth_context *context =
5202                         bnx2x_sp(bp, context[i].eth);
5203
5204                 context->cstorm_st_context.sb_index_number =
5205                                                 C_SB_ETH_TX_CQ_INDEX;
5206                 context->cstorm_st_context.status_block_id = fp->sb_id;
5207
5208                 context->xstorm_st_context.tx_bd_page_base_hi =
5209                                                 U64_HI(fp->tx_desc_mapping);
5210                 context->xstorm_st_context.tx_bd_page_base_lo =
5211                                                 U64_LO(fp->tx_desc_mapping);
5212                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5213                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5214         }
5215 }
5216
5217 static void bnx2x_init_ind_table(struct bnx2x *bp)
5218 {
5219         int func = BP_FUNC(bp);
5220         int i;
5221
5222         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5223                 return;
5224
5225         DP(NETIF_MSG_IFUP,
5226            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5227         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5228                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5229                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5230                         bp->fp->cl_id + (i % bp->num_queues));
5231 }
5232
5233 static void bnx2x_set_client_config(struct bnx2x *bp)
5234 {
5235         struct tstorm_eth_client_config tstorm_client = {0};
5236         int port = BP_PORT(bp);
5237         int i;
5238
5239         tstorm_client.mtu = bp->dev->mtu;
5240         tstorm_client.config_flags =
5241                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5242                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5243 #ifdef BCM_VLAN
5244         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5245                 tstorm_client.config_flags |=
5246                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5247                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5248         }
5249 #endif
5250
5251         for_each_queue(bp, i) {
5252                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5253
5254                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5255                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5256                        ((u32 *)&tstorm_client)[0]);
5257                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5258                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5259                        ((u32 *)&tstorm_client)[1]);
5260         }
5261
5262         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5263            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5264 }
5265
5266 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5267 {
5268         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5269         int mode = bp->rx_mode;
5270         int mask = bp->rx_mode_cl_mask;
5271         int func = BP_FUNC(bp);
5272         int port = BP_PORT(bp);
5273         int i;
5274         /* All but management unicast packets should pass to the host as well */
5275         u32 llh_mask =
5276                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5277                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5278                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5279                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5280
5281         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5282
5283         switch (mode) {
5284         case BNX2X_RX_MODE_NONE: /* no Rx */
5285                 tstorm_mac_filter.ucast_drop_all = mask;
5286                 tstorm_mac_filter.mcast_drop_all = mask;
5287                 tstorm_mac_filter.bcast_drop_all = mask;
5288                 break;
5289
5290         case BNX2X_RX_MODE_NORMAL:
5291                 tstorm_mac_filter.bcast_accept_all = mask;
5292                 break;
5293
5294         case BNX2X_RX_MODE_ALLMULTI:
5295                 tstorm_mac_filter.mcast_accept_all = mask;
5296                 tstorm_mac_filter.bcast_accept_all = mask;
5297                 break;
5298
5299         case BNX2X_RX_MODE_PROMISC:
5300                 tstorm_mac_filter.ucast_accept_all = mask;
5301                 tstorm_mac_filter.mcast_accept_all = mask;
5302                 tstorm_mac_filter.bcast_accept_all = mask;
5303                 /* pass management unicast packets as well */
5304                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5305                 break;
5306
5307         default:
5308                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5309                 break;
5310         }
5311
5312         REG_WR(bp,
5313                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5314                llh_mask);
5315
5316         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5317                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5318                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5319                        ((u32 *)&tstorm_mac_filter)[i]);
5320
5321 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5322                    ((u32 *)&tstorm_mac_filter)[i]); */
5323         }
5324
5325         if (mode != BNX2X_RX_MODE_NONE)
5326                 bnx2x_set_client_config(bp);
5327 }
5328
5329 static void bnx2x_init_internal_common(struct bnx2x *bp)
5330 {
5331         int i;
5332
5333         /* Zero this manually as its initialization is
5334            currently missing in the initTool */
5335         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5336                 REG_WR(bp, BAR_USTRORM_INTMEM +
5337                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5338 }
5339
5340 static void bnx2x_init_internal_port(struct bnx2x *bp)
5341 {
5342         int port = BP_PORT(bp);
5343
5344         REG_WR(bp,
5345                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5346         REG_WR(bp,
5347                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5348         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5349         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5350 }
5351
5352 static void bnx2x_init_internal_func(struct bnx2x *bp)
5353 {
5354         struct tstorm_eth_function_common_config tstorm_config = {0};
5355         struct stats_indication_flags stats_flags = {0};
5356         int port = BP_PORT(bp);
5357         int func = BP_FUNC(bp);
5358         int i, j;
5359         u32 offset;
5360         u16 max_agg_size;
5361
5362         if (is_multi(bp)) {
5363                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5364                 tstorm_config.rss_result_mask = MULTI_MASK;
5365         }
5366
5367         /* Enable TPA if needed */
5368         if (bp->flags & TPA_ENABLE_FLAG)
5369                 tstorm_config.config_flags |=
5370                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5371
5372         if (IS_E1HMF(bp))
5373                 tstorm_config.config_flags |=
5374                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5375
5376         tstorm_config.leading_client_id = BP_L_ID(bp);
5377
5378         REG_WR(bp, BAR_TSTRORM_INTMEM +
5379                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5380                (*(u32 *)&tstorm_config));
5381
5382         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5383         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5384         bnx2x_set_storm_rx_mode(bp);
5385
5386         for_each_queue(bp, i) {
5387                 u8 cl_id = bp->fp[i].cl_id;
5388
5389                 /* reset xstorm per client statistics */
5390                 offset = BAR_XSTRORM_INTMEM +
5391                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5392                 for (j = 0;
5393                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5394                         REG_WR(bp, offset + j*4, 0);
5395
5396                 /* reset tstorm per client statistics */
5397                 offset = BAR_TSTRORM_INTMEM +
5398                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5399                 for (j = 0;
5400                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5401                         REG_WR(bp, offset + j*4, 0);
5402
5403                 /* reset ustorm per client statistics */
5404                 offset = BAR_USTRORM_INTMEM +
5405                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5406                 for (j = 0;
5407                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5408                         REG_WR(bp, offset + j*4, 0);
5409         }
5410
5411         /* Init statistics related context */
5412         stats_flags.collect_eth = 1;
5413
5414         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5415                ((u32 *)&stats_flags)[0]);
5416         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5417                ((u32 *)&stats_flags)[1]);
5418
5419         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5420                ((u32 *)&stats_flags)[0]);
5421         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5422                ((u32 *)&stats_flags)[1]);
5423
5424         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5425                ((u32 *)&stats_flags)[0]);
5426         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5427                ((u32 *)&stats_flags)[1]);
5428
5429         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5430                ((u32 *)&stats_flags)[0]);
5431         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5432                ((u32 *)&stats_flags)[1]);
5433
5434         REG_WR(bp, BAR_XSTRORM_INTMEM +
5435                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5436                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5437         REG_WR(bp, BAR_XSTRORM_INTMEM +
5438                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5439                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5440
5441         REG_WR(bp, BAR_TSTRORM_INTMEM +
5442                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5443                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5444         REG_WR(bp, BAR_TSTRORM_INTMEM +
5445                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5446                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5447
5448         REG_WR(bp, BAR_USTRORM_INTMEM +
5449                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5450                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5451         REG_WR(bp, BAR_USTRORM_INTMEM +
5452                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5453                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5454
5455         if (CHIP_IS_E1H(bp)) {
5456                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5457                         IS_E1HMF(bp));
5458                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5459                         IS_E1HMF(bp));
5460                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5461                         IS_E1HMF(bp));
5462                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5463                         IS_E1HMF(bp));
5464
5465                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5466                          bp->e1hov);
5467         }
5468
5469         /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5470         max_agg_size =
5471                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5472                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5473                     (u32)0xffff);
5474         for_each_queue(bp, i) {
5475                 struct bnx2x_fastpath *fp = &bp->fp[i];
5476
5477                 REG_WR(bp, BAR_USTRORM_INTMEM +
5478                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5479                        U64_LO(fp->rx_comp_mapping));
5480                 REG_WR(bp, BAR_USTRORM_INTMEM +
5481                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5482                        U64_HI(fp->rx_comp_mapping));
5483
5484                 /* Next page */
5485                 REG_WR(bp, BAR_USTRORM_INTMEM +
5486                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5487                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5488                 REG_WR(bp, BAR_USTRORM_INTMEM +
5489                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5490                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5491
5492                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5493                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5494                          max_agg_size);
5495         }
5496
5497         /* dropless flow control */
5498         if (CHIP_IS_E1H(bp)) {
5499                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5500
5501                 rx_pause.bd_thr_low = 250;
5502                 rx_pause.cqe_thr_low = 250;
5503                 rx_pause.cos = 1;
5504                 rx_pause.sge_thr_low = 0;
5505                 rx_pause.bd_thr_high = 350;
5506                 rx_pause.cqe_thr_high = 350;
5507                 rx_pause.sge_thr_high = 0;
5508
5509                 for_each_queue(bp, i) {
5510                         struct bnx2x_fastpath *fp = &bp->fp[i];
5511
5512                         if (!fp->disable_tpa) {
5513                                 rx_pause.sge_thr_low = 150;
5514                                 rx_pause.sge_thr_high = 250;
5515                         }
5516
5517
5518                         offset = BAR_USTRORM_INTMEM +
5519                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5520                                                                    fp->cl_id);
5521                         for (j = 0;
5522                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5523                              j++)
5524                                 REG_WR(bp, offset + j*4,
5525                                        ((u32 *)&rx_pause)[j]);
5526                 }
5527         }
5528
5529         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5530
5531         /* Init rate shaping and fairness contexts */
5532         if (IS_E1HMF(bp)) {
5533                 int vn;
5534
5535                 /* During init there is no active link
5536                    Until link is up, set link rate to 10Gbps */
5537                 bp->link_vars.line_speed = SPEED_10000;
5538                 bnx2x_init_port_minmax(bp);
5539
5540                 if (!BP_NOMCP(bp))
5541                         bp->mf_config =
5542                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5543                 bnx2x_calc_vn_weight_sum(bp);
5544
5545                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5546                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5547
5548                 /* Enable rate shaping and fairness */
5549                 bp->cmng.flags.cmng_enables |=
5550                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5551
5552         } else {
5553                 /* rate shaping and fairness are disabled */
5554                 DP(NETIF_MSG_IFUP,
5555                    "single function mode  minmax will be disabled\n");
5556         }
5557
5558
5559         /* Store it to internal memory */
5560         if (bp->port.pmf)
5561                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5562                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5563                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5564                                ((u32 *)(&bp->cmng))[i]);
5565 }
5566
5567 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5568 {
5569         switch (load_code) {
5570         case FW_MSG_CODE_DRV_LOAD_COMMON:
5571                 bnx2x_init_internal_common(bp);
5572                 /* no break */
5573
5574         case FW_MSG_CODE_DRV_LOAD_PORT:
5575                 bnx2x_init_internal_port(bp);
5576                 /* no break */
5577
5578         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5579                 bnx2x_init_internal_func(bp);
5580                 break;
5581
5582         default:
5583                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5584                 break;
5585         }
5586 }
5587
5588 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5589 {
5590         int i;
5591
5592         for_each_queue(bp, i) {
5593                 struct bnx2x_fastpath *fp = &bp->fp[i];
5594
5595                 fp->bp = bp;
5596                 fp->state = BNX2X_FP_STATE_CLOSED;
5597                 fp->index = i;
5598                 fp->cl_id = BP_L_ID(bp) + i;
5599 #ifdef BCM_CNIC
5600                 fp->sb_id = fp->cl_id + 1;
5601 #else
5602                 fp->sb_id = fp->cl_id;
5603 #endif
5604                 DP(NETIF_MSG_IFUP,
5605                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5606                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5607                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5608                               fp->sb_id);
5609                 bnx2x_update_fpsb_idx(fp);
5610         }
5611
5612         /* ensure status block indices were read */
5613         rmb();
5614
5615
5616         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5617                           DEF_SB_ID);
5618         bnx2x_update_dsb_idx(bp);
5619         bnx2x_update_coalesce(bp);
5620         bnx2x_init_rx_rings(bp);
5621         bnx2x_init_tx_ring(bp);
5622         bnx2x_init_sp_ring(bp);
5623         bnx2x_init_context(bp);
5624         bnx2x_init_internal(bp, load_code);
5625         bnx2x_init_ind_table(bp);
5626         bnx2x_stats_init(bp);
5627
5628         /* At this point, we are ready for interrupts */
5629         atomic_set(&bp->intr_sem, 0);
5630
5631         /* flush all before enabling interrupts */
5632         mb();
5633         mmiowb();
5634
5635         bnx2x_int_enable(bp);
5636
5637         /* Check for SPIO5 */
5638         bnx2x_attn_int_deasserted0(bp,
5639                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5640                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5641 }
5642
5643 /* end of nic init */
5644
5645 /*
5646  * gzip service functions
5647  */
5648
5649 static int bnx2x_gunzip_init(struct bnx2x *bp)
5650 {
5651         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5652                                               &bp->gunzip_mapping);
5653         if (bp->gunzip_buf  == NULL)
5654                 goto gunzip_nomem1;
5655
5656         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5657         if (bp->strm  == NULL)
5658                 goto gunzip_nomem2;
5659
5660         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5661                                       GFP_KERNEL);
5662         if (bp->strm->workspace == NULL)
5663                 goto gunzip_nomem3;
5664
5665         return 0;
5666
5667 gunzip_nomem3:
5668         kfree(bp->strm);
5669         bp->strm = NULL;
5670
5671 gunzip_nomem2:
5672         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5673                             bp->gunzip_mapping);
5674         bp->gunzip_buf = NULL;
5675
5676 gunzip_nomem1:
5677         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5678                " un-compression\n", bp->dev->name);
5679         return -ENOMEM;
5680 }
5681
5682 static void bnx2x_gunzip_end(struct bnx2x *bp)
5683 {
5684         kfree(bp->strm->workspace);
5685
5686         kfree(bp->strm);
5687         bp->strm = NULL;
5688
5689         if (bp->gunzip_buf) {
5690                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5691                                     bp->gunzip_mapping);
5692                 bp->gunzip_buf = NULL;
5693         }
5694 }
5695
5696 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5697 {
5698         int n, rc;
5699
5700         /* check gzip header */
5701         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5702                 BNX2X_ERR("Bad gzip header\n");
5703                 return -EINVAL;
5704         }
5705
5706         n = 10;
5707
5708 #define FNAME                           0x8
5709
5710         if (zbuf[3] & FNAME)
5711                 while ((zbuf[n++] != 0) && (n < len));
5712
5713         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5714         bp->strm->avail_in = len - n;
5715         bp->strm->next_out = bp->gunzip_buf;
5716         bp->strm->avail_out = FW_BUF_SIZE;
5717
5718         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5719         if (rc != Z_OK)
5720                 return rc;
5721
5722         rc = zlib_inflate(bp->strm, Z_FINISH);
5723         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5724                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5725                        bp->dev->name, bp->strm->msg);
5726
5727         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5728         if (bp->gunzip_outlen & 0x3)
5729                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5730                                     " gunzip_outlen (%d) not aligned\n",
5731                        bp->dev->name, bp->gunzip_outlen);
5732         bp->gunzip_outlen >>= 2;
5733
5734         zlib_inflateEnd(bp->strm);
5735
5736         if (rc == Z_STREAM_END)
5737                 return 0;
5738
5739         return rc;
5740 }
5741
5742 /* nic load/unload */
5743
5744 /*
5745  * General service functions
5746  */
5747
5748 /* send a NIG loopback debug packet */
5749 static void bnx2x_lb_pckt(struct bnx2x *bp)
5750 {
5751         u32 wb_write[3];
5752
5753         /* Ethernet source and destination addresses */
5754         wb_write[0] = 0x55555555;
5755         wb_write[1] = 0x55555555;
5756         wb_write[2] = 0x20;             /* SOP */
5757         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5758
5759         /* NON-IP protocol */
5760         wb_write[0] = 0x09000000;
5761         wb_write[1] = 0x55555555;
5762         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5763         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5764 }
5765
5766 /* some of the internal memories
5767  * are not directly readable from the driver
5768  * to test them we send debug packets
5769  */
5770 static int bnx2x_int_mem_test(struct bnx2x *bp)
5771 {
5772         int factor;
5773         int count, i;
5774         u32 val = 0;
5775
5776         if (CHIP_REV_IS_FPGA(bp))
5777                 factor = 120;
5778         else if (CHIP_REV_IS_EMUL(bp))
5779                 factor = 200;
5780         else
5781                 factor = 1;
5782
5783         DP(NETIF_MSG_HW, "start part1\n");
5784
5785         /* Disable inputs of parser neighbor blocks */
5786         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5787         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5788         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5789         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5790
5791         /*  Write 0 to parser credits for CFC search request */
5792         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5793
5794         /* send Ethernet packet */
5795         bnx2x_lb_pckt(bp);
5796
5797         /* TODO do i reset NIG statistic? */
5798         /* Wait until NIG register shows 1 packet of size 0x10 */
5799         count = 1000 * factor;
5800         while (count) {
5801
5802                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5803                 val = *bnx2x_sp(bp, wb_data[0]);
5804                 if (val == 0x10)
5805                         break;
5806
5807                 msleep(10);
5808                 count--;
5809         }
5810         if (val != 0x10) {
5811                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5812                 return -1;
5813         }
5814
5815         /* Wait until PRS register shows 1 packet */
5816         count = 1000 * factor;
5817         while (count) {
5818                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5819                 if (val == 1)
5820                         break;
5821
5822                 msleep(10);
5823                 count--;
5824         }
5825         if (val != 0x1) {
5826                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5827                 return -2;
5828         }
5829
5830         /* Reset and init BRB, PRS */
5831         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5832         msleep(50);
5833         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5834         msleep(50);
5835         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5836         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5837
5838         DP(NETIF_MSG_HW, "part2\n");
5839
5840         /* Disable inputs of parser neighbor blocks */
5841         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5842         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5843         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5844         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5845
5846         /* Write 0 to parser credits for CFC search request */
5847         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5848
5849         /* send 10 Ethernet packets */
5850         for (i = 0; i < 10; i++)
5851                 bnx2x_lb_pckt(bp);
5852
5853         /* Wait until NIG register shows 10 + 1
5854            packets of size 11*0x10 = 0xb0 */
5855         count = 1000 * factor;
5856         while (count) {
5857
5858                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5859                 val = *bnx2x_sp(bp, wb_data[0]);
5860                 if (val == 0xb0)
5861                         break;
5862
5863                 msleep(10);
5864                 count--;
5865         }
5866         if (val != 0xb0) {
5867                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5868                 return -3;
5869         }
5870
5871         /* Wait until PRS register shows 2 packets */
5872         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5873         if (val != 2)
5874                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5875
5876         /* Write 1 to parser credits for CFC search request */
5877         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5878
5879         /* Wait until PRS register shows 3 packets */
5880         msleep(10 * factor);
5881         /* Wait until NIG register shows 1 packet of size 0x10 */
5882         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5883         if (val != 3)
5884                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5885
5886         /* clear NIG EOP FIFO */
5887         for (i = 0; i < 11; i++)
5888                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5889         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5890         if (val != 1) {
5891                 BNX2X_ERR("clear of NIG failed\n");
5892                 return -4;
5893         }
5894
5895         /* Reset and init BRB, PRS, NIG */
5896         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5897         msleep(50);
5898         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5899         msleep(50);
5900         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5901         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5902 #ifndef BCM_CNIC
5903         /* set NIC mode */
5904         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5905 #endif
5906
5907         /* Enable inputs of parser neighbor blocks */
5908         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5909         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5910         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5911         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5912
5913         DP(NETIF_MSG_HW, "done\n");
5914
5915         return 0; /* OK */
5916 }
5917
5918 static void enable_blocks_attention(struct bnx2x *bp)
5919 {
5920         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5921         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5922         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5923         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5924         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5925         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5926         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5927         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5928         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5929 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5930 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5931         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5932         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5933         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5934 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5935 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5936         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5937         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5938         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5939         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5940 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5941 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5942         if (CHIP_REV_IS_FPGA(bp))
5943                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5944         else
5945                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5946         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5947         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5948         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5949 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5950 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5951         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5952         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5953 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5954         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);         /* bit 3,4 masked */
5955 }
5956
5957
5958 static void bnx2x_reset_common(struct bnx2x *bp)
5959 {
5960         /* reset_common */
5961         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5962                0xd3ffff7f);
5963         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5964 }
5965
5966 static void bnx2x_init_pxp(struct bnx2x *bp)
5967 {
5968         u16 devctl;
5969         int r_order, w_order;
5970
5971         pci_read_config_word(bp->pdev,
5972                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5973         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5974         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5975         if (bp->mrrs == -1)
5976                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5977         else {
5978                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5979                 r_order = bp->mrrs;
5980         }
5981
5982         bnx2x_init_pxp_arb(bp, r_order, w_order);
5983 }
5984
5985 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5986 {
5987         u32 val;
5988         u8 port;
5989         u8 is_required = 0;
5990
5991         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5992               SHARED_HW_CFG_FAN_FAILURE_MASK;
5993
5994         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5995                 is_required = 1;
5996
5997         /*
5998          * The fan failure mechanism is usually related to the PHY type since
5999          * the power consumption of the board is affected by the PHY. Currently,
6000          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6001          */
6002         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6003                 for (port = PORT_0; port < PORT_MAX; port++) {
6004                         u32 phy_type =
6005                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
6006                                          external_phy_config) &
6007                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6008                         is_required |=
6009                                 ((phy_type ==
6010                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6011                                  (phy_type ==
6012                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6013                                  (phy_type ==
6014                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6015                 }
6016
6017         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6018
6019         if (is_required == 0)
6020                 return;
6021
6022         /* Fan failure is indicated by SPIO 5 */
6023         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6024                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6025
6026         /* set to active low mode */
6027         val = REG_RD(bp, MISC_REG_SPIO_INT);
6028         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6029                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6030         REG_WR(bp, MISC_REG_SPIO_INT, val);
6031
6032         /* enable interrupt to signal the IGU */
6033         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6034         val |= (1 << MISC_REGISTERS_SPIO_5);
6035         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6036 }
6037
6038 static int bnx2x_init_common(struct bnx2x *bp)
6039 {
6040         u32 val, i;
6041 #ifdef BCM_CNIC
6042         u32 wb_write[2];
6043 #endif
6044
6045         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6046
6047         bnx2x_reset_common(bp);
6048         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6049         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6050
6051         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6052         if (CHIP_IS_E1H(bp))
6053                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6054
6055         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6056         msleep(30);
6057         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6058
6059         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6060         if (CHIP_IS_E1(bp)) {
6061                 /* enable HW interrupt from PXP on USDM overflow
6062                    bit 16 on INT_MASK_0 */
6063                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6064         }
6065
6066         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6067         bnx2x_init_pxp(bp);
6068
6069 #ifdef __BIG_ENDIAN
6070         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6071         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6072         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6073         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6074         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6075         /* make sure this value is 0 */
6076         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6077
6078 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6079         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6080         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6081         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6082         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6083 #endif
6084
6085         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6086 #ifdef BCM_CNIC
6087         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6088         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6089         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6090 #endif
6091
6092         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6093                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6094
6095         /* let the HW do it's magic ... */
6096         msleep(100);
6097         /* finish PXP init */
6098         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6099         if (val != 1) {
6100                 BNX2X_ERR("PXP2 CFG failed\n");
6101                 return -EBUSY;
6102         }
6103         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6104         if (val != 1) {
6105                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6106                 return -EBUSY;
6107         }
6108
6109         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6110         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6111
6112         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6113
6114         /* clean the DMAE memory */
6115         bp->dmae_ready = 1;
6116         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6117
6118         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6119         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6120         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6121         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6122
6123         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6124         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6125         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6126         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6127
6128         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6129
6130 #ifdef BCM_CNIC
6131         wb_write[0] = 0;
6132         wb_write[1] = 0;
6133         for (i = 0; i < 64; i++) {
6134                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6135                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6136
6137                 if (CHIP_IS_E1H(bp)) {
6138                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6139                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6140                                           wb_write, 2);
6141                 }
6142         }
6143 #endif
6144         /* soft reset pulse */
6145         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6146         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6147
6148 #ifdef BCM_CNIC
6149         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6150 #endif
6151
6152         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6153         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6154         if (!CHIP_REV_IS_SLOW(bp)) {
6155                 /* enable hw interrupt from doorbell Q */
6156                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6157         }
6158
6159         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6160         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6161         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6162 #ifndef BCM_CNIC
6163         /* set NIC mode */
6164         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6165 #endif
6166         if (CHIP_IS_E1H(bp))
6167                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6168
6169         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6170         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6171         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6172         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6173
6174         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6175         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6176         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6177         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6178
6179         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6180         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6181         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6182         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6183
6184         /* sync semi rtc */
6185         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6186                0x80000000);
6187         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6188                0x80000000);
6189
6190         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6191         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6192         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6193
6194         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6195         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6196                 REG_WR(bp, i, 0xc0cac01a);
6197                 /* TODO: replace with something meaningful */
6198         }
6199         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6200 #ifdef BCM_CNIC
6201         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6202         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6203         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6204         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6205         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6206         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6207         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6208         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6209         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6210         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6211 #endif
6212         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6213
6214         if (sizeof(union cdu_context) != 1024)
6215                 /* we currently assume that a context is 1024 bytes */
6216                 printk(KERN_ALERT PFX "please adjust the size of"
6217                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6218
6219         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6220         val = (4 << 24) + (0 << 12) + 1024;
6221         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6222
6223         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6224         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6225         /* enable context validation interrupt from CFC */
6226         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6227
6228         /* set the thresholds to prevent CFC/CDU race */
6229         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6230
6231         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6232         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6233
6234         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6235         /* Reset PCIE errors for debug */
6236         REG_WR(bp, 0x2814, 0xffffffff);
6237         REG_WR(bp, 0x3820, 0xffffffff);
6238
6239         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6240         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6241         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6242         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6243
6244         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6245         if (CHIP_IS_E1H(bp)) {
6246                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6247                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6248         }
6249
6250         if (CHIP_REV_IS_SLOW(bp))
6251                 msleep(200);
6252
6253         /* finish CFC init */
6254         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6255         if (val != 1) {
6256                 BNX2X_ERR("CFC LL_INIT failed\n");
6257                 return -EBUSY;
6258         }
6259         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6260         if (val != 1) {
6261                 BNX2X_ERR("CFC AC_INIT failed\n");
6262                 return -EBUSY;
6263         }
6264         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6265         if (val != 1) {
6266                 BNX2X_ERR("CFC CAM_INIT failed\n");
6267                 return -EBUSY;
6268         }
6269         REG_WR(bp, CFC_REG_DEBUG0, 0);
6270
6271         /* read NIG statistic
6272            to see if this is our first up since powerup */
6273         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6274         val = *bnx2x_sp(bp, wb_data[0]);
6275
6276         /* do internal memory self test */
6277         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6278                 BNX2X_ERR("internal mem self test failed\n");
6279                 return -EBUSY;
6280         }
6281
6282         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6283         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6284         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6285         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6286         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6287                 bp->port.need_hw_lock = 1;
6288                 break;
6289
6290         default:
6291                 break;
6292         }
6293
6294         bnx2x_setup_fan_failure_detection(bp);
6295
6296         /* clear PXP2 attentions */
6297         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6298
6299         enable_blocks_attention(bp);
6300
6301         if (!BP_NOMCP(bp)) {
6302                 bnx2x_acquire_phy_lock(bp);
6303                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6304                 bnx2x_release_phy_lock(bp);
6305         } else
6306                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6307
6308         return 0;
6309 }
6310
6311 static int bnx2x_init_port(struct bnx2x *bp)
6312 {
6313         int port = BP_PORT(bp);
6314         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6315         u32 low, high;
6316         u32 val;
6317
6318         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
6319
6320         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6321
6322         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6323         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6324
6325         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6326         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6327         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6328         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6329
6330 #ifdef BCM_CNIC
6331         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6332
6333         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6334         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6335         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6336 #endif
6337         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6338
6339         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6340         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6341                 /* no pause for emulation and FPGA */
6342                 low = 0;
6343                 high = 513;
6344         } else {
6345                 if (IS_E1HMF(bp))
6346                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6347                 else if (bp->dev->mtu > 4096) {
6348                         if (bp->flags & ONE_PORT_FLAG)
6349                                 low = 160;
6350                         else {
6351                                 val = bp->dev->mtu;
6352                                 /* (24*1024 + val*4)/256 */
6353                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6354                         }
6355                 } else
6356                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6357                 high = low + 56;        /* 14*1024/256 */
6358         }
6359         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6360         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6361
6362
6363         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6364
6365         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6366         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6367         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6368         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6369
6370         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6371         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6372         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6373         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6374
6375         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6376         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6377
6378         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6379
6380         /* configure PBF to work without PAUSE mtu 9000 */
6381         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6382
6383         /* update threshold */
6384         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6385         /* update init credit */
6386         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6387
6388         /* probe changes */
6389         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6390         msleep(5);
6391         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6392
6393 #ifdef BCM_CNIC
6394         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6395 #endif
6396         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6397         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6398
6399         if (CHIP_IS_E1(bp)) {
6400                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6401                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6402         }
6403         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6404
6405         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6406         /* init aeu_mask_attn_func_0/1:
6407          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6408          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6409          *             bits 4-7 are used for "per vn group attention" */
6410         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6411                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6412
6413         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6414         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6415         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6416         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6417         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6418
6419         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6420
6421         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6422
6423         if (CHIP_IS_E1H(bp)) {
6424                 /* 0x2 disable e1hov, 0x1 enable */
6425                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6426                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6427
6428                 {
6429                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6430                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6431                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6432                 }
6433         }
6434
6435         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6436         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6437
6438         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6439         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6440                 {
6441                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6442
6443                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6444                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6445
6446                 /* The GPIO should be swapped if the swap register is
6447                    set and active */
6448                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6449                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6450
6451                 /* Select function upon port-swap configuration */
6452                 if (port == 0) {
6453                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6454                         aeu_gpio_mask = (swap_val && swap_override) ?
6455                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6456                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6457                 } else {
6458                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6459                         aeu_gpio_mask = (swap_val && swap_override) ?
6460                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6461                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6462                 }
6463                 val = REG_RD(bp, offset);
6464                 /* add GPIO3 to group */
6465                 val |= aeu_gpio_mask;
6466                 REG_WR(bp, offset, val);
6467                 }
6468                 break;
6469
6470         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6471         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6472                 /* add SPIO 5 to group 0 */
6473                 {
6474                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6475                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6476                 val = REG_RD(bp, reg_addr);
6477                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6478                 REG_WR(bp, reg_addr, val);
6479                 }
6480                 break;
6481
6482         default:
6483                 break;
6484         }
6485
6486         bnx2x__link_reset(bp);
6487
6488         return 0;
6489 }
6490
6491 #define ILT_PER_FUNC            (768/2)
6492 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6493 /* the phys address is shifted right 12 bits and has an added
6494    1=valid bit added to the 53rd bit
6495    then since this is a wide register(TM)
6496    we split it into two 32 bit writes
6497  */
6498 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6499 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6500 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6501 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
6502
6503 #ifdef BCM_CNIC
6504 #define CNIC_ILT_LINES          127
6505 #define CNIC_CTX_PER_ILT        16
6506 #else
6507 #define CNIC_ILT_LINES          0
6508 #endif
6509
6510 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6511 {
6512         int reg;
6513
6514         if (CHIP_IS_E1H(bp))
6515                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6516         else /* E1 */
6517                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6518
6519         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6520 }
6521
6522 static int bnx2x_init_func(struct bnx2x *bp)
6523 {
6524         int port = BP_PORT(bp);
6525         int func = BP_FUNC(bp);
6526         u32 addr, val;
6527         int i;
6528
6529         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6530
6531         /* set MSI reconfigure capability */
6532         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6533         val = REG_RD(bp, addr);
6534         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6535         REG_WR(bp, addr, val);
6536
6537         i = FUNC_ILT_BASE(func);
6538
6539         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6540         if (CHIP_IS_E1H(bp)) {
6541                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6542                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6543         } else /* E1 */
6544                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6545                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6546
6547 #ifdef BCM_CNIC
6548         i += 1 + CNIC_ILT_LINES;
6549         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6550         if (CHIP_IS_E1(bp))
6551                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6552         else {
6553                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6554                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6555         }
6556
6557         i++;
6558         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6559         if (CHIP_IS_E1(bp))
6560                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6561         else {
6562                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6563                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6564         }
6565
6566         i++;
6567         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6568         if (CHIP_IS_E1(bp))
6569                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6570         else {
6571                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6572                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6573         }
6574
6575         /* tell the searcher where the T2 table is */
6576         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6577
6578         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6579                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6580
6581         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6582                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6583                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6584
6585         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6586 #endif
6587
6588         if (CHIP_IS_E1H(bp)) {
6589                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6590                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6591                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6592                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6593                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6594                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6595                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6596                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6597                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6598
6599                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6600                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6601         }
6602
6603         /* HC init per function */
6604         if (CHIP_IS_E1H(bp)) {
6605                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6606
6607                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6608                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6609         }
6610         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6611
6612         /* Reset PCIE errors for debug */
6613         REG_WR(bp, 0x2114, 0xffffffff);
6614         REG_WR(bp, 0x2120, 0xffffffff);
6615
6616         return 0;
6617 }
6618
6619 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6620 {
6621         int i, rc = 0;
6622
6623         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6624            BP_FUNC(bp), load_code);
6625
6626         bp->dmae_ready = 0;
6627         mutex_init(&bp->dmae_mutex);
6628         rc = bnx2x_gunzip_init(bp);
6629         if (rc)
6630                 return rc;
6631
6632         switch (load_code) {
6633         case FW_MSG_CODE_DRV_LOAD_COMMON:
6634                 rc = bnx2x_init_common(bp);
6635                 if (rc)
6636                         goto init_hw_err;
6637                 /* no break */
6638
6639         case FW_MSG_CODE_DRV_LOAD_PORT:
6640                 bp->dmae_ready = 1;
6641                 rc = bnx2x_init_port(bp);
6642                 if (rc)
6643                         goto init_hw_err;
6644                 /* no break */
6645
6646         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6647                 bp->dmae_ready = 1;
6648                 rc = bnx2x_init_func(bp);
6649                 if (rc)
6650                         goto init_hw_err;
6651                 break;
6652
6653         default:
6654                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6655                 break;
6656         }
6657
6658         if (!BP_NOMCP(bp)) {
6659                 int func = BP_FUNC(bp);
6660
6661                 bp->fw_drv_pulse_wr_seq =
6662                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6663                                  DRV_PULSE_SEQ_MASK);
6664                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6665         }
6666
6667         /* this needs to be done before gunzip end */
6668         bnx2x_zero_def_sb(bp);
6669         for_each_queue(bp, i)
6670                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6671 #ifdef BCM_CNIC
6672         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6673 #endif
6674
6675 init_hw_err:
6676         bnx2x_gunzip_end(bp);
6677
6678         return rc;
6679 }
6680
6681 static void bnx2x_free_mem(struct bnx2x *bp)
6682 {
6683
6684 #define BNX2X_PCI_FREE(x, y, size) \
6685         do { \
6686                 if (x) { \
6687                         pci_free_consistent(bp->pdev, size, x, y); \
6688                         x = NULL; \
6689                         y = 0; \
6690                 } \
6691         } while (0)
6692
6693 #define BNX2X_FREE(x) \
6694         do { \
6695                 if (x) { \
6696                         vfree(x); \
6697                         x = NULL; \
6698                 } \
6699         } while (0)
6700
6701         int i;
6702
6703         /* fastpath */
6704         /* Common */
6705         for_each_queue(bp, i) {
6706
6707                 /* status blocks */
6708                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6709                                bnx2x_fp(bp, i, status_blk_mapping),
6710                                sizeof(struct host_status_block));
6711         }
6712         /* Rx */
6713         for_each_queue(bp, i) {
6714
6715                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6716                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6717                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6718                                bnx2x_fp(bp, i, rx_desc_mapping),
6719                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6720
6721                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6722                                bnx2x_fp(bp, i, rx_comp_mapping),
6723                                sizeof(struct eth_fast_path_rx_cqe) *
6724                                NUM_RCQ_BD);
6725
6726                 /* SGE ring */
6727                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6728                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6729                                bnx2x_fp(bp, i, rx_sge_mapping),
6730                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6731         }
6732         /* Tx */
6733         for_each_queue(bp, i) {
6734
6735                 /* fastpath tx rings: tx_buf tx_desc */
6736                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6737                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6738                                bnx2x_fp(bp, i, tx_desc_mapping),
6739                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6740         }
6741         /* end of fastpath */
6742
6743         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6744                        sizeof(struct host_def_status_block));
6745
6746         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6747                        sizeof(struct bnx2x_slowpath));
6748
6749 #ifdef BCM_CNIC
6750         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6751         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6752         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6753         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6754         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6755                        sizeof(struct host_status_block));
6756 #endif
6757         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6758
6759 #undef BNX2X_PCI_FREE
6760 #undef BNX2X_KFREE
6761 }
6762
6763 static int bnx2x_alloc_mem(struct bnx2x *bp)
6764 {
6765
6766 #define BNX2X_PCI_ALLOC(x, y, size) \
6767         do { \
6768                 x = pci_alloc_consistent(bp->pdev, size, y); \
6769                 if (x == NULL) \
6770                         goto alloc_mem_err; \
6771                 memset(x, 0, size); \
6772         } while (0)
6773
6774 #define BNX2X_ALLOC(x, size) \
6775         do { \
6776                 x = vmalloc(size); \
6777                 if (x == NULL) \
6778                         goto alloc_mem_err; \
6779                 memset(x, 0, size); \
6780         } while (0)
6781
6782         int i;
6783
6784         /* fastpath */
6785         /* Common */
6786         for_each_queue(bp, i) {
6787                 bnx2x_fp(bp, i, bp) = bp;
6788
6789                 /* status blocks */
6790                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6791                                 &bnx2x_fp(bp, i, status_blk_mapping),
6792                                 sizeof(struct host_status_block));
6793         }
6794         /* Rx */
6795         for_each_queue(bp, i) {
6796
6797                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6798                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6799                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6800                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6801                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6802                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6803
6804                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6805                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6806                                 sizeof(struct eth_fast_path_rx_cqe) *
6807                                 NUM_RCQ_BD);
6808
6809                 /* SGE ring */
6810                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6811                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6812                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6813                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6814                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6815         }
6816         /* Tx */
6817         for_each_queue(bp, i) {
6818
6819                 /* fastpath tx rings: tx_buf tx_desc */
6820                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6821                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6822                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6823                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6824                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6825         }
6826         /* end of fastpath */
6827
6828         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6829                         sizeof(struct host_def_status_block));
6830
6831         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6832                         sizeof(struct bnx2x_slowpath));
6833
6834 #ifdef BCM_CNIC
6835         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6836
6837         /* allocate searcher T2 table
6838            we allocate 1/4 of alloc num for T2
6839           (which is not entered into the ILT) */
6840         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6841
6842         /* Initialize T2 (for 1024 connections) */
6843         for (i = 0; i < 16*1024; i += 64)
6844                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6845
6846         /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6847         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6848
6849         /* QM queues (128*MAX_CONN) */
6850         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6851
6852         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6853                         sizeof(struct host_status_block));
6854 #endif
6855
6856         /* Slow path ring */
6857         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6858
6859         return 0;
6860
6861 alloc_mem_err:
6862         bnx2x_free_mem(bp);
6863         return -ENOMEM;
6864
6865 #undef BNX2X_PCI_ALLOC
6866 #undef BNX2X_ALLOC
6867 }
6868
6869 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6870 {
6871         int i;
6872
6873         for_each_queue(bp, i) {
6874                 struct bnx2x_fastpath *fp = &bp->fp[i];
6875
6876                 u16 bd_cons = fp->tx_bd_cons;
6877                 u16 sw_prod = fp->tx_pkt_prod;
6878                 u16 sw_cons = fp->tx_pkt_cons;
6879
6880                 while (sw_cons != sw_prod) {
6881                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6882                         sw_cons++;
6883                 }
6884         }
6885 }
6886
6887 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6888 {
6889         int i, j;
6890
6891         for_each_queue(bp, j) {
6892                 struct bnx2x_fastpath *fp = &bp->fp[j];
6893
6894                 for (i = 0; i < NUM_RX_BD; i++) {
6895                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6896                         struct sk_buff *skb = rx_buf->skb;
6897
6898                         if (skb == NULL)
6899                                 continue;
6900
6901                         pci_unmap_single(bp->pdev,
6902                                          pci_unmap_addr(rx_buf, mapping),
6903                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6904
6905                         rx_buf->skb = NULL;
6906                         dev_kfree_skb(skb);
6907                 }
6908                 if (!fp->disable_tpa)
6909                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6910                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6911                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6912         }
6913 }
6914
6915 static void bnx2x_free_skbs(struct bnx2x *bp)
6916 {
6917         bnx2x_free_tx_skbs(bp);
6918         bnx2x_free_rx_skbs(bp);
6919 }
6920
6921 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6922 {
6923         int i, offset = 1;
6924
6925         free_irq(bp->msix_table[0].vector, bp->dev);
6926         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6927            bp->msix_table[0].vector);
6928
6929 #ifdef BCM_CNIC
6930         offset++;
6931 #endif
6932         for_each_queue(bp, i) {
6933                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6934                    "state %x\n", i, bp->msix_table[i + offset].vector,
6935                    bnx2x_fp(bp, i, state));
6936
6937                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6938         }
6939 }
6940
6941 static void bnx2x_free_irq(struct bnx2x *bp)
6942 {
6943         if (bp->flags & USING_MSIX_FLAG) {
6944                 bnx2x_free_msix_irqs(bp);
6945                 pci_disable_msix(bp->pdev);
6946                 bp->flags &= ~USING_MSIX_FLAG;
6947
6948         } else if (bp->flags & USING_MSI_FLAG) {
6949                 free_irq(bp->pdev->irq, bp->dev);
6950                 pci_disable_msi(bp->pdev);
6951                 bp->flags &= ~USING_MSI_FLAG;
6952
6953         } else
6954                 free_irq(bp->pdev->irq, bp->dev);
6955 }
6956
6957 static int bnx2x_enable_msix(struct bnx2x *bp)
6958 {
6959         int i, rc, offset = 1;
6960         int igu_vec = 0;
6961
6962         bp->msix_table[0].entry = igu_vec;
6963         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6964
6965 #ifdef BCM_CNIC
6966         igu_vec = BP_L_ID(bp) + offset;
6967         bp->msix_table[1].entry = igu_vec;
6968         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6969         offset++;
6970 #endif
6971         for_each_queue(bp, i) {
6972                 igu_vec = BP_L_ID(bp) + offset + i;
6973                 bp->msix_table[i + offset].entry = igu_vec;
6974                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6975                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6976         }
6977
6978         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6979                              BNX2X_NUM_QUEUES(bp) + offset);
6980         if (rc) {
6981                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6982                 return rc;
6983         }
6984
6985         bp->flags |= USING_MSIX_FLAG;
6986
6987         return 0;
6988 }
6989
6990 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6991 {
6992         int i, rc, offset = 1;
6993
6994         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6995                          bp->dev->name, bp->dev);
6996         if (rc) {
6997                 BNX2X_ERR("request sp irq failed\n");
6998                 return -EBUSY;
6999         }
7000
7001 #ifdef BCM_CNIC
7002         offset++;
7003 #endif
7004         for_each_queue(bp, i) {
7005                 struct bnx2x_fastpath *fp = &bp->fp[i];
7006                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7007                          bp->dev->name, i);
7008
7009                 rc = request_irq(bp->msix_table[i + offset].vector,
7010                                  bnx2x_msix_fp_int, 0, fp->name, fp);
7011                 if (rc) {
7012                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
7013                         bnx2x_free_msix_irqs(bp);
7014                         return -EBUSY;
7015                 }
7016
7017                 fp->state = BNX2X_FP_STATE_IRQ;
7018         }
7019
7020         i = BNX2X_NUM_QUEUES(bp);
7021         printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
7022                " ... fp[%d] %d\n",
7023                bp->dev->name, bp->msix_table[0].vector,
7024                0, bp->msix_table[offset].vector,
7025                i - 1, bp->msix_table[offset + i - 1].vector);
7026
7027         return 0;
7028 }
7029
7030 static int bnx2x_enable_msi(struct bnx2x *bp)
7031 {
7032         int rc;
7033
7034         rc = pci_enable_msi(bp->pdev);
7035         if (rc) {
7036                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7037                 return -1;
7038         }
7039         bp->flags |= USING_MSI_FLAG;
7040
7041         return 0;
7042 }
7043
7044 static int bnx2x_req_irq(struct bnx2x *bp)
7045 {
7046         unsigned long flags;
7047         int rc;
7048
7049         if (bp->flags & USING_MSI_FLAG)
7050                 flags = 0;
7051         else
7052                 flags = IRQF_SHARED;
7053
7054         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7055                          bp->dev->name, bp->dev);
7056         if (!rc)
7057                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7058
7059         return rc;
7060 }
7061
7062 static void bnx2x_napi_enable(struct bnx2x *bp)
7063 {
7064         int i;
7065
7066         for_each_queue(bp, i)
7067                 napi_enable(&bnx2x_fp(bp, i, napi));
7068 }
7069
7070 static void bnx2x_napi_disable(struct bnx2x *bp)
7071 {
7072         int i;
7073
7074         for_each_queue(bp, i)
7075                 napi_disable(&bnx2x_fp(bp, i, napi));
7076 }
7077
7078 static void bnx2x_netif_start(struct bnx2x *bp)
7079 {
7080         int intr_sem;
7081
7082         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7083         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7084
7085         if (intr_sem) {
7086                 if (netif_running(bp->dev)) {
7087                         bnx2x_napi_enable(bp);
7088                         bnx2x_int_enable(bp);
7089                         if (bp->state == BNX2X_STATE_OPEN)
7090                                 netif_tx_wake_all_queues(bp->dev);
7091                 }
7092         }
7093 }
7094
7095 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7096 {
7097         bnx2x_int_disable_sync(bp, disable_hw);
7098         bnx2x_napi_disable(bp);
7099         netif_tx_disable(bp->dev);
7100 }
7101
7102 /*
7103  * Init service functions
7104  */
7105
7106 /**
7107  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7108  *
7109  * @param bp driver descriptor
7110  * @param set set or clear an entry (1 or 0)
7111  * @param mac pointer to a buffer containing a MAC
7112  * @param cl_bit_vec bit vector of clients to register a MAC for
7113  * @param cam_offset offset in a CAM to use
7114  * @param with_bcast set broadcast MAC as well
7115  */
7116 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7117                                       u32 cl_bit_vec, u8 cam_offset,
7118                                       u8 with_bcast)
7119 {
7120         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7121         int port = BP_PORT(bp);
7122
7123         /* CAM allocation
7124          * unicasts 0-31:port0 32-63:port1
7125          * multicast 64-127:port0 128-191:port1
7126          */
7127         config->hdr.length = 1 + (with_bcast ? 1 : 0);
7128         config->hdr.offset = cam_offset;
7129         config->hdr.client_id = 0xff;
7130         config->hdr.reserved1 = 0;
7131
7132         /* primary MAC */
7133         config->config_table[0].cam_entry.msb_mac_addr =
7134                                         swab16(*(u16 *)&mac[0]);
7135         config->config_table[0].cam_entry.middle_mac_addr =
7136                                         swab16(*(u16 *)&mac[2]);
7137         config->config_table[0].cam_entry.lsb_mac_addr =
7138                                         swab16(*(u16 *)&mac[4]);
7139         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7140         if (set)
7141                 config->config_table[0].target_table_entry.flags = 0;
7142         else
7143                 CAM_INVALIDATE(config->config_table[0]);
7144         config->config_table[0].target_table_entry.clients_bit_vector =
7145                                                 cpu_to_le32(cl_bit_vec);
7146         config->config_table[0].target_table_entry.vlan_id = 0;
7147
7148         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7149            (set ? "setting" : "clearing"),
7150            config->config_table[0].cam_entry.msb_mac_addr,
7151            config->config_table[0].cam_entry.middle_mac_addr,
7152            config->config_table[0].cam_entry.lsb_mac_addr);
7153
7154         /* broadcast */
7155         if (with_bcast) {
7156                 config->config_table[1].cam_entry.msb_mac_addr =
7157                         cpu_to_le16(0xffff);
7158                 config->config_table[1].cam_entry.middle_mac_addr =
7159                         cpu_to_le16(0xffff);
7160                 config->config_table[1].cam_entry.lsb_mac_addr =
7161                         cpu_to_le16(0xffff);
7162                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7163                 if (set)
7164                         config->config_table[1].target_table_entry.flags =
7165                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7166                 else
7167                         CAM_INVALIDATE(config->config_table[1]);
7168                 config->config_table[1].target_table_entry.clients_bit_vector =
7169                                                         cpu_to_le32(cl_bit_vec);
7170                 config->config_table[1].target_table_entry.vlan_id = 0;
7171         }
7172
7173         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7174                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7175                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7176 }
7177
7178 /**
7179  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7180  *
7181  * @param bp driver descriptor
7182  * @param set set or clear an entry (1 or 0)
7183  * @param mac pointer to a buffer containing a MAC
7184  * @param cl_bit_vec bit vector of clients to register a MAC for
7185  * @param cam_offset offset in a CAM to use
7186  */
7187 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7188                                        u32 cl_bit_vec, u8 cam_offset)
7189 {
7190         struct mac_configuration_cmd_e1h *config =
7191                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7192
7193         config->hdr.length = 1;
7194         config->hdr.offset = cam_offset;
7195         config->hdr.client_id = 0xff;
7196         config->hdr.reserved1 = 0;
7197
7198         /* primary MAC */
7199         config->config_table[0].msb_mac_addr =
7200                                         swab16(*(u16 *)&mac[0]);
7201         config->config_table[0].middle_mac_addr =
7202                                         swab16(*(u16 *)&mac[2]);
7203         config->config_table[0].lsb_mac_addr =
7204                                         swab16(*(u16 *)&mac[4]);
7205         config->config_table[0].clients_bit_vector =
7206                                         cpu_to_le32(cl_bit_vec);
7207         config->config_table[0].vlan_id = 0;
7208         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7209         if (set)
7210                 config->config_table[0].flags = BP_PORT(bp);
7211         else
7212                 config->config_table[0].flags =
7213                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7214
7215         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
7216            (set ? "setting" : "clearing"),
7217            config->config_table[0].msb_mac_addr,
7218            config->config_table[0].middle_mac_addr,
7219            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7220
7221         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7222                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7223                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7224 }
7225
7226 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7227                              int *state_p, int poll)
7228 {
7229         /* can take a while if any port is running */
7230         int cnt = 5000;
7231
7232         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7233            poll ? "polling" : "waiting", state, idx);
7234
7235         might_sleep();
7236         while (cnt--) {
7237                 if (poll) {
7238                         bnx2x_rx_int(bp->fp, 10);
7239                         /* if index is different from 0
7240                          * the reply for some commands will
7241                          * be on the non default queue
7242                          */
7243                         if (idx)
7244                                 bnx2x_rx_int(&bp->fp[idx], 10);
7245                 }
7246
7247                 mb(); /* state is changed by bnx2x_sp_event() */
7248                 if (*state_p == state) {
7249 #ifdef BNX2X_STOP_ON_ERROR
7250                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7251 #endif
7252                         return 0;
7253                 }
7254
7255                 msleep(1);
7256
7257                 if (bp->panic)
7258                         return -EIO;
7259         }
7260
7261         /* timeout! */
7262         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7263                   poll ? "polling" : "waiting", state, idx);
7264 #ifdef BNX2X_STOP_ON_ERROR
7265         bnx2x_panic();
7266 #endif
7267
7268         return -EBUSY;
7269 }
7270
7271 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7272 {
7273         bp->set_mac_pending++;
7274         smp_wmb();
7275
7276         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7277                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
7278
7279         /* Wait for a completion */
7280         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7281 }
7282
7283 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7284 {
7285         bp->set_mac_pending++;
7286         smp_wmb();
7287
7288         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7289                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7290                                   1);
7291
7292         /* Wait for a completion */
7293         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7294 }
7295
7296 #ifdef BCM_CNIC
7297 /**
7298  * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
7299  * MAC(s). This function will wait until the ramdord completion
7300  * returns.
7301  *
7302  * @param bp driver handle
7303  * @param set set or clear the CAM entry
7304  *
7305  * @return 0 if cussess, -ENODEV if ramrod doesn't return.
7306  */
7307 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7308 {
7309         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7310
7311         bp->set_mac_pending++;
7312         smp_wmb();
7313
7314         /* Send a SET_MAC ramrod */
7315         if (CHIP_IS_E1(bp))
7316                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7317                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7318                                   1);
7319         else
7320                 /* CAM allocation for E1H
7321                 * unicasts: by func number
7322                 * multicast: 20+FUNC*20, 20 each
7323                 */
7324                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7325                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7326
7327         /* Wait for a completion when setting */
7328         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7329
7330         return 0;
7331 }
7332 #endif
7333
7334 static int bnx2x_setup_leading(struct bnx2x *bp)
7335 {
7336         int rc;
7337
7338         /* reset IGU state */
7339         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7340
7341         /* SETUP ramrod */
7342         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7343
7344         /* Wait for completion */
7345         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7346
7347         return rc;
7348 }
7349
7350 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7351 {
7352         struct bnx2x_fastpath *fp = &bp->fp[index];
7353
7354         /* reset IGU state */
7355         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7356
7357         /* SETUP ramrod */
7358         fp->state = BNX2X_FP_STATE_OPENING;
7359         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7360                       fp->cl_id, 0);
7361
7362         /* Wait for completion */
7363         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7364                                  &(fp->state), 0);
7365 }
7366
7367 static int bnx2x_poll(struct napi_struct *napi, int budget);
7368
7369 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7370 {
7371
7372         switch (bp->multi_mode) {
7373         case ETH_RSS_MODE_DISABLED:
7374                 bp->num_queues = 1;
7375                 break;
7376
7377         case ETH_RSS_MODE_REGULAR:
7378                 if (num_queues)
7379                         bp->num_queues = min_t(u32, num_queues,
7380                                                   BNX2X_MAX_QUEUES(bp));
7381                 else
7382                         bp->num_queues = min_t(u32, num_online_cpus(),
7383                                                   BNX2X_MAX_QUEUES(bp));
7384                 break;
7385
7386
7387         default:
7388                 bp->num_queues = 1;
7389                 break;
7390         }
7391 }
7392
7393 static int bnx2x_set_num_queues(struct bnx2x *bp)
7394 {
7395         int rc = 0;
7396
7397         switch (int_mode) {
7398         case INT_MODE_INTx:
7399         case INT_MODE_MSI:
7400                 bp->num_queues = 1;
7401                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7402                 break;
7403
7404         case INT_MODE_MSIX:
7405         default:
7406                 /* Set number of queues according to bp->multi_mode value */
7407                 bnx2x_set_num_queues_msix(bp);
7408
7409                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7410                    bp->num_queues);
7411
7412                 /* if we can't use MSI-X we only need one fp,
7413                  * so try to enable MSI-X with the requested number of fp's
7414                  * and fallback to MSI or legacy INTx with one fp
7415                  */
7416                 rc = bnx2x_enable_msix(bp);
7417                 if (rc)
7418                         /* failed to enable MSI-X */
7419                         bp->num_queues = 1;
7420                 break;
7421         }
7422         bp->dev->real_num_tx_queues = bp->num_queues;
7423         return rc;
7424 }
7425
7426 #ifdef BCM_CNIC
7427 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7428 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7429 #endif
7430
7431 /* must be called with rtnl_lock */
7432 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7433 {
7434         u32 load_code;
7435         int i, rc;
7436
7437 #ifdef BNX2X_STOP_ON_ERROR
7438         if (unlikely(bp->panic))
7439                 return -EPERM;
7440 #endif
7441
7442         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7443
7444         rc = bnx2x_set_num_queues(bp);
7445
7446         if (bnx2x_alloc_mem(bp))
7447                 return -ENOMEM;
7448
7449         for_each_queue(bp, i)
7450                 bnx2x_fp(bp, i, disable_tpa) =
7451                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7452
7453         for_each_queue(bp, i)
7454                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7455                                bnx2x_poll, 128);
7456
7457         bnx2x_napi_enable(bp);
7458
7459         if (bp->flags & USING_MSIX_FLAG) {
7460                 rc = bnx2x_req_msix_irqs(bp);
7461                 if (rc) {
7462                         pci_disable_msix(bp->pdev);
7463                         goto load_error1;
7464                 }
7465         } else {
7466                 /* Fall to INTx if failed to enable MSI-X due to lack of
7467                    memory (in bnx2x_set_num_queues()) */
7468                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7469                         bnx2x_enable_msi(bp);
7470                 bnx2x_ack_int(bp);
7471                 rc = bnx2x_req_irq(bp);
7472                 if (rc) {
7473                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7474                         if (bp->flags & USING_MSI_FLAG)
7475                                 pci_disable_msi(bp->pdev);
7476                         goto load_error1;
7477                 }
7478                 if (bp->flags & USING_MSI_FLAG) {
7479                         bp->dev->irq = bp->pdev->irq;
7480                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
7481                                bp->dev->name, bp->pdev->irq);
7482                 }
7483         }
7484
7485         /* Send LOAD_REQUEST command to MCP
7486            Returns the type of LOAD command:
7487            if it is the first port to be initialized
7488            common blocks should be initialized, otherwise - not
7489         */
7490         if (!BP_NOMCP(bp)) {
7491                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7492                 if (!load_code) {
7493                         BNX2X_ERR("MCP response failure, aborting\n");
7494                         rc = -EBUSY;
7495                         goto load_error2;
7496                 }
7497                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7498                         rc = -EBUSY; /* other port in diagnostic mode */
7499                         goto load_error2;
7500                 }
7501
7502         } else {
7503                 int port = BP_PORT(bp);
7504
7505                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7506                    load_count[0], load_count[1], load_count[2]);
7507                 load_count[0]++;
7508                 load_count[1 + port]++;
7509                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7510                    load_count[0], load_count[1], load_count[2]);
7511                 if (load_count[0] == 1)
7512                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7513                 else if (load_count[1 + port] == 1)
7514                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7515                 else
7516                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7517         }
7518
7519         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7520             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7521                 bp->port.pmf = 1;
7522         else
7523                 bp->port.pmf = 0;
7524         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7525
7526         /* Initialize HW */
7527         rc = bnx2x_init_hw(bp, load_code);
7528         if (rc) {
7529                 BNX2X_ERR("HW init failed, aborting\n");
7530                 goto load_error2;
7531         }
7532
7533         /* Setup NIC internals and enable interrupts */
7534         bnx2x_nic_init(bp, load_code);
7535
7536         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7537             (bp->common.shmem2_base))
7538                 SHMEM2_WR(bp, dcc_support,
7539                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7540                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7541
7542         /* Send LOAD_DONE command to MCP */
7543         if (!BP_NOMCP(bp)) {
7544                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7545                 if (!load_code) {
7546                         BNX2X_ERR("MCP response failure, aborting\n");
7547                         rc = -EBUSY;
7548                         goto load_error3;
7549                 }
7550         }
7551
7552         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7553
7554         rc = bnx2x_setup_leading(bp);
7555         if (rc) {
7556                 BNX2X_ERR("Setup leading failed!\n");
7557 #ifndef BNX2X_STOP_ON_ERROR
7558                 goto load_error3;
7559 #else
7560                 bp->panic = 1;
7561                 return -EBUSY;
7562 #endif
7563         }
7564
7565         if (CHIP_IS_E1H(bp))
7566                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7567                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7568                         bp->flags |= MF_FUNC_DIS;
7569                 }
7570
7571         if (bp->state == BNX2X_STATE_OPEN) {
7572 #ifdef BCM_CNIC
7573                 /* Enable Timer scan */
7574                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7575 #endif
7576                 for_each_nondefault_queue(bp, i) {
7577                         rc = bnx2x_setup_multi(bp, i);
7578                         if (rc)
7579 #ifdef BCM_CNIC
7580                                 goto load_error4;
7581 #else
7582                                 goto load_error3;
7583 #endif
7584                 }
7585
7586                 if (CHIP_IS_E1(bp))
7587                         bnx2x_set_eth_mac_addr_e1(bp, 1);
7588                 else
7589                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
7590 #ifdef BCM_CNIC
7591                 /* Set iSCSI L2 MAC */
7592                 mutex_lock(&bp->cnic_mutex);
7593                 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7594                         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7595                         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7596                 }
7597                 mutex_unlock(&bp->cnic_mutex);
7598 #endif
7599         }
7600
7601         if (bp->port.pmf)
7602                 bnx2x_initial_phy_init(bp, load_mode);
7603
7604         /* Start fast path */
7605         switch (load_mode) {
7606         case LOAD_NORMAL:
7607                 if (bp->state == BNX2X_STATE_OPEN) {
7608                         /* Tx queue should be only reenabled */
7609                         netif_tx_wake_all_queues(bp->dev);
7610                 }
7611                 /* Initialize the receive filter. */
7612                 bnx2x_set_rx_mode(bp->dev);
7613                 break;
7614
7615         case LOAD_OPEN:
7616                 netif_tx_start_all_queues(bp->dev);
7617                 if (bp->state != BNX2X_STATE_OPEN)
7618                         netif_tx_disable(bp->dev);
7619                 /* Initialize the receive filter. */
7620                 bnx2x_set_rx_mode(bp->dev);
7621                 break;
7622
7623         case LOAD_DIAG:
7624                 /* Initialize the receive filter. */
7625                 bnx2x_set_rx_mode(bp->dev);
7626                 bp->state = BNX2X_STATE_DIAG;
7627                 break;
7628
7629         default:
7630                 break;
7631         }
7632
7633         if (!bp->port.pmf)
7634                 bnx2x__link_status_update(bp);
7635
7636         /* start the timer */
7637         mod_timer(&bp->timer, jiffies + bp->current_interval);
7638
7639 #ifdef BCM_CNIC
7640         bnx2x_setup_cnic_irq_info(bp);
7641         if (bp->state == BNX2X_STATE_OPEN)
7642                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7643 #endif
7644
7645         return 0;
7646
7647 #ifdef BCM_CNIC
7648 load_error4:
7649         /* Disable Timer scan */
7650         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7651 #endif
7652 load_error3:
7653         bnx2x_int_disable_sync(bp, 1);
7654         if (!BP_NOMCP(bp)) {
7655                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7656                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7657         }
7658         bp->port.pmf = 0;
7659         /* Free SKBs, SGEs, TPA pool and driver internals */
7660         bnx2x_free_skbs(bp);
7661         for_each_queue(bp, i)
7662                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7663 load_error2:
7664         /* Release IRQs */
7665         bnx2x_free_irq(bp);
7666 load_error1:
7667         bnx2x_napi_disable(bp);
7668         for_each_queue(bp, i)
7669                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7670         bnx2x_free_mem(bp);
7671
7672         return rc;
7673 }
7674
7675 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7676 {
7677         struct bnx2x_fastpath *fp = &bp->fp[index];
7678         int rc;
7679
7680         /* halt the connection */
7681         fp->state = BNX2X_FP_STATE_HALTING;
7682         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7683
7684         /* Wait for completion */
7685         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7686                                &(fp->state), 1);
7687         if (rc) /* timeout */
7688                 return rc;
7689
7690         /* delete cfc entry */
7691         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7692
7693         /* Wait for completion */
7694         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7695                                &(fp->state), 1);
7696         return rc;
7697 }
7698
7699 static int bnx2x_stop_leading(struct bnx2x *bp)
7700 {
7701         __le16 dsb_sp_prod_idx;
7702         /* if the other port is handling traffic,
7703            this can take a lot of time */
7704         int cnt = 500;
7705         int rc;
7706
7707         might_sleep();
7708
7709         /* Send HALT ramrod */
7710         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7711         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7712
7713         /* Wait for completion */
7714         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7715                                &(bp->fp[0].state), 1);
7716         if (rc) /* timeout */
7717                 return rc;
7718
7719         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7720
7721         /* Send PORT_DELETE ramrod */
7722         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7723
7724         /* Wait for completion to arrive on default status block
7725            we are going to reset the chip anyway
7726            so there is not much to do if this times out
7727          */
7728         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7729                 if (!cnt) {
7730                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7731                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7732                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7733 #ifdef BNX2X_STOP_ON_ERROR
7734                         bnx2x_panic();
7735 #endif
7736                         rc = -EBUSY;
7737                         break;
7738                 }
7739                 cnt--;
7740                 msleep(1);
7741                 rmb(); /* Refresh the dsb_sp_prod */
7742         }
7743         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7744         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7745
7746         return rc;
7747 }
7748
7749 static void bnx2x_reset_func(struct bnx2x *bp)
7750 {
7751         int port = BP_PORT(bp);
7752         int func = BP_FUNC(bp);
7753         int base, i;
7754
7755         /* Configure IGU */
7756         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7757         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7758
7759 #ifdef BCM_CNIC
7760         /* Disable Timer scan */
7761         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7762         /*
7763          * Wait for at least 10ms and up to 2 second for the timers scan to
7764          * complete
7765          */
7766         for (i = 0; i < 200; i++) {
7767                 msleep(10);
7768                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7769                         break;
7770         }
7771 #endif
7772         /* Clear ILT */
7773         base = FUNC_ILT_BASE(func);
7774         for (i = base; i < base + ILT_PER_FUNC; i++)
7775                 bnx2x_ilt_wr(bp, i, 0);
7776 }
7777
7778 static void bnx2x_reset_port(struct bnx2x *bp)
7779 {
7780         int port = BP_PORT(bp);
7781         u32 val;
7782
7783         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7784
7785         /* Do not rcv packets to BRB */
7786         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7787         /* Do not direct rcv packets that are not for MCP to the BRB */
7788         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7789                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7790
7791         /* Configure AEU */
7792         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7793
7794         msleep(100);
7795         /* Check for BRB port occupancy */
7796         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7797         if (val)
7798                 DP(NETIF_MSG_IFDOWN,
7799                    "BRB1 is not empty  %d blocks are occupied\n", val);
7800
7801         /* TODO: Close Doorbell port? */
7802 }
7803
7804 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7805 {
7806         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7807            BP_FUNC(bp), reset_code);
7808
7809         switch (reset_code) {
7810         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7811                 bnx2x_reset_port(bp);
7812                 bnx2x_reset_func(bp);
7813                 bnx2x_reset_common(bp);
7814                 break;
7815
7816         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7817                 bnx2x_reset_port(bp);
7818                 bnx2x_reset_func(bp);
7819                 break;
7820
7821         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7822                 bnx2x_reset_func(bp);
7823                 break;
7824
7825         default:
7826                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7827                 break;
7828         }
7829 }
7830
7831 /* must be called with rtnl_lock */
7832 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7833 {
7834         int port = BP_PORT(bp);
7835         u32 reset_code = 0;
7836         int i, cnt, rc;
7837
7838 #ifdef BCM_CNIC
7839         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7840 #endif
7841         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7842
7843         /* Set "drop all" */
7844         bp->rx_mode = BNX2X_RX_MODE_NONE;
7845         bnx2x_set_storm_rx_mode(bp);
7846
7847         /* Disable HW interrupts, NAPI and Tx */
7848         bnx2x_netif_stop(bp, 1);
7849
7850         del_timer_sync(&bp->timer);
7851         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7852                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7853         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7854
7855         /* Release IRQs */
7856         bnx2x_free_irq(bp);
7857
7858         /* Wait until tx fastpath tasks complete */
7859         for_each_queue(bp, i) {
7860                 struct bnx2x_fastpath *fp = &bp->fp[i];
7861
7862                 cnt = 1000;
7863                 while (bnx2x_has_tx_work_unload(fp)) {
7864
7865                         bnx2x_tx_int(fp);
7866                         if (!cnt) {
7867                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7868                                           i);
7869 #ifdef BNX2X_STOP_ON_ERROR
7870                                 bnx2x_panic();
7871                                 return -EBUSY;
7872 #else
7873                                 break;
7874 #endif
7875                         }
7876                         cnt--;
7877                         msleep(1);
7878                 }
7879         }
7880         /* Give HW time to discard old tx messages */
7881         msleep(1);
7882
7883         if (CHIP_IS_E1(bp)) {
7884                 struct mac_configuration_cmd *config =
7885                                                 bnx2x_sp(bp, mcast_config);
7886
7887                 bnx2x_set_eth_mac_addr_e1(bp, 0);
7888
7889                 for (i = 0; i < config->hdr.length; i++)
7890                         CAM_INVALIDATE(config->config_table[i]);
7891
7892                 config->hdr.length = i;
7893                 if (CHIP_REV_IS_SLOW(bp))
7894                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7895                 else
7896                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7897                 config->hdr.client_id = bp->fp->cl_id;
7898                 config->hdr.reserved1 = 0;
7899
7900                 bp->set_mac_pending++;
7901                 smp_wmb();
7902
7903                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7904                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7905                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7906
7907         } else { /* E1H */
7908                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7909
7910                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7911
7912                 for (i = 0; i < MC_HASH_SIZE; i++)
7913                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7914
7915                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7916         }
7917 #ifdef BCM_CNIC
7918         /* Clear iSCSI L2 MAC */
7919         mutex_lock(&bp->cnic_mutex);
7920         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7921                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7922                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7923         }
7924         mutex_unlock(&bp->cnic_mutex);
7925 #endif
7926
7927         if (unload_mode == UNLOAD_NORMAL)
7928                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7929
7930         else if (bp->flags & NO_WOL_FLAG)
7931                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7932
7933         else if (bp->wol) {
7934                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7935                 u8 *mac_addr = bp->dev->dev_addr;
7936                 u32 val;
7937                 /* The mac address is written to entries 1-4 to
7938                    preserve entry 0 which is used by the PMF */
7939                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7940
7941                 val = (mac_addr[0] << 8) | mac_addr[1];
7942                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7943
7944                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7945                       (mac_addr[4] << 8) | mac_addr[5];
7946                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7947
7948                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7949
7950         } else
7951                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7952
7953         /* Close multi and leading connections
7954            Completions for ramrods are collected in a synchronous way */
7955         for_each_nondefault_queue(bp, i)
7956                 if (bnx2x_stop_multi(bp, i))
7957                         goto unload_error;
7958
7959         rc = bnx2x_stop_leading(bp);
7960         if (rc) {
7961                 BNX2X_ERR("Stop leading failed!\n");
7962 #ifdef BNX2X_STOP_ON_ERROR
7963                 return -EBUSY;
7964 #else
7965                 goto unload_error;
7966 #endif
7967         }
7968
7969 unload_error:
7970         if (!BP_NOMCP(bp))
7971                 reset_code = bnx2x_fw_command(bp, reset_code);
7972         else {
7973                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7974                    load_count[0], load_count[1], load_count[2]);
7975                 load_count[0]--;
7976                 load_count[1 + port]--;
7977                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7978                    load_count[0], load_count[1], load_count[2]);
7979                 if (load_count[0] == 0)
7980                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7981                 else if (load_count[1 + port] == 0)
7982                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7983                 else
7984                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7985         }
7986
7987         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7988             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7989                 bnx2x__link_reset(bp);
7990
7991         /* Reset the chip */
7992         bnx2x_reset_chip(bp, reset_code);
7993
7994         /* Report UNLOAD_DONE to MCP */
7995         if (!BP_NOMCP(bp))
7996                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7997
7998         bp->port.pmf = 0;
7999
8000         /* Free SKBs, SGEs, TPA pool and driver internals */
8001         bnx2x_free_skbs(bp);
8002         for_each_queue(bp, i)
8003                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8004         for_each_queue(bp, i)
8005                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8006         bnx2x_free_mem(bp);
8007
8008         bp->state = BNX2X_STATE_CLOSED;
8009
8010         netif_carrier_off(bp->dev);
8011
8012         return 0;
8013 }
8014
8015 static void bnx2x_reset_task(struct work_struct *work)
8016 {
8017         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8018
8019 #ifdef BNX2X_STOP_ON_ERROR
8020         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8021                   " so reset not done to allow debug dump,\n"
8022                   " you will need to reboot when done\n");
8023         return;
8024 #endif
8025
8026         rtnl_lock();
8027
8028         if (!netif_running(bp->dev))
8029                 goto reset_task_exit;
8030
8031         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8032         bnx2x_nic_load(bp, LOAD_NORMAL);
8033
8034 reset_task_exit:
8035         rtnl_unlock();
8036 }
8037
8038 /* end of nic load/unload */
8039
8040 /* ethtool_ops */
8041
8042 /*
8043  * Init service functions
8044  */
8045
8046 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8047 {
8048         switch (func) {
8049         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8050         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8051         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8052         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8053         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8054         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8055         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8056         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8057         default:
8058                 BNX2X_ERR("Unsupported function index: %d\n", func);
8059                 return (u32)(-1);
8060         }
8061 }
8062
8063 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8064 {
8065         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8066
8067         /* Flush all outstanding writes */
8068         mmiowb();
8069
8070         /* Pretend to be function 0 */
8071         REG_WR(bp, reg, 0);
8072         /* Flush the GRC transaction (in the chip) */
8073         new_val = REG_RD(bp, reg);
8074         if (new_val != 0) {
8075                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8076                           new_val);
8077                 BUG();
8078         }
8079
8080         /* From now we are in the "like-E1" mode */
8081         bnx2x_int_disable(bp);
8082
8083         /* Flush all outstanding writes */
8084         mmiowb();
8085
8086         /* Restore the original funtion settings */
8087         REG_WR(bp, reg, orig_func);
8088         new_val = REG_RD(bp, reg);
8089         if (new_val != orig_func) {
8090                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8091                           orig_func, new_val);
8092                 BUG();
8093         }
8094 }
8095
8096 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8097 {
8098         if (CHIP_IS_E1H(bp))
8099                 bnx2x_undi_int_disable_e1h(bp, func);
8100         else
8101                 bnx2x_int_disable(bp);
8102 }
8103
8104 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8105 {
8106         u32 val;
8107
8108         /* Check if there is any driver already loaded */
8109         val = REG_RD(bp, MISC_REG_UNPREPARED);
8110         if (val == 0x1) {
8111                 /* Check if it is the UNDI driver
8112                  * UNDI driver initializes CID offset for normal bell to 0x7
8113                  */
8114                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8115                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8116                 if (val == 0x7) {
8117                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8118                         /* save our func */
8119                         int func = BP_FUNC(bp);
8120                         u32 swap_en;
8121                         u32 swap_val;
8122
8123                         /* clear the UNDI indication */
8124                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8125
8126                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
8127
8128                         /* try unload UNDI on port 0 */
8129                         bp->func = 0;
8130                         bp->fw_seq =
8131                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8132                                 DRV_MSG_SEQ_NUMBER_MASK);
8133                         reset_code = bnx2x_fw_command(bp, reset_code);
8134
8135                         /* if UNDI is loaded on the other port */
8136                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8137
8138                                 /* send "DONE" for previous unload */
8139                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8140
8141                                 /* unload UNDI on port 1 */
8142                                 bp->func = 1;
8143                                 bp->fw_seq =
8144                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8145                                         DRV_MSG_SEQ_NUMBER_MASK);
8146                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8147
8148                                 bnx2x_fw_command(bp, reset_code);
8149                         }
8150
8151                         /* now it's safe to release the lock */
8152                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8153
8154                         bnx2x_undi_int_disable(bp, func);
8155
8156                         /* close input traffic and wait for it */
8157                         /* Do not rcv packets to BRB */
8158                         REG_WR(bp,
8159                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8160                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8161                         /* Do not direct rcv packets that are not for MCP to
8162                          * the BRB */
8163                         REG_WR(bp,
8164                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8165                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8166                         /* clear AEU */
8167                         REG_WR(bp,
8168                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8169                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8170                         msleep(10);
8171
8172                         /* save NIG port swap info */
8173                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8174                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8175                         /* reset device */
8176                         REG_WR(bp,
8177                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8178                                0xd3ffffff);
8179                         REG_WR(bp,
8180                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8181                                0x1403);
8182                         /* take the NIG out of reset and restore swap values */
8183                         REG_WR(bp,
8184                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8185                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
8186                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8187                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8188
8189                         /* send unload done to the MCP */
8190                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8191
8192                         /* restore our func and fw_seq */
8193                         bp->func = func;
8194                         bp->fw_seq =
8195                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8196                                 DRV_MSG_SEQ_NUMBER_MASK);
8197
8198                 } else
8199                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8200         }
8201 }
8202
8203 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8204 {
8205         u32 val, val2, val3, val4, id;
8206         u16 pmc;
8207
8208         /* Get the chip revision id and number. */
8209         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8210         val = REG_RD(bp, MISC_REG_CHIP_NUM);
8211         id = ((val & 0xffff) << 16);
8212         val = REG_RD(bp, MISC_REG_CHIP_REV);
8213         id |= ((val & 0xf) << 12);
8214         val = REG_RD(bp, MISC_REG_CHIP_METAL);
8215         id |= ((val & 0xff) << 4);
8216         val = REG_RD(bp, MISC_REG_BOND_ID);
8217         id |= (val & 0xf);
8218         bp->common.chip_id = id;
8219         bp->link_params.chip_id = bp->common.chip_id;
8220         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8221
8222         val = (REG_RD(bp, 0x2874) & 0x55);
8223         if ((bp->common.chip_id & 0x1) ||
8224             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8225                 bp->flags |= ONE_PORT_FLAG;
8226                 BNX2X_DEV_INFO("single port device\n");
8227         }
8228
8229         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8230         bp->common.flash_size = (NVRAM_1MB_SIZE <<
8231                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
8232         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8233                        bp->common.flash_size, bp->common.flash_size);
8234
8235         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8236         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8237         bp->link_params.shmem_base = bp->common.shmem_base;
8238         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
8239                        bp->common.shmem_base, bp->common.shmem2_base);
8240
8241         if (!bp->common.shmem_base ||
8242             (bp->common.shmem_base < 0xA0000) ||
8243             (bp->common.shmem_base >= 0xC0000)) {
8244                 BNX2X_DEV_INFO("MCP not active\n");
8245                 bp->flags |= NO_MCP_FLAG;
8246                 return;
8247         }
8248
8249         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8250         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8251                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8252                 BNX2X_ERR("BAD MCP validity signature\n");
8253
8254         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8255         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8256
8257         bp->link_params.hw_led_mode = ((bp->common.hw_config &
8258                                         SHARED_HW_CFG_LED_MODE_MASK) >>
8259                                        SHARED_HW_CFG_LED_MODE_SHIFT);
8260
8261         bp->link_params.feature_config_flags = 0;
8262         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8263         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8264                 bp->link_params.feature_config_flags |=
8265                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8266         else
8267                 bp->link_params.feature_config_flags &=
8268                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8269
8270         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8271         bp->common.bc_ver = val;
8272         BNX2X_DEV_INFO("bc_ver %X\n", val);
8273         if (val < BNX2X_BC_VER) {
8274                 /* for now only warn
8275                  * later we might need to enforce this */
8276                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8277                           " please upgrade BC\n", BNX2X_BC_VER, val);
8278         }
8279         bp->link_params.feature_config_flags |=
8280                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8281                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8282
8283         if (BP_E1HVN(bp) == 0) {
8284                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8285                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8286         } else {
8287                 /* no WOL capability for E1HVN != 0 */
8288                 bp->flags |= NO_WOL_FLAG;
8289         }
8290         BNX2X_DEV_INFO("%sWoL capable\n",
8291                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
8292
8293         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8294         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8295         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8296         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8297
8298         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8299                val, val2, val3, val4);
8300 }
8301
8302 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8303                                                     u32 switch_cfg)
8304 {
8305         int port = BP_PORT(bp);
8306         u32 ext_phy_type;
8307
8308         switch (switch_cfg) {
8309         case SWITCH_CFG_1G:
8310                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8311
8312                 ext_phy_type =
8313                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8314                 switch (ext_phy_type) {
8315                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8316                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8317                                        ext_phy_type);
8318
8319                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8320                                                SUPPORTED_10baseT_Full |
8321                                                SUPPORTED_100baseT_Half |
8322                                                SUPPORTED_100baseT_Full |
8323                                                SUPPORTED_1000baseT_Full |
8324                                                SUPPORTED_2500baseX_Full |
8325                                                SUPPORTED_TP |
8326                                                SUPPORTED_FIBRE |
8327                                                SUPPORTED_Autoneg |
8328                                                SUPPORTED_Pause |
8329                                                SUPPORTED_Asym_Pause);
8330                         break;
8331
8332                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8333                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8334                                        ext_phy_type);
8335
8336                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8337                                                SUPPORTED_10baseT_Full |
8338                                                SUPPORTED_100baseT_Half |
8339                                                SUPPORTED_100baseT_Full |
8340                                                SUPPORTED_1000baseT_Full |
8341                                                SUPPORTED_TP |
8342                                                SUPPORTED_FIBRE |
8343                                                SUPPORTED_Autoneg |
8344                                                SUPPORTED_Pause |
8345                                                SUPPORTED_Asym_Pause);
8346                         break;
8347
8348                 default:
8349                         BNX2X_ERR("NVRAM config error. "
8350                                   "BAD SerDes ext_phy_config 0x%x\n",
8351                                   bp->link_params.ext_phy_config);
8352                         return;
8353                 }
8354
8355                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8356                                            port*0x10);
8357                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8358                 break;
8359
8360         case SWITCH_CFG_10G:
8361                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8362
8363                 ext_phy_type =
8364                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8365                 switch (ext_phy_type) {
8366                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8367                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8368                                        ext_phy_type);
8369
8370                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8371                                                SUPPORTED_10baseT_Full |
8372                                                SUPPORTED_100baseT_Half |
8373                                                SUPPORTED_100baseT_Full |
8374                                                SUPPORTED_1000baseT_Full |
8375                                                SUPPORTED_2500baseX_Full |
8376                                                SUPPORTED_10000baseT_Full |
8377                                                SUPPORTED_TP |
8378                                                SUPPORTED_FIBRE |
8379                                                SUPPORTED_Autoneg |
8380                                                SUPPORTED_Pause |
8381                                                SUPPORTED_Asym_Pause);
8382                         break;
8383
8384                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8385                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8386                                        ext_phy_type);
8387
8388                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8389                                                SUPPORTED_1000baseT_Full |
8390                                                SUPPORTED_FIBRE |
8391                                                SUPPORTED_Autoneg |
8392                                                SUPPORTED_Pause |
8393                                                SUPPORTED_Asym_Pause);
8394                         break;
8395
8396                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8397                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8398                                        ext_phy_type);
8399
8400                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8401                                                SUPPORTED_2500baseX_Full |
8402                                                SUPPORTED_1000baseT_Full |
8403                                                SUPPORTED_FIBRE |
8404                                                SUPPORTED_Autoneg |
8405                                                SUPPORTED_Pause |
8406                                                SUPPORTED_Asym_Pause);
8407                         break;
8408
8409                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8410                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8411                                        ext_phy_type);
8412
8413                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8414                                                SUPPORTED_FIBRE |
8415                                                SUPPORTED_Pause |
8416                                                SUPPORTED_Asym_Pause);
8417                         break;
8418
8419                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8420                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8421                                        ext_phy_type);
8422
8423                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8424                                                SUPPORTED_1000baseT_Full |
8425                                                SUPPORTED_FIBRE |
8426                                                SUPPORTED_Pause |
8427                                                SUPPORTED_Asym_Pause);
8428                         break;
8429
8430                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8431                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8432                                        ext_phy_type);
8433
8434                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8435                                                SUPPORTED_1000baseT_Full |
8436                                                SUPPORTED_Autoneg |
8437                                                SUPPORTED_FIBRE |
8438                                                SUPPORTED_Pause |
8439                                                SUPPORTED_Asym_Pause);
8440                         break;
8441
8442                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8443                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8444                                        ext_phy_type);
8445
8446                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8447                                                SUPPORTED_1000baseT_Full |
8448                                                SUPPORTED_Autoneg |
8449                                                SUPPORTED_FIBRE |
8450                                                SUPPORTED_Pause |
8451                                                SUPPORTED_Asym_Pause);
8452                         break;
8453
8454                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8455                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8456                                        ext_phy_type);
8457
8458                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8459                                                SUPPORTED_TP |
8460                                                SUPPORTED_Autoneg |
8461                                                SUPPORTED_Pause |
8462                                                SUPPORTED_Asym_Pause);
8463                         break;
8464
8465                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8466                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8467                                        ext_phy_type);
8468
8469                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8470                                                SUPPORTED_10baseT_Full |
8471                                                SUPPORTED_100baseT_Half |
8472                                                SUPPORTED_100baseT_Full |
8473                                                SUPPORTED_1000baseT_Full |
8474                                                SUPPORTED_10000baseT_Full |
8475                                                SUPPORTED_TP |
8476                                                SUPPORTED_Autoneg |
8477                                                SUPPORTED_Pause |
8478                                                SUPPORTED_Asym_Pause);
8479                         break;
8480
8481                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8482                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8483                                   bp->link_params.ext_phy_config);
8484                         break;
8485
8486                 default:
8487                         BNX2X_ERR("NVRAM config error. "
8488                                   "BAD XGXS ext_phy_config 0x%x\n",
8489                                   bp->link_params.ext_phy_config);
8490                         return;
8491                 }
8492
8493                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8494                                            port*0x18);
8495                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8496
8497                 break;
8498
8499         default:
8500                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8501                           bp->port.link_config);
8502                 return;
8503         }
8504         bp->link_params.phy_addr = bp->port.phy_addr;
8505
8506         /* mask what we support according to speed_cap_mask */
8507         if (!(bp->link_params.speed_cap_mask &
8508                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8509                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8510
8511         if (!(bp->link_params.speed_cap_mask &
8512                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8513                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8514
8515         if (!(bp->link_params.speed_cap_mask &
8516                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8517                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8518
8519         if (!(bp->link_params.speed_cap_mask &
8520                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8521                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8522
8523         if (!(bp->link_params.speed_cap_mask &
8524                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8525                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8526                                         SUPPORTED_1000baseT_Full);
8527
8528         if (!(bp->link_params.speed_cap_mask &
8529                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8530                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8531
8532         if (!(bp->link_params.speed_cap_mask &
8533                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8534                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8535
8536         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8537 }
8538
8539 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8540 {
8541         bp->link_params.req_duplex = DUPLEX_FULL;
8542
8543         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8544         case PORT_FEATURE_LINK_SPEED_AUTO:
8545                 if (bp->port.supported & SUPPORTED_Autoneg) {
8546                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8547                         bp->port.advertising = bp->port.supported;
8548                 } else {
8549                         u32 ext_phy_type =
8550                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8551
8552                         if ((ext_phy_type ==
8553                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8554                             (ext_phy_type ==
8555                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8556                                 /* force 10G, no AN */
8557                                 bp->link_params.req_line_speed = SPEED_10000;
8558                                 bp->port.advertising =
8559                                                 (ADVERTISED_10000baseT_Full |
8560                                                  ADVERTISED_FIBRE);
8561                                 break;
8562                         }
8563                         BNX2X_ERR("NVRAM config error. "
8564                                   "Invalid link_config 0x%x"
8565                                   "  Autoneg not supported\n",
8566                                   bp->port.link_config);
8567                         return;
8568                 }
8569                 break;
8570
8571         case PORT_FEATURE_LINK_SPEED_10M_FULL:
8572                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8573                         bp->link_params.req_line_speed = SPEED_10;
8574                         bp->port.advertising = (ADVERTISED_10baseT_Full |
8575                                                 ADVERTISED_TP);
8576                 } else {
8577                         BNX2X_ERR("NVRAM config error. "
8578                                   "Invalid link_config 0x%x"
8579                                   "  speed_cap_mask 0x%x\n",
8580                                   bp->port.link_config,
8581                                   bp->link_params.speed_cap_mask);
8582                         return;
8583                 }
8584                 break;
8585
8586         case PORT_FEATURE_LINK_SPEED_10M_HALF:
8587                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8588                         bp->link_params.req_line_speed = SPEED_10;
8589                         bp->link_params.req_duplex = DUPLEX_HALF;
8590                         bp->port.advertising = (ADVERTISED_10baseT_Half |
8591                                                 ADVERTISED_TP);
8592                 } else {
8593                         BNX2X_ERR("NVRAM config error. "
8594                                   "Invalid link_config 0x%x"
8595                                   "  speed_cap_mask 0x%x\n",
8596                                   bp->port.link_config,
8597                                   bp->link_params.speed_cap_mask);
8598                         return;
8599                 }
8600                 break;
8601
8602         case PORT_FEATURE_LINK_SPEED_100M_FULL:
8603                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8604                         bp->link_params.req_line_speed = SPEED_100;
8605                         bp->port.advertising = (ADVERTISED_100baseT_Full |
8606                                                 ADVERTISED_TP);
8607                 } else {
8608                         BNX2X_ERR("NVRAM config error. "
8609                                   "Invalid link_config 0x%x"
8610                                   "  speed_cap_mask 0x%x\n",
8611                                   bp->port.link_config,
8612                                   bp->link_params.speed_cap_mask);
8613                         return;
8614                 }
8615                 break;
8616
8617         case PORT_FEATURE_LINK_SPEED_100M_HALF:
8618                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8619                         bp->link_params.req_line_speed = SPEED_100;
8620                         bp->link_params.req_duplex = DUPLEX_HALF;
8621                         bp->port.advertising = (ADVERTISED_100baseT_Half |
8622                                                 ADVERTISED_TP);
8623                 } else {
8624                         BNX2X_ERR("NVRAM config error. "
8625                                   "Invalid link_config 0x%x"
8626                                   "  speed_cap_mask 0x%x\n",
8627                                   bp->port.link_config,
8628                                   bp->link_params.speed_cap_mask);
8629                         return;
8630                 }
8631                 break;
8632
8633         case PORT_FEATURE_LINK_SPEED_1G:
8634                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8635                         bp->link_params.req_line_speed = SPEED_1000;
8636                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
8637                                                 ADVERTISED_TP);
8638                 } else {
8639                         BNX2X_ERR("NVRAM config error. "
8640                                   "Invalid link_config 0x%x"
8641                                   "  speed_cap_mask 0x%x\n",
8642                                   bp->port.link_config,
8643                                   bp->link_params.speed_cap_mask);
8644                         return;
8645                 }
8646                 break;
8647
8648         case PORT_FEATURE_LINK_SPEED_2_5G:
8649                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8650                         bp->link_params.req_line_speed = SPEED_2500;
8651                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
8652                                                 ADVERTISED_TP);
8653                 } else {
8654                         BNX2X_ERR("NVRAM config error. "
8655                                   "Invalid link_config 0x%x"
8656                                   "  speed_cap_mask 0x%x\n",
8657                                   bp->port.link_config,
8658                                   bp->link_params.speed_cap_mask);
8659                         return;
8660                 }
8661                 break;
8662
8663         case PORT_FEATURE_LINK_SPEED_10G_CX4:
8664         case PORT_FEATURE_LINK_SPEED_10G_KX4:
8665         case PORT_FEATURE_LINK_SPEED_10G_KR:
8666                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8667                         bp->link_params.req_line_speed = SPEED_10000;
8668                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
8669                                                 ADVERTISED_FIBRE);
8670                 } else {
8671                         BNX2X_ERR("NVRAM config error. "
8672                                   "Invalid link_config 0x%x"
8673                                   "  speed_cap_mask 0x%x\n",
8674                                   bp->port.link_config,
8675                                   bp->link_params.speed_cap_mask);
8676                         return;
8677                 }
8678                 break;
8679
8680         default:
8681                 BNX2X_ERR("NVRAM config error. "
8682                           "BAD link speed link_config 0x%x\n",
8683                           bp->port.link_config);
8684                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8685                 bp->port.advertising = bp->port.supported;
8686                 break;
8687         }
8688
8689         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8690                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8691         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8692             !(bp->port.supported & SUPPORTED_Autoneg))
8693                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8694
8695         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8696                        "  advertising 0x%x\n",
8697                        bp->link_params.req_line_speed,
8698                        bp->link_params.req_duplex,
8699                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8700 }
8701
8702 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8703 {
8704         mac_hi = cpu_to_be16(mac_hi);
8705         mac_lo = cpu_to_be32(mac_lo);
8706         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8707         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8708 }
8709
8710 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8711 {
8712         int port = BP_PORT(bp);
8713         u32 val, val2;
8714         u32 config;
8715         u16 i;
8716         u32 ext_phy_type;
8717
8718         bp->link_params.bp = bp;
8719         bp->link_params.port = port;
8720
8721         bp->link_params.lane_config =
8722                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8723         bp->link_params.ext_phy_config =
8724                 SHMEM_RD(bp,
8725                          dev_info.port_hw_config[port].external_phy_config);
8726         /* BCM8727_NOC => BCM8727 no over current */
8727         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8728             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8729                 bp->link_params.ext_phy_config &=
8730                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8731                 bp->link_params.ext_phy_config |=
8732                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8733                 bp->link_params.feature_config_flags |=
8734                         FEATURE_CONFIG_BCM8727_NOC;
8735         }
8736
8737         bp->link_params.speed_cap_mask =
8738                 SHMEM_RD(bp,
8739                          dev_info.port_hw_config[port].speed_capability_mask);
8740
8741         bp->port.link_config =
8742                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8743
8744         /* Get the 4 lanes xgxs config rx and tx */
8745         for (i = 0; i < 2; i++) {
8746                 val = SHMEM_RD(bp,
8747                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8748                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8749                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8750
8751                 val = SHMEM_RD(bp,
8752                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8753                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8754                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8755         }
8756
8757         /* If the device is capable of WoL, set the default state according
8758          * to the HW
8759          */
8760         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8761         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8762                    (config & PORT_FEATURE_WOL_ENABLED));
8763
8764         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8765                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8766                        bp->link_params.lane_config,
8767                        bp->link_params.ext_phy_config,
8768                        bp->link_params.speed_cap_mask, bp->port.link_config);
8769
8770         bp->link_params.switch_cfg |= (bp->port.link_config &
8771                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
8772         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8773
8774         bnx2x_link_settings_requested(bp);
8775
8776         /*
8777          * If connected directly, work with the internal PHY, otherwise, work
8778          * with the external PHY
8779          */
8780         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8781         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8782                 bp->mdio.prtad = bp->link_params.phy_addr;
8783
8784         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8785                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8786                 bp->mdio.prtad =
8787                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8788
8789         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8790         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8791         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8792         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8793         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8794
8795 #ifdef BCM_CNIC
8796         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8797         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8798         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8799 #endif
8800 }
8801
8802 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8803 {
8804         int func = BP_FUNC(bp);
8805         u32 val, val2;
8806         int rc = 0;
8807
8808         bnx2x_get_common_hwinfo(bp);
8809
8810         bp->e1hov = 0;
8811         bp->e1hmf = 0;
8812         if (CHIP_IS_E1H(bp)) {
8813                 bp->mf_config =
8814                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8815
8816                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8817                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8818                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8819                         bp->e1hmf = 1;
8820                 BNX2X_DEV_INFO("%s function mode\n",
8821                                IS_E1HMF(bp) ? "multi" : "single");
8822
8823                 if (IS_E1HMF(bp)) {
8824                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8825                                                                 e1hov_tag) &
8826                                FUNC_MF_CFG_E1HOV_TAG_MASK);
8827                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8828                                 bp->e1hov = val;
8829                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8830                                                "(0x%04x)\n",
8831                                                func, bp->e1hov, bp->e1hov);
8832                         } else {
8833                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8834                                           "  aborting\n", func);
8835                                 rc = -EPERM;
8836                         }
8837                 } else {
8838                         if (BP_E1HVN(bp)) {
8839                                 BNX2X_ERR("!!!  VN %d in single function mode,"
8840                                           "  aborting\n", BP_E1HVN(bp));
8841                                 rc = -EPERM;
8842                         }
8843                 }
8844         }
8845
8846         if (!BP_NOMCP(bp)) {
8847                 bnx2x_get_port_hwinfo(bp);
8848
8849                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8850                               DRV_MSG_SEQ_NUMBER_MASK);
8851                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8852         }
8853
8854         if (IS_E1HMF(bp)) {
8855                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8856                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8857                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8858                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8859                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8860                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8861                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8862                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8863                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8864                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8865                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8866                                ETH_ALEN);
8867                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8868                                ETH_ALEN);
8869                 }
8870
8871                 return rc;
8872         }
8873
8874         if (BP_NOMCP(bp)) {
8875                 /* only supposed to happen on emulation/FPGA */
8876                 BNX2X_ERR("warning random MAC workaround active\n");
8877                 random_ether_addr(bp->dev->dev_addr);
8878                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8879         }
8880
8881         return rc;
8882 }
8883
8884 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8885 {
8886         int func = BP_FUNC(bp);
8887         int timer_interval;
8888         int rc;
8889
8890         /* Disable interrupt handling until HW is initialized */
8891         atomic_set(&bp->intr_sem, 1);
8892         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8893
8894         mutex_init(&bp->port.phy_mutex);
8895         mutex_init(&bp->fw_mb_mutex);
8896 #ifdef BCM_CNIC
8897         mutex_init(&bp->cnic_mutex);
8898 #endif
8899
8900         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8901         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8902
8903         rc = bnx2x_get_hwinfo(bp);
8904
8905         /* need to reset chip if undi was active */
8906         if (!BP_NOMCP(bp))
8907                 bnx2x_undi_unload(bp);
8908
8909         if (CHIP_REV_IS_FPGA(bp))
8910                 printk(KERN_ERR PFX "FPGA detected\n");
8911
8912         if (BP_NOMCP(bp) && (func == 0))
8913                 printk(KERN_ERR PFX
8914                        "MCP disabled, must load devices in order!\n");
8915
8916         /* Set multi queue mode */
8917         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8918             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8919                 printk(KERN_ERR PFX
8920                       "Multi disabled since int_mode requested is not MSI-X\n");
8921                 multi_mode = ETH_RSS_MODE_DISABLED;
8922         }
8923         bp->multi_mode = multi_mode;
8924
8925
8926         /* Set TPA flags */
8927         if (disable_tpa) {
8928                 bp->flags &= ~TPA_ENABLE_FLAG;
8929                 bp->dev->features &= ~NETIF_F_LRO;
8930         } else {
8931                 bp->flags |= TPA_ENABLE_FLAG;
8932                 bp->dev->features |= NETIF_F_LRO;
8933         }
8934
8935         if (CHIP_IS_E1(bp))
8936                 bp->dropless_fc = 0;
8937         else
8938                 bp->dropless_fc = dropless_fc;
8939
8940         bp->mrrs = mrrs;
8941
8942         bp->tx_ring_size = MAX_TX_AVAIL;
8943         bp->rx_ring_size = MAX_RX_AVAIL;
8944
8945         bp->rx_csum = 1;
8946
8947         /* make sure that the numbers are in the right granularity */
8948         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8949         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8950
8951         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8952         bp->current_interval = (poll ? poll : timer_interval);
8953
8954         init_timer(&bp->timer);
8955         bp->timer.expires = jiffies + bp->current_interval;
8956         bp->timer.data = (unsigned long) bp;
8957         bp->timer.function = bnx2x_timer;
8958
8959         return rc;
8960 }
8961
8962 /*
8963  * ethtool service functions
8964  */
8965
8966 /* All ethtool functions called with rtnl_lock */
8967
8968 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8969 {
8970         struct bnx2x *bp = netdev_priv(dev);
8971
8972         cmd->supported = bp->port.supported;
8973         cmd->advertising = bp->port.advertising;
8974
8975         if ((bp->state == BNX2X_STATE_OPEN) &&
8976             !(bp->flags & MF_FUNC_DIS) &&
8977             (bp->link_vars.link_up)) {
8978                 cmd->speed = bp->link_vars.line_speed;
8979                 cmd->duplex = bp->link_vars.duplex;
8980                 if (IS_E1HMF(bp)) {
8981                         u16 vn_max_rate;
8982
8983                         vn_max_rate =
8984                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8985                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8986                         if (vn_max_rate < cmd->speed)
8987                                 cmd->speed = vn_max_rate;
8988                 }
8989         } else {
8990                 cmd->speed = -1;
8991                 cmd->duplex = -1;
8992         }
8993
8994         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8995                 u32 ext_phy_type =
8996                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8997
8998                 switch (ext_phy_type) {
8999                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9000                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9001                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9002                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9003                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9004                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9005                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9006                         cmd->port = PORT_FIBRE;
9007                         break;
9008
9009                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9010                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9011                         cmd->port = PORT_TP;
9012                         break;
9013
9014                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9015                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9016                                   bp->link_params.ext_phy_config);
9017                         break;
9018
9019                 default:
9020                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9021                            bp->link_params.ext_phy_config);
9022                         break;
9023                 }
9024         } else
9025                 cmd->port = PORT_TP;
9026
9027         cmd->phy_address = bp->mdio.prtad;
9028         cmd->transceiver = XCVR_INTERNAL;
9029
9030         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9031                 cmd->autoneg = AUTONEG_ENABLE;
9032         else
9033                 cmd->autoneg = AUTONEG_DISABLE;
9034
9035         cmd->maxtxpkt = 0;
9036         cmd->maxrxpkt = 0;
9037
9038         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9039            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9040            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9041            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9042            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9043            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9044            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9045
9046         return 0;
9047 }
9048
9049 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9050 {
9051         struct bnx2x *bp = netdev_priv(dev);
9052         u32 advertising;
9053
9054         if (IS_E1HMF(bp))
9055                 return 0;
9056
9057         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9058            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9059            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9060            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9061            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9062            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9063            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9064
9065         if (cmd->autoneg == AUTONEG_ENABLE) {
9066                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9067                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
9068                         return -EINVAL;
9069                 }
9070
9071                 /* advertise the requested speed and duplex if supported */
9072                 cmd->advertising &= bp->port.supported;
9073
9074                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9075                 bp->link_params.req_duplex = DUPLEX_FULL;
9076                 bp->port.advertising |= (ADVERTISED_Autoneg |
9077                                          cmd->advertising);
9078
9079         } else { /* forced speed */
9080                 /* advertise the requested speed and duplex if supported */
9081                 switch (cmd->speed) {
9082                 case SPEED_10:
9083                         if (cmd->duplex == DUPLEX_FULL) {
9084                                 if (!(bp->port.supported &
9085                                       SUPPORTED_10baseT_Full)) {
9086                                         DP(NETIF_MSG_LINK,
9087                                            "10M full not supported\n");
9088                                         return -EINVAL;
9089                                 }
9090
9091                                 advertising = (ADVERTISED_10baseT_Full |
9092                                                ADVERTISED_TP);
9093                         } else {
9094                                 if (!(bp->port.supported &
9095                                       SUPPORTED_10baseT_Half)) {
9096                                         DP(NETIF_MSG_LINK,
9097                                            "10M half not supported\n");
9098                                         return -EINVAL;
9099                                 }
9100
9101                                 advertising = (ADVERTISED_10baseT_Half |
9102                                                ADVERTISED_TP);
9103                         }
9104                         break;
9105
9106                 case SPEED_100:
9107                         if (cmd->duplex == DUPLEX_FULL) {
9108                                 if (!(bp->port.supported &
9109                                                 SUPPORTED_100baseT_Full)) {
9110                                         DP(NETIF_MSG_LINK,
9111                                            "100M full not supported\n");
9112                                         return -EINVAL;
9113                                 }
9114
9115                                 advertising = (ADVERTISED_100baseT_Full |
9116                                                ADVERTISED_TP);
9117                         } else {
9118                                 if (!(bp->port.supported &
9119                                                 SUPPORTED_100baseT_Half)) {
9120                                         DP(NETIF_MSG_LINK,
9121                                            "100M half not supported\n");
9122                                         return -EINVAL;
9123                                 }
9124
9125                                 advertising = (ADVERTISED_100baseT_Half |
9126                                                ADVERTISED_TP);
9127                         }
9128                         break;
9129
9130                 case SPEED_1000:
9131                         if (cmd->duplex != DUPLEX_FULL) {
9132                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
9133                                 return -EINVAL;
9134                         }
9135
9136                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
9137                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
9138                                 return -EINVAL;
9139                         }
9140
9141                         advertising = (ADVERTISED_1000baseT_Full |
9142                                        ADVERTISED_TP);
9143                         break;
9144
9145                 case SPEED_2500:
9146                         if (cmd->duplex != DUPLEX_FULL) {
9147                                 DP(NETIF_MSG_LINK,
9148                                    "2.5G half not supported\n");
9149                                 return -EINVAL;
9150                         }
9151
9152                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9153                                 DP(NETIF_MSG_LINK,
9154                                    "2.5G full not supported\n");
9155                                 return -EINVAL;
9156                         }
9157
9158                         advertising = (ADVERTISED_2500baseX_Full |
9159                                        ADVERTISED_TP);
9160                         break;
9161
9162                 case SPEED_10000:
9163                         if (cmd->duplex != DUPLEX_FULL) {
9164                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
9165                                 return -EINVAL;
9166                         }
9167
9168                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
9169                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
9170                                 return -EINVAL;
9171                         }
9172
9173                         advertising = (ADVERTISED_10000baseT_Full |
9174                                        ADVERTISED_FIBRE);
9175                         break;
9176
9177                 default:
9178                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
9179                         return -EINVAL;
9180                 }
9181
9182                 bp->link_params.req_line_speed = cmd->speed;
9183                 bp->link_params.req_duplex = cmd->duplex;
9184                 bp->port.advertising = advertising;
9185         }
9186
9187         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9188            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
9189            bp->link_params.req_line_speed, bp->link_params.req_duplex,
9190            bp->port.advertising);
9191
9192         if (netif_running(dev)) {
9193                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9194                 bnx2x_link_set(bp);
9195         }
9196
9197         return 0;
9198 }
9199
9200 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9201 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9202
9203 static int bnx2x_get_regs_len(struct net_device *dev)
9204 {
9205         struct bnx2x *bp = netdev_priv(dev);
9206         int regdump_len = 0;
9207         int i;
9208
9209         if (CHIP_IS_E1(bp)) {
9210                 for (i = 0; i < REGS_COUNT; i++)
9211                         if (IS_E1_ONLINE(reg_addrs[i].info))
9212                                 regdump_len += reg_addrs[i].size;
9213
9214                 for (i = 0; i < WREGS_COUNT_E1; i++)
9215                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9216                                 regdump_len += wreg_addrs_e1[i].size *
9217                                         (1 + wreg_addrs_e1[i].read_regs_count);
9218
9219         } else { /* E1H */
9220                 for (i = 0; i < REGS_COUNT; i++)
9221                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9222                                 regdump_len += reg_addrs[i].size;
9223
9224                 for (i = 0; i < WREGS_COUNT_E1H; i++)
9225                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9226                                 regdump_len += wreg_addrs_e1h[i].size *
9227                                         (1 + wreg_addrs_e1h[i].read_regs_count);
9228         }
9229         regdump_len *= 4;
9230         regdump_len += sizeof(struct dump_hdr);
9231
9232         return regdump_len;
9233 }
9234
9235 static void bnx2x_get_regs(struct net_device *dev,
9236                            struct ethtool_regs *regs, void *_p)
9237 {
9238         u32 *p = _p, i, j;
9239         struct bnx2x *bp = netdev_priv(dev);
9240         struct dump_hdr dump_hdr = {0};
9241
9242         regs->version = 0;
9243         memset(p, 0, regs->len);
9244
9245         if (!netif_running(bp->dev))
9246                 return;
9247
9248         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9249         dump_hdr.dump_sign = dump_sign_all;
9250         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9251         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9252         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9253         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9254         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9255
9256         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9257         p += dump_hdr.hdr_size + 1;
9258
9259         if (CHIP_IS_E1(bp)) {
9260                 for (i = 0; i < REGS_COUNT; i++)
9261                         if (IS_E1_ONLINE(reg_addrs[i].info))
9262                                 for (j = 0; j < reg_addrs[i].size; j++)
9263                                         *p++ = REG_RD(bp,
9264                                                       reg_addrs[i].addr + j*4);
9265
9266         } else { /* E1H */
9267                 for (i = 0; i < REGS_COUNT; i++)
9268                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9269                                 for (j = 0; j < reg_addrs[i].size; j++)
9270                                         *p++ = REG_RD(bp,
9271                                                       reg_addrs[i].addr + j*4);
9272         }
9273 }
9274
9275 #define PHY_FW_VER_LEN                  10
9276
9277 static void bnx2x_get_drvinfo(struct net_device *dev,
9278                               struct ethtool_drvinfo *info)
9279 {
9280         struct bnx2x *bp = netdev_priv(dev);
9281         u8 phy_fw_ver[PHY_FW_VER_LEN];
9282
9283         strcpy(info->driver, DRV_MODULE_NAME);
9284         strcpy(info->version, DRV_MODULE_VERSION);
9285
9286         phy_fw_ver[0] = '\0';
9287         if (bp->port.pmf) {
9288                 bnx2x_acquire_phy_lock(bp);
9289                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9290                                              (bp->state != BNX2X_STATE_CLOSED),
9291                                              phy_fw_ver, PHY_FW_VER_LEN);
9292                 bnx2x_release_phy_lock(bp);
9293         }
9294
9295         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9296                  (bp->common.bc_ver & 0xff0000) >> 16,
9297                  (bp->common.bc_ver & 0xff00) >> 8,
9298                  (bp->common.bc_ver & 0xff),
9299                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9300         strcpy(info->bus_info, pci_name(bp->pdev));
9301         info->n_stats = BNX2X_NUM_STATS;
9302         info->testinfo_len = BNX2X_NUM_TESTS;
9303         info->eedump_len = bp->common.flash_size;
9304         info->regdump_len = bnx2x_get_regs_len(dev);
9305 }
9306
9307 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9308 {
9309         struct bnx2x *bp = netdev_priv(dev);
9310
9311         if (bp->flags & NO_WOL_FLAG) {
9312                 wol->supported = 0;
9313                 wol->wolopts = 0;
9314         } else {
9315                 wol->supported = WAKE_MAGIC;
9316                 if (bp->wol)
9317                         wol->wolopts = WAKE_MAGIC;
9318                 else
9319                         wol->wolopts = 0;
9320         }
9321         memset(&wol->sopass, 0, sizeof(wol->sopass));
9322 }
9323
9324 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9325 {
9326         struct bnx2x *bp = netdev_priv(dev);
9327
9328         if (wol->wolopts & ~WAKE_MAGIC)
9329                 return -EINVAL;
9330
9331         if (wol->wolopts & WAKE_MAGIC) {
9332                 if (bp->flags & NO_WOL_FLAG)
9333                         return -EINVAL;
9334
9335                 bp->wol = 1;
9336         } else
9337                 bp->wol = 0;
9338
9339         return 0;
9340 }
9341
9342 static u32 bnx2x_get_msglevel(struct net_device *dev)
9343 {
9344         struct bnx2x *bp = netdev_priv(dev);
9345
9346         return bp->msglevel;
9347 }
9348
9349 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9350 {
9351         struct bnx2x *bp = netdev_priv(dev);
9352
9353         if (capable(CAP_NET_ADMIN))
9354                 bp->msglevel = level;
9355 }
9356
9357 static int bnx2x_nway_reset(struct net_device *dev)
9358 {
9359         struct bnx2x *bp = netdev_priv(dev);
9360
9361         if (!bp->port.pmf)
9362                 return 0;
9363
9364         if (netif_running(dev)) {
9365                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9366                 bnx2x_link_set(bp);
9367         }
9368
9369         return 0;
9370 }
9371
9372 static u32 bnx2x_get_link(struct net_device *dev)
9373 {
9374         struct bnx2x *bp = netdev_priv(dev);
9375
9376         if (bp->flags & MF_FUNC_DIS)
9377                 return 0;
9378
9379         return bp->link_vars.link_up;
9380 }
9381
9382 static int bnx2x_get_eeprom_len(struct net_device *dev)
9383 {
9384         struct bnx2x *bp = netdev_priv(dev);
9385
9386         return bp->common.flash_size;
9387 }
9388
9389 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9390 {
9391         int port = BP_PORT(bp);
9392         int count, i;
9393         u32 val = 0;
9394
9395         /* adjust timeout for emulation/FPGA */
9396         count = NVRAM_TIMEOUT_COUNT;
9397         if (CHIP_REV_IS_SLOW(bp))
9398                 count *= 100;
9399
9400         /* request access to nvram interface */
9401         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9402                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9403
9404         for (i = 0; i < count*10; i++) {
9405                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9406                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9407                         break;
9408
9409                 udelay(5);
9410         }
9411
9412         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9413                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9414                 return -EBUSY;
9415         }
9416
9417         return 0;
9418 }
9419
9420 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9421 {
9422         int port = BP_PORT(bp);
9423         int count, i;
9424         u32 val = 0;
9425
9426         /* adjust timeout for emulation/FPGA */
9427         count = NVRAM_TIMEOUT_COUNT;
9428         if (CHIP_REV_IS_SLOW(bp))
9429                 count *= 100;
9430
9431         /* relinquish nvram interface */
9432         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9433                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9434
9435         for (i = 0; i < count*10; i++) {
9436                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9437                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9438                         break;
9439
9440                 udelay(5);
9441         }
9442
9443         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9444                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9445                 return -EBUSY;
9446         }
9447
9448         return 0;
9449 }
9450
9451 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9452 {
9453         u32 val;
9454
9455         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9456
9457         /* enable both bits, even on read */
9458         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9459                (val | MCPR_NVM_ACCESS_ENABLE_EN |
9460                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
9461 }
9462
9463 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9464 {
9465         u32 val;
9466
9467         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9468
9469         /* disable both bits, even after read */
9470         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9471                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9472                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9473 }
9474
9475 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9476                                   u32 cmd_flags)
9477 {
9478         int count, i, rc;
9479         u32 val;
9480
9481         /* build the command word */
9482         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9483
9484         /* need to clear DONE bit separately */
9485         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9486
9487         /* address of the NVRAM to read from */
9488         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9489                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9490
9491         /* issue a read command */
9492         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9493
9494         /* adjust timeout for emulation/FPGA */
9495         count = NVRAM_TIMEOUT_COUNT;
9496         if (CHIP_REV_IS_SLOW(bp))
9497                 count *= 100;
9498
9499         /* wait for completion */
9500         *ret_val = 0;
9501         rc = -EBUSY;
9502         for (i = 0; i < count; i++) {
9503                 udelay(5);
9504                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9505
9506                 if (val & MCPR_NVM_COMMAND_DONE) {
9507                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9508                         /* we read nvram data in cpu order
9509                          * but ethtool sees it as an array of bytes
9510                          * converting to big-endian will do the work */
9511                         *ret_val = cpu_to_be32(val);
9512                         rc = 0;
9513                         break;
9514                 }
9515         }
9516
9517         return rc;
9518 }
9519
9520 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9521                             int buf_size)
9522 {
9523         int rc;
9524         u32 cmd_flags;
9525         __be32 val;
9526
9527         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9528                 DP(BNX2X_MSG_NVM,
9529                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9530                    offset, buf_size);
9531                 return -EINVAL;
9532         }
9533
9534         if (offset + buf_size > bp->common.flash_size) {
9535                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9536                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9537                    offset, buf_size, bp->common.flash_size);
9538                 return -EINVAL;
9539         }
9540
9541         /* request access to nvram interface */
9542         rc = bnx2x_acquire_nvram_lock(bp);
9543         if (rc)
9544                 return rc;
9545
9546         /* enable access to nvram interface */
9547         bnx2x_enable_nvram_access(bp);
9548
9549         /* read the first word(s) */
9550         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9551         while ((buf_size > sizeof(u32)) && (rc == 0)) {
9552                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9553                 memcpy(ret_buf, &val, 4);
9554
9555                 /* advance to the next dword */
9556                 offset += sizeof(u32);
9557                 ret_buf += sizeof(u32);
9558                 buf_size -= sizeof(u32);
9559                 cmd_flags = 0;
9560         }
9561
9562         if (rc == 0) {
9563                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9564                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9565                 memcpy(ret_buf, &val, 4);
9566         }
9567
9568         /* disable access to nvram interface */
9569         bnx2x_disable_nvram_access(bp);
9570         bnx2x_release_nvram_lock(bp);
9571
9572         return rc;
9573 }
9574
9575 static int bnx2x_get_eeprom(struct net_device *dev,
9576                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9577 {
9578         struct bnx2x *bp = netdev_priv(dev);
9579         int rc;
9580
9581         if (!netif_running(dev))
9582                 return -EAGAIN;
9583
9584         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9585            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9586            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9587            eeprom->len, eeprom->len);
9588
9589         /* parameters already validated in ethtool_get_eeprom */
9590
9591         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9592
9593         return rc;
9594 }
9595
9596 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9597                                    u32 cmd_flags)
9598 {
9599         int count, i, rc;
9600
9601         /* build the command word */
9602         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9603
9604         /* need to clear DONE bit separately */
9605         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9606
9607         /* write the data */
9608         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9609
9610         /* address of the NVRAM to write to */
9611         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9612                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9613
9614         /* issue the write command */
9615         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9616
9617         /* adjust timeout for emulation/FPGA */
9618         count = NVRAM_TIMEOUT_COUNT;
9619         if (CHIP_REV_IS_SLOW(bp))
9620                 count *= 100;
9621
9622         /* wait for completion */
9623         rc = -EBUSY;
9624         for (i = 0; i < count; i++) {
9625                 udelay(5);
9626                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9627                 if (val & MCPR_NVM_COMMAND_DONE) {
9628                         rc = 0;
9629                         break;
9630                 }
9631         }
9632
9633         return rc;
9634 }
9635
9636 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
9637
9638 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9639                               int buf_size)
9640 {
9641         int rc;
9642         u32 cmd_flags;
9643         u32 align_offset;
9644         __be32 val;
9645
9646         if (offset + buf_size > bp->common.flash_size) {
9647                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9648                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9649                    offset, buf_size, bp->common.flash_size);
9650                 return -EINVAL;
9651         }
9652
9653         /* request access to nvram interface */
9654         rc = bnx2x_acquire_nvram_lock(bp);
9655         if (rc)
9656                 return rc;
9657
9658         /* enable access to nvram interface */
9659         bnx2x_enable_nvram_access(bp);
9660
9661         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9662         align_offset = (offset & ~0x03);
9663         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9664
9665         if (rc == 0) {
9666                 val &= ~(0xff << BYTE_OFFSET(offset));
9667                 val |= (*data_buf << BYTE_OFFSET(offset));
9668
9669                 /* nvram data is returned as an array of bytes
9670                  * convert it back to cpu order */
9671                 val = be32_to_cpu(val);
9672
9673                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9674                                              cmd_flags);
9675         }
9676
9677         /* disable access to nvram interface */
9678         bnx2x_disable_nvram_access(bp);
9679         bnx2x_release_nvram_lock(bp);
9680
9681         return rc;
9682 }
9683
9684 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9685                              int buf_size)
9686 {
9687         int rc;
9688         u32 cmd_flags;
9689         u32 val;
9690         u32 written_so_far;
9691
9692         if (buf_size == 1)      /* ethtool */
9693                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9694
9695         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9696                 DP(BNX2X_MSG_NVM,
9697                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9698                    offset, buf_size);
9699                 return -EINVAL;
9700         }
9701
9702         if (offset + buf_size > bp->common.flash_size) {
9703                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9704                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9705                    offset, buf_size, bp->common.flash_size);
9706                 return -EINVAL;
9707         }
9708
9709         /* request access to nvram interface */
9710         rc = bnx2x_acquire_nvram_lock(bp);
9711         if (rc)
9712                 return rc;
9713
9714         /* enable access to nvram interface */
9715         bnx2x_enable_nvram_access(bp);
9716
9717         written_so_far = 0;
9718         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9719         while ((written_so_far < buf_size) && (rc == 0)) {
9720                 if (written_so_far == (buf_size - sizeof(u32)))
9721                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9722                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9723                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9724                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9725                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9726
9727                 memcpy(&val, data_buf, 4);
9728
9729                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9730
9731                 /* advance to the next dword */
9732                 offset += sizeof(u32);
9733                 data_buf += sizeof(u32);
9734                 written_so_far += sizeof(u32);
9735                 cmd_flags = 0;
9736         }
9737
9738         /* disable access to nvram interface */
9739         bnx2x_disable_nvram_access(bp);
9740         bnx2x_release_nvram_lock(bp);
9741
9742         return rc;
9743 }
9744
9745 static int bnx2x_set_eeprom(struct net_device *dev,
9746                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9747 {
9748         struct bnx2x *bp = netdev_priv(dev);
9749         int port = BP_PORT(bp);
9750         int rc = 0;
9751
9752         if (!netif_running(dev))
9753                 return -EAGAIN;
9754
9755         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9756            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9757            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9758            eeprom->len, eeprom->len);
9759
9760         /* parameters already validated in ethtool_set_eeprom */
9761
9762         /* PHY eeprom can be accessed only by the PMF */
9763         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9764             !bp->port.pmf)
9765                 return -EINVAL;
9766
9767         if (eeprom->magic == 0x50485950) {
9768                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9769                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9770
9771                 bnx2x_acquire_phy_lock(bp);
9772                 rc |= bnx2x_link_reset(&bp->link_params,
9773                                        &bp->link_vars, 0);
9774                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9775                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9776                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9777                                        MISC_REGISTERS_GPIO_HIGH, port);
9778                 bnx2x_release_phy_lock(bp);
9779                 bnx2x_link_report(bp);
9780
9781         } else if (eeprom->magic == 0x50485952) {
9782                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9783                 if (bp->state == BNX2X_STATE_OPEN) {
9784                         bnx2x_acquire_phy_lock(bp);
9785                         rc |= bnx2x_link_reset(&bp->link_params,
9786                                                &bp->link_vars, 1);
9787
9788                         rc |= bnx2x_phy_init(&bp->link_params,
9789                                              &bp->link_vars);
9790                         bnx2x_release_phy_lock(bp);
9791                         bnx2x_calc_fc_adv(bp);
9792                 }
9793         } else if (eeprom->magic == 0x53985943) {
9794                 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9795                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9796                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9797                         u8 ext_phy_addr =
9798                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9799
9800                         /* DSP Remove Download Mode */
9801                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9802                                        MISC_REGISTERS_GPIO_LOW, port);
9803
9804                         bnx2x_acquire_phy_lock(bp);
9805
9806                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9807
9808                         /* wait 0.5 sec to allow it to run */
9809                         msleep(500);
9810                         bnx2x_ext_phy_hw_reset(bp, port);
9811                         msleep(500);
9812                         bnx2x_release_phy_lock(bp);
9813                 }
9814         } else
9815                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9816
9817         return rc;
9818 }
9819
9820 static int bnx2x_get_coalesce(struct net_device *dev,
9821                               struct ethtool_coalesce *coal)
9822 {
9823         struct bnx2x *bp = netdev_priv(dev);
9824
9825         memset(coal, 0, sizeof(struct ethtool_coalesce));
9826
9827         coal->rx_coalesce_usecs = bp->rx_ticks;
9828         coal->tx_coalesce_usecs = bp->tx_ticks;
9829
9830         return 0;
9831 }
9832
9833 #define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
9834 static int bnx2x_set_coalesce(struct net_device *dev,
9835                               struct ethtool_coalesce *coal)
9836 {
9837         struct bnx2x *bp = netdev_priv(dev);
9838
9839         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9840         if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9841                 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9842
9843         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9844         if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9845                 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9846
9847         if (netif_running(dev))
9848                 bnx2x_update_coalesce(bp);
9849
9850         return 0;
9851 }
9852
9853 static void bnx2x_get_ringparam(struct net_device *dev,
9854                                 struct ethtool_ringparam *ering)
9855 {
9856         struct bnx2x *bp = netdev_priv(dev);
9857
9858         ering->rx_max_pending = MAX_RX_AVAIL;
9859         ering->rx_mini_max_pending = 0;
9860         ering->rx_jumbo_max_pending = 0;
9861
9862         ering->rx_pending = bp->rx_ring_size;
9863         ering->rx_mini_pending = 0;
9864         ering->rx_jumbo_pending = 0;
9865
9866         ering->tx_max_pending = MAX_TX_AVAIL;
9867         ering->tx_pending = bp->tx_ring_size;
9868 }
9869
9870 static int bnx2x_set_ringparam(struct net_device *dev,
9871                                struct ethtool_ringparam *ering)
9872 {
9873         struct bnx2x *bp = netdev_priv(dev);
9874         int rc = 0;
9875
9876         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9877             (ering->tx_pending > MAX_TX_AVAIL) ||
9878             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9879                 return -EINVAL;
9880
9881         bp->rx_ring_size = ering->rx_pending;
9882         bp->tx_ring_size = ering->tx_pending;
9883
9884         if (netif_running(dev)) {
9885                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9886                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9887         }
9888
9889         return rc;
9890 }
9891
9892 static void bnx2x_get_pauseparam(struct net_device *dev,
9893                                  struct ethtool_pauseparam *epause)
9894 {
9895         struct bnx2x *bp = netdev_priv(dev);
9896
9897         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9898                            BNX2X_FLOW_CTRL_AUTO) &&
9899                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9900
9901         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9902                             BNX2X_FLOW_CTRL_RX);
9903         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9904                             BNX2X_FLOW_CTRL_TX);
9905
9906         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9907            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9908            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9909 }
9910
9911 static int bnx2x_set_pauseparam(struct net_device *dev,
9912                                 struct ethtool_pauseparam *epause)
9913 {
9914         struct bnx2x *bp = netdev_priv(dev);
9915
9916         if (IS_E1HMF(bp))
9917                 return 0;
9918
9919         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9920            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9921            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9922
9923         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9924
9925         if (epause->rx_pause)
9926                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9927
9928         if (epause->tx_pause)
9929                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9930
9931         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9932                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9933
9934         if (epause->autoneg) {
9935                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9936                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9937                         return -EINVAL;
9938                 }
9939
9940                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9941                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9942         }
9943
9944         DP(NETIF_MSG_LINK,
9945            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9946
9947         if (netif_running(dev)) {
9948                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9949                 bnx2x_link_set(bp);
9950         }
9951
9952         return 0;
9953 }
9954
9955 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9956 {
9957         struct bnx2x *bp = netdev_priv(dev);
9958         int changed = 0;
9959         int rc = 0;
9960
9961         /* TPA requires Rx CSUM offloading */
9962         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9963                 if (!(dev->features & NETIF_F_LRO)) {
9964                         dev->features |= NETIF_F_LRO;
9965                         bp->flags |= TPA_ENABLE_FLAG;
9966                         changed = 1;
9967                 }
9968
9969         } else if (dev->features & NETIF_F_LRO) {
9970                 dev->features &= ~NETIF_F_LRO;
9971                 bp->flags &= ~TPA_ENABLE_FLAG;
9972                 changed = 1;
9973         }
9974
9975         if (changed && netif_running(dev)) {
9976                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9977                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9978         }
9979
9980         return rc;
9981 }
9982
9983 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9984 {
9985         struct bnx2x *bp = netdev_priv(dev);
9986
9987         return bp->rx_csum;
9988 }
9989
9990 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9991 {
9992         struct bnx2x *bp = netdev_priv(dev);
9993         int rc = 0;
9994
9995         bp->rx_csum = data;
9996
9997         /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9998            TPA'ed packets will be discarded due to wrong TCP CSUM */
9999         if (!data) {
10000                 u32 flags = ethtool_op_get_flags(dev);
10001
10002                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10003         }
10004
10005         return rc;
10006 }
10007
10008 static int bnx2x_set_tso(struct net_device *dev, u32 data)
10009 {
10010         if (data) {
10011                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10012                 dev->features |= NETIF_F_TSO6;
10013         } else {
10014                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10015                 dev->features &= ~NETIF_F_TSO6;
10016         }
10017
10018         return 0;
10019 }
10020
10021 static const struct {
10022         char string[ETH_GSTRING_LEN];
10023 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10024         { "register_test (offline)" },
10025         { "memory_test (offline)" },
10026         { "loopback_test (offline)" },
10027         { "nvram_test (online)" },
10028         { "interrupt_test (online)" },
10029         { "link_test (online)" },
10030         { "idle check (online)" }
10031 };
10032
10033 static int bnx2x_test_registers(struct bnx2x *bp)
10034 {
10035         int idx, i, rc = -ENODEV;
10036         u32 wr_val = 0;
10037         int port = BP_PORT(bp);
10038         static const struct {
10039                 u32  offset0;
10040                 u32  offset1;
10041                 u32  mask;
10042         } reg_tbl[] = {
10043 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
10044                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
10045                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
10046                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
10047                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
10048                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
10049                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
10050                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
10051                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
10052                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
10053 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
10054                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
10055                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
10056                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
10057                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
10058                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10059                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
10060                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
10061                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
10062                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
10063 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
10064                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
10065                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
10066                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
10067                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
10068                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
10069                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
10070                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
10071                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
10072                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
10073 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
10074                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
10075                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
10076                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10077                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
10078                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10079                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
10080
10081                 { 0xffffffff, 0, 0x00000000 }
10082         };
10083
10084         if (!netif_running(bp->dev))
10085                 return rc;
10086
10087         /* Repeat the test twice:
10088            First by writing 0x00000000, second by writing 0xffffffff */
10089         for (idx = 0; idx < 2; idx++) {
10090
10091                 switch (idx) {
10092                 case 0:
10093                         wr_val = 0;
10094                         break;
10095                 case 1:
10096                         wr_val = 0xffffffff;
10097                         break;
10098                 }
10099
10100                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10101                         u32 offset, mask, save_val, val;
10102
10103                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10104                         mask = reg_tbl[i].mask;
10105
10106                         save_val = REG_RD(bp, offset);
10107
10108                         REG_WR(bp, offset, wr_val);
10109                         val = REG_RD(bp, offset);
10110
10111                         /* Restore the original register's value */
10112                         REG_WR(bp, offset, save_val);
10113
10114                         /* verify that value is as expected value */
10115                         if ((val & mask) != (wr_val & mask))
10116                                 goto test_reg_exit;
10117                 }
10118         }
10119
10120         rc = 0;
10121
10122 test_reg_exit:
10123         return rc;
10124 }
10125
10126 static int bnx2x_test_memory(struct bnx2x *bp)
10127 {
10128         int i, j, rc = -ENODEV;
10129         u32 val;
10130         static const struct {
10131                 u32 offset;
10132                 int size;
10133         } mem_tbl[] = {
10134                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
10135                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10136                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
10137                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
10138                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
10139                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
10140                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
10141
10142                 { 0xffffffff, 0 }
10143         };
10144         static const struct {
10145                 char *name;
10146                 u32 offset;
10147                 u32 e1_mask;
10148                 u32 e1h_mask;
10149         } prty_tbl[] = {
10150                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
10151                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
10152                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
10153                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
10154                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
10155                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
10156
10157                 { NULL, 0xffffffff, 0, 0 }
10158         };
10159
10160         if (!netif_running(bp->dev))
10161                 return rc;
10162
10163         /* Go through all the memories */
10164         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10165                 for (j = 0; j < mem_tbl[i].size; j++)
10166                         REG_RD(bp, mem_tbl[i].offset + j*4);
10167
10168         /* Check the parity status */
10169         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10170                 val = REG_RD(bp, prty_tbl[i].offset);
10171                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10172                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10173                         DP(NETIF_MSG_HW,
10174                            "%s is 0x%x\n", prty_tbl[i].name, val);
10175                         goto test_mem_exit;
10176                 }
10177         }
10178
10179         rc = 0;
10180
10181 test_mem_exit:
10182         return rc;
10183 }
10184
10185 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10186 {
10187         int cnt = 1000;
10188
10189         if (link_up)
10190                 while (bnx2x_link_test(bp) && cnt--)
10191                         msleep(10);
10192 }
10193
10194 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10195 {
10196         unsigned int pkt_size, num_pkts, i;
10197         struct sk_buff *skb;
10198         unsigned char *packet;
10199         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10200         struct bnx2x_fastpath *fp_tx = &bp->fp[0];
10201         u16 tx_start_idx, tx_idx;
10202         u16 rx_start_idx, rx_idx;
10203         u16 pkt_prod, bd_prod;
10204         struct sw_tx_bd *tx_buf;
10205         struct eth_tx_start_bd *tx_start_bd;
10206         struct eth_tx_parse_bd *pbd = NULL;
10207         dma_addr_t mapping;
10208         union eth_rx_cqe *cqe;
10209         u8 cqe_fp_flags;
10210         struct sw_rx_bd *rx_buf;
10211         u16 len;
10212         int rc = -ENODEV;
10213
10214         /* check the loopback mode */
10215         switch (loopback_mode) {
10216         case BNX2X_PHY_LOOPBACK:
10217                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10218                         return -EINVAL;
10219                 break;
10220         case BNX2X_MAC_LOOPBACK:
10221                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10222                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10223                 break;
10224         default:
10225                 return -EINVAL;
10226         }
10227
10228         /* prepare the loopback packet */
10229         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10230                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10231         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10232         if (!skb) {
10233                 rc = -ENOMEM;
10234                 goto test_loopback_exit;
10235         }
10236         packet = skb_put(skb, pkt_size);
10237         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10238         memset(packet + ETH_ALEN, 0, ETH_ALEN);
10239         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10240         for (i = ETH_HLEN; i < pkt_size; i++)
10241                 packet[i] = (unsigned char) (i & 0xff);
10242
10243         /* send the loopback packet */
10244         num_pkts = 0;
10245         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10246         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10247
10248         pkt_prod = fp_tx->tx_pkt_prod++;
10249         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10250         tx_buf->first_bd = fp_tx->tx_bd_prod;
10251         tx_buf->skb = skb;
10252         tx_buf->flags = 0;
10253
10254         bd_prod = TX_BD(fp_tx->tx_bd_prod);
10255         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10256         mapping = pci_map_single(bp->pdev, skb->data,
10257                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10258         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10259         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10260         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10261         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10262         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10263         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10264         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10265                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10266
10267         /* turn on parsing and get a BD */
10268         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10269         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10270
10271         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10272
10273         wmb();
10274
10275         fp_tx->tx_db.data.prod += 2;
10276         barrier();
10277         DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
10278
10279         mmiowb();
10280
10281         num_pkts++;
10282         fp_tx->tx_bd_prod += 2; /* start + pbd */
10283
10284         udelay(100);
10285
10286         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10287         if (tx_idx != tx_start_idx + num_pkts)
10288                 goto test_loopback_exit;
10289
10290         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10291         if (rx_idx != rx_start_idx + num_pkts)
10292                 goto test_loopback_exit;
10293
10294         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10295         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10296         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10297                 goto test_loopback_rx_exit;
10298
10299         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10300         if (len != pkt_size)
10301                 goto test_loopback_rx_exit;
10302
10303         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10304         skb = rx_buf->skb;
10305         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10306         for (i = ETH_HLEN; i < pkt_size; i++)
10307                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10308                         goto test_loopback_rx_exit;
10309
10310         rc = 0;
10311
10312 test_loopback_rx_exit:
10313
10314         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10315         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10316         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10317         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10318
10319         /* Update producers */
10320         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10321                              fp_rx->rx_sge_prod);
10322
10323 test_loopback_exit:
10324         bp->link_params.loopback_mode = LOOPBACK_NONE;
10325
10326         return rc;
10327 }
10328
10329 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10330 {
10331         int rc = 0, res;
10332
10333         if (!netif_running(bp->dev))
10334                 return BNX2X_LOOPBACK_FAILED;
10335
10336         bnx2x_netif_stop(bp, 1);
10337         bnx2x_acquire_phy_lock(bp);
10338
10339         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10340         if (res) {
10341                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
10342                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10343         }
10344
10345         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10346         if (res) {
10347                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
10348                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10349         }
10350
10351         bnx2x_release_phy_lock(bp);
10352         bnx2x_netif_start(bp);
10353
10354         return rc;
10355 }
10356
10357 #define CRC32_RESIDUAL                  0xdebb20e3
10358
10359 static int bnx2x_test_nvram(struct bnx2x *bp)
10360 {
10361         static const struct {
10362                 int offset;
10363                 int size;
10364         } nvram_tbl[] = {
10365                 {     0,  0x14 }, /* bootstrap */
10366                 {  0x14,  0xec }, /* dir */
10367                 { 0x100, 0x350 }, /* manuf_info */
10368                 { 0x450,  0xf0 }, /* feature_info */
10369                 { 0x640,  0x64 }, /* upgrade_key_info */
10370                 { 0x6a4,  0x64 },
10371                 { 0x708,  0x70 }, /* manuf_key_info */
10372                 { 0x778,  0x70 },
10373                 {     0,     0 }
10374         };
10375         __be32 buf[0x350 / 4];
10376         u8 *data = (u8 *)buf;
10377         int i, rc;
10378         u32 magic, crc;
10379
10380         rc = bnx2x_nvram_read(bp, 0, data, 4);
10381         if (rc) {
10382                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10383                 goto test_nvram_exit;
10384         }
10385
10386         magic = be32_to_cpu(buf[0]);
10387         if (magic != 0x669955aa) {
10388                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10389                 rc = -ENODEV;
10390                 goto test_nvram_exit;
10391         }
10392
10393         for (i = 0; nvram_tbl[i].size; i++) {
10394
10395                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10396                                       nvram_tbl[i].size);
10397                 if (rc) {
10398                         DP(NETIF_MSG_PROBE,
10399                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10400                         goto test_nvram_exit;
10401                 }
10402
10403                 crc = ether_crc_le(nvram_tbl[i].size, data);
10404                 if (crc != CRC32_RESIDUAL) {
10405                         DP(NETIF_MSG_PROBE,
10406                            "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10407                         rc = -ENODEV;
10408                         goto test_nvram_exit;
10409                 }
10410         }
10411
10412 test_nvram_exit:
10413         return rc;
10414 }
10415
10416 static int bnx2x_test_intr(struct bnx2x *bp)
10417 {
10418         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10419         int i, rc;
10420
10421         if (!netif_running(bp->dev))
10422                 return -ENODEV;
10423
10424         config->hdr.length = 0;
10425         if (CHIP_IS_E1(bp))
10426                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10427         else
10428                 config->hdr.offset = BP_FUNC(bp);
10429         config->hdr.client_id = bp->fp->cl_id;
10430         config->hdr.reserved1 = 0;
10431
10432         bp->set_mac_pending++;
10433         smp_wmb();
10434         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10435                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10436                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10437         if (rc == 0) {
10438                 for (i = 0; i < 10; i++) {
10439                         if (!bp->set_mac_pending)
10440                                 break;
10441                         smp_rmb();
10442                         msleep_interruptible(10);
10443                 }
10444                 if (i == 10)
10445                         rc = -ENODEV;
10446         }
10447
10448         return rc;
10449 }
10450
10451 static void bnx2x_self_test(struct net_device *dev,
10452                             struct ethtool_test *etest, u64 *buf)
10453 {
10454         struct bnx2x *bp = netdev_priv(dev);
10455
10456         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10457
10458         if (!netif_running(dev))
10459                 return;
10460
10461         /* offline tests are not supported in MF mode */
10462         if (IS_E1HMF(bp))
10463                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10464
10465         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10466                 int port = BP_PORT(bp);
10467                 u32 val;
10468                 u8 link_up;
10469
10470                 /* save current value of input enable for TX port IF */
10471                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10472                 /* disable input for TX port IF */
10473                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10474
10475                 link_up = (bnx2x_link_test(bp) == 0);
10476                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10477                 bnx2x_nic_load(bp, LOAD_DIAG);
10478                 /* wait until link state is restored */
10479                 bnx2x_wait_for_link(bp, link_up);
10480
10481                 if (bnx2x_test_registers(bp) != 0) {
10482                         buf[0] = 1;
10483                         etest->flags |= ETH_TEST_FL_FAILED;
10484                 }
10485                 if (bnx2x_test_memory(bp) != 0) {
10486                         buf[1] = 1;
10487                         etest->flags |= ETH_TEST_FL_FAILED;
10488                 }
10489                 buf[2] = bnx2x_test_loopback(bp, link_up);
10490                 if (buf[2] != 0)
10491                         etest->flags |= ETH_TEST_FL_FAILED;
10492
10493                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10494
10495                 /* restore input for TX port IF */
10496                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10497
10498                 bnx2x_nic_load(bp, LOAD_NORMAL);
10499                 /* wait until link state is restored */
10500                 bnx2x_wait_for_link(bp, link_up);
10501         }
10502         if (bnx2x_test_nvram(bp) != 0) {
10503                 buf[3] = 1;
10504                 etest->flags |= ETH_TEST_FL_FAILED;
10505         }
10506         if (bnx2x_test_intr(bp) != 0) {
10507                 buf[4] = 1;
10508                 etest->flags |= ETH_TEST_FL_FAILED;
10509         }
10510         if (bp->port.pmf)
10511                 if (bnx2x_link_test(bp) != 0) {
10512                         buf[5] = 1;
10513                         etest->flags |= ETH_TEST_FL_FAILED;
10514                 }
10515
10516 #ifdef BNX2X_EXTRA_DEBUG
10517         bnx2x_panic_dump(bp);
10518 #endif
10519 }
10520
10521 static const struct {
10522         long offset;
10523         int size;
10524         u8 string[ETH_GSTRING_LEN];
10525 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10526 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10527         { Q_STATS_OFFSET32(error_bytes_received_hi),
10528                                                 8, "[%d]: rx_error_bytes" },
10529         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10530                                                 8, "[%d]: rx_ucast_packets" },
10531         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10532                                                 8, "[%d]: rx_mcast_packets" },
10533         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10534                                                 8, "[%d]: rx_bcast_packets" },
10535         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10536         { Q_STATS_OFFSET32(rx_err_discard_pkt),
10537                                          4, "[%d]: rx_phy_ip_err_discards"},
10538         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10539                                          4, "[%d]: rx_skb_alloc_discard" },
10540         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10541
10542 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10543         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10544                                                         8, "[%d]: tx_packets" }
10545 };
10546
10547 static const struct {
10548         long offset;
10549         int size;
10550         u32 flags;
10551 #define STATS_FLAGS_PORT                1
10552 #define STATS_FLAGS_FUNC                2
10553 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10554         u8 string[ETH_GSTRING_LEN];
10555 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10556 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10557                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
10558         { STATS_OFFSET32(error_bytes_received_hi),
10559                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10560         { STATS_OFFSET32(total_unicast_packets_received_hi),
10561                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10562         { STATS_OFFSET32(total_multicast_packets_received_hi),
10563                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10564         { STATS_OFFSET32(total_broadcast_packets_received_hi),
10565                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10566         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10567                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10568         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10569                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
10570         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10571                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10572         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10573                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10574 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10575                                 8, STATS_FLAGS_PORT, "rx_fragments" },
10576         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10577                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
10578         { STATS_OFFSET32(no_buff_discard_hi),
10579                                 8, STATS_FLAGS_BOTH, "rx_discards" },
10580         { STATS_OFFSET32(mac_filter_discard),
10581                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10582         { STATS_OFFSET32(xxoverflow_discard),
10583                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10584         { STATS_OFFSET32(brb_drop_hi),
10585                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10586         { STATS_OFFSET32(brb_truncate_hi),
10587                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10588         { STATS_OFFSET32(pause_frames_received_hi),
10589                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10590         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10591                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10592         { STATS_OFFSET32(nig_timer_max),
10593                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10594 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10595                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10596         { STATS_OFFSET32(rx_skb_alloc_failed),
10597                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10598         { STATS_OFFSET32(hw_csum_err),
10599                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10600
10601         { STATS_OFFSET32(total_bytes_transmitted_hi),
10602                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
10603         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10604                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10605         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10606                                 8, STATS_FLAGS_BOTH, "tx_packets" },
10607         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10608                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10609         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10610                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10611         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10612                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10613         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10614                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10615 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10616                                 8, STATS_FLAGS_PORT, "tx_deferred" },
10617         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10618                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10619         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10620                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10621         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10622                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10623         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10624                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10625         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10626                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10627         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10628                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10629         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10630                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10631         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10632                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10633         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10634                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10635 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10636                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10637         { STATS_OFFSET32(pause_frames_sent_hi),
10638                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10639 };
10640
10641 #define IS_PORT_STAT(i) \
10642         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10643 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10644 #define IS_E1HMF_MODE_STAT(bp) \
10645                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10646
10647 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10648 {
10649         struct bnx2x *bp = netdev_priv(dev);
10650         int i, num_stats;
10651
10652         switch(stringset) {
10653         case ETH_SS_STATS:
10654                 if (is_multi(bp)) {
10655                         num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
10656                         if (!IS_E1HMF_MODE_STAT(bp))
10657                                 num_stats += BNX2X_NUM_STATS;
10658                 } else {
10659                         if (IS_E1HMF_MODE_STAT(bp)) {
10660                                 num_stats = 0;
10661                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
10662                                         if (IS_FUNC_STAT(i))
10663                                                 num_stats++;
10664                         } else
10665                                 num_stats = BNX2X_NUM_STATS;
10666                 }
10667                 return num_stats;
10668
10669         case ETH_SS_TEST:
10670                 return BNX2X_NUM_TESTS;
10671
10672         default:
10673                 return -EINVAL;
10674         }
10675 }
10676
10677 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10678 {
10679         struct bnx2x *bp = netdev_priv(dev);
10680         int i, j, k;
10681
10682         switch (stringset) {
10683         case ETH_SS_STATS:
10684                 if (is_multi(bp)) {
10685                         k = 0;
10686                         for_each_queue(bp, i) {
10687                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10688                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10689                                                 bnx2x_q_stats_arr[j].string, i);
10690                                 k += BNX2X_NUM_Q_STATS;
10691                         }
10692                         if (IS_E1HMF_MODE_STAT(bp))
10693                                 break;
10694                         for (j = 0; j < BNX2X_NUM_STATS; j++)
10695                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10696                                        bnx2x_stats_arr[j].string);
10697                 } else {
10698                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10699                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10700                                         continue;
10701                                 strcpy(buf + j*ETH_GSTRING_LEN,
10702                                        bnx2x_stats_arr[i].string);
10703                                 j++;
10704                         }
10705                 }
10706                 break;
10707
10708         case ETH_SS_TEST:
10709                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10710                 break;
10711         }
10712 }
10713
10714 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10715                                     struct ethtool_stats *stats, u64 *buf)
10716 {
10717         struct bnx2x *bp = netdev_priv(dev);
10718         u32 *hw_stats, *offset;
10719         int i, j, k;
10720
10721         if (is_multi(bp)) {
10722                 k = 0;
10723                 for_each_queue(bp, i) {
10724                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10725                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10726                                 if (bnx2x_q_stats_arr[j].size == 0) {
10727                                         /* skip this counter */
10728                                         buf[k + j] = 0;
10729                                         continue;
10730                                 }
10731                                 offset = (hw_stats +
10732                                           bnx2x_q_stats_arr[j].offset);
10733                                 if (bnx2x_q_stats_arr[j].size == 4) {
10734                                         /* 4-byte counter */
10735                                         buf[k + j] = (u64) *offset;
10736                                         continue;
10737                                 }
10738                                 /* 8-byte counter */
10739                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10740                         }
10741                         k += BNX2X_NUM_Q_STATS;
10742                 }
10743                 if (IS_E1HMF_MODE_STAT(bp))
10744                         return;
10745                 hw_stats = (u32 *)&bp->eth_stats;
10746                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10747                         if (bnx2x_stats_arr[j].size == 0) {
10748                                 /* skip this counter */
10749                                 buf[k + j] = 0;
10750                                 continue;
10751                         }
10752                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10753                         if (bnx2x_stats_arr[j].size == 4) {
10754                                 /* 4-byte counter */
10755                                 buf[k + j] = (u64) *offset;
10756                                 continue;
10757                         }
10758                         /* 8-byte counter */
10759                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10760                 }
10761         } else {
10762                 hw_stats = (u32 *)&bp->eth_stats;
10763                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10764                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10765                                 continue;
10766                         if (bnx2x_stats_arr[i].size == 0) {
10767                                 /* skip this counter */
10768                                 buf[j] = 0;
10769                                 j++;
10770                                 continue;
10771                         }
10772                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10773                         if (bnx2x_stats_arr[i].size == 4) {
10774                                 /* 4-byte counter */
10775                                 buf[j] = (u64) *offset;
10776                                 j++;
10777                                 continue;
10778                         }
10779                         /* 8-byte counter */
10780                         buf[j] = HILO_U64(*offset, *(offset + 1));
10781                         j++;
10782                 }
10783         }
10784 }
10785
10786 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10787 {
10788         struct bnx2x *bp = netdev_priv(dev);
10789         int i;
10790
10791         if (!netif_running(dev))
10792                 return 0;
10793
10794         if (!bp->port.pmf)
10795                 return 0;
10796
10797         if (data == 0)
10798                 data = 2;
10799
10800         for (i = 0; i < (data * 2); i++) {
10801                 if ((i % 2) == 0)
10802                         bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10803                                       SPEED_1000);
10804                 else
10805                         bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
10806
10807                 msleep_interruptible(500);
10808                 if (signal_pending(current))
10809                         break;
10810         }
10811
10812         if (bp->link_vars.link_up)
10813                 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10814                               bp->link_vars.line_speed);
10815
10816         return 0;
10817 }
10818
10819 static const struct ethtool_ops bnx2x_ethtool_ops = {
10820         .get_settings           = bnx2x_get_settings,
10821         .set_settings           = bnx2x_set_settings,
10822         .get_drvinfo            = bnx2x_get_drvinfo,
10823         .get_regs_len           = bnx2x_get_regs_len,
10824         .get_regs               = bnx2x_get_regs,
10825         .get_wol                = bnx2x_get_wol,
10826         .set_wol                = bnx2x_set_wol,
10827         .get_msglevel           = bnx2x_get_msglevel,
10828         .set_msglevel           = bnx2x_set_msglevel,
10829         .nway_reset             = bnx2x_nway_reset,
10830         .get_link               = bnx2x_get_link,
10831         .get_eeprom_len         = bnx2x_get_eeprom_len,
10832         .get_eeprom             = bnx2x_get_eeprom,
10833         .set_eeprom             = bnx2x_set_eeprom,
10834         .get_coalesce           = bnx2x_get_coalesce,
10835         .set_coalesce           = bnx2x_set_coalesce,
10836         .get_ringparam          = bnx2x_get_ringparam,
10837         .set_ringparam          = bnx2x_set_ringparam,
10838         .get_pauseparam         = bnx2x_get_pauseparam,
10839         .set_pauseparam         = bnx2x_set_pauseparam,
10840         .get_rx_csum            = bnx2x_get_rx_csum,
10841         .set_rx_csum            = bnx2x_set_rx_csum,
10842         .get_tx_csum            = ethtool_op_get_tx_csum,
10843         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10844         .set_flags              = bnx2x_set_flags,
10845         .get_flags              = ethtool_op_get_flags,
10846         .get_sg                 = ethtool_op_get_sg,
10847         .set_sg                 = ethtool_op_set_sg,
10848         .get_tso                = ethtool_op_get_tso,
10849         .set_tso                = bnx2x_set_tso,
10850         .self_test              = bnx2x_self_test,
10851         .get_sset_count         = bnx2x_get_sset_count,
10852         .get_strings            = bnx2x_get_strings,
10853         .phys_id                = bnx2x_phys_id,
10854         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10855 };
10856
10857 /* end of ethtool_ops */
10858
10859 /****************************************************************************
10860 * General service functions
10861 ****************************************************************************/
10862
10863 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10864 {
10865         u16 pmcsr;
10866
10867         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10868
10869         switch (state) {
10870         case PCI_D0:
10871                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10872                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10873                                        PCI_PM_CTRL_PME_STATUS));
10874
10875                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10876                         /* delay required during transition out of D3hot */
10877                         msleep(20);
10878                 break;
10879
10880         case PCI_D3hot:
10881                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10882                 pmcsr |= 3;
10883
10884                 if (bp->wol)
10885                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10886
10887                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10888                                       pmcsr);
10889
10890                 /* No more memory access after this point until
10891                 * device is brought back to D0.
10892                 */
10893                 break;
10894
10895         default:
10896                 return -EINVAL;
10897         }
10898         return 0;
10899 }
10900
10901 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10902 {
10903         u16 rx_cons_sb;
10904
10905         /* Tell compiler that status block fields can change */
10906         barrier();
10907         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10908         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10909                 rx_cons_sb++;
10910         return (fp->rx_comp_cons != rx_cons_sb);
10911 }
10912
10913 /*
10914  * net_device service functions
10915  */
10916
10917 static int bnx2x_poll(struct napi_struct *napi, int budget)
10918 {
10919         int work_done = 0;
10920         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10921                                                  napi);
10922         struct bnx2x *bp = fp->bp;
10923
10924         while (1) {
10925 #ifdef BNX2X_STOP_ON_ERROR
10926                 if (unlikely(bp->panic)) {
10927                         napi_complete(napi);
10928                         return 0;
10929                 }
10930 #endif
10931
10932                 if (bnx2x_has_tx_work(fp))
10933                         bnx2x_tx_int(fp);
10934
10935                 if (bnx2x_has_rx_work(fp)) {
10936                         work_done += bnx2x_rx_int(fp, budget - work_done);
10937
10938                         /* must not complete if we consumed full budget */
10939                         if (work_done >= budget)
10940                                 break;
10941                 }
10942
10943                 /* Fall out from the NAPI loop if needed */
10944                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10945                         bnx2x_update_fpsb_idx(fp);
10946                 /* bnx2x_has_rx_work() reads the status block, thus we need
10947                  * to ensure that status block indices have been actually read
10948                  * (bnx2x_update_fpsb_idx) prior to this check
10949                  * (bnx2x_has_rx_work) so that we won't write the "newer"
10950                  * value of the status block to IGU (if there was a DMA right
10951                  * after bnx2x_has_rx_work and if there is no rmb, the memory
10952                  * reading (bnx2x_update_fpsb_idx) may be postponed to right
10953                  * before bnx2x_ack_sb). In this case there will never be
10954                  * another interrupt until there is another update of the
10955                  * status block, while there is still unhandled work.
10956                  */
10957                         rmb();
10958
10959                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10960                                 napi_complete(napi);
10961                                 /* Re-enable interrupts */
10962                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10963                                              le16_to_cpu(fp->fp_c_idx),
10964                                              IGU_INT_NOP, 1);
10965                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10966                                              le16_to_cpu(fp->fp_u_idx),
10967                                              IGU_INT_ENABLE, 1);
10968                                 break;
10969                         }
10970                 }
10971         }
10972
10973         return work_done;
10974 }
10975
10976
10977 /* we split the first BD into headers and data BDs
10978  * to ease the pain of our fellow microcode engineers
10979  * we use one mapping for both BDs
10980  * So far this has only been observed to happen
10981  * in Other Operating Systems(TM)
10982  */
10983 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10984                                    struct bnx2x_fastpath *fp,
10985                                    struct sw_tx_bd *tx_buf,
10986                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
10987                                    u16 bd_prod, int nbd)
10988 {
10989         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10990         struct eth_tx_bd *d_tx_bd;
10991         dma_addr_t mapping;
10992         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10993
10994         /* first fix first BD */
10995         h_tx_bd->nbd = cpu_to_le16(nbd);
10996         h_tx_bd->nbytes = cpu_to_le16(hlen);
10997
10998         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10999            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11000            h_tx_bd->addr_lo, h_tx_bd->nbd);
11001
11002         /* now get a new data BD
11003          * (after the pbd) and fill it */
11004         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11005         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11006
11007         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11008                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11009
11010         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11011         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11012         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
11013
11014         /* this marks the BD as one that has no individual mapping */
11015         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11016
11017         DP(NETIF_MSG_TX_QUEUED,
11018            "TSO split data size is %d (%x:%x)\n",
11019            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11020
11021         /* update tx_bd */
11022         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
11023
11024         return bd_prod;
11025 }
11026
11027 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11028 {
11029         if (fix > 0)
11030                 csum = (u16) ~csum_fold(csum_sub(csum,
11031                                 csum_partial(t_header - fix, fix, 0)));
11032
11033         else if (fix < 0)
11034                 csum = (u16) ~csum_fold(csum_add(csum,
11035                                 csum_partial(t_header, -fix, 0)));
11036
11037         return swab16(csum);
11038 }
11039
11040 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11041 {
11042         u32 rc;
11043
11044         if (skb->ip_summed != CHECKSUM_PARTIAL)
11045                 rc = XMIT_PLAIN;
11046
11047         else {
11048                 if (skb->protocol == htons(ETH_P_IPV6)) {
11049                         rc = XMIT_CSUM_V6;
11050                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11051                                 rc |= XMIT_CSUM_TCP;
11052
11053                 } else {
11054                         rc = XMIT_CSUM_V4;
11055                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11056                                 rc |= XMIT_CSUM_TCP;
11057                 }
11058         }
11059
11060         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11061                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
11062
11063         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11064                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
11065
11066         return rc;
11067 }
11068
11069 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11070 /* check if packet requires linearization (packet is too fragmented)
11071    no need to check fragmentation if page size > 8K (there will be no
11072    violation to FW restrictions) */
11073 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11074                              u32 xmit_type)
11075 {
11076         int to_copy = 0;
11077         int hlen = 0;
11078         int first_bd_sz = 0;
11079
11080         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11081         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11082
11083                 if (xmit_type & XMIT_GSO) {
11084                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11085                         /* Check if LSO packet needs to be copied:
11086                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11087                         int wnd_size = MAX_FETCH_BD - 3;
11088                         /* Number of windows to check */
11089                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11090                         int wnd_idx = 0;
11091                         int frag_idx = 0;
11092                         u32 wnd_sum = 0;
11093
11094                         /* Headers length */
11095                         hlen = (int)(skb_transport_header(skb) - skb->data) +
11096                                 tcp_hdrlen(skb);
11097
11098                         /* Amount of data (w/o headers) on linear part of SKB*/
11099                         first_bd_sz = skb_headlen(skb) - hlen;
11100
11101                         wnd_sum  = first_bd_sz;
11102
11103                         /* Calculate the first sum - it's special */
11104                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11105                                 wnd_sum +=
11106                                         skb_shinfo(skb)->frags[frag_idx].size;
11107
11108                         /* If there was data on linear skb data - check it */
11109                         if (first_bd_sz > 0) {
11110                                 if (unlikely(wnd_sum < lso_mss)) {
11111                                         to_copy = 1;
11112                                         goto exit_lbl;
11113                                 }
11114
11115                                 wnd_sum -= first_bd_sz;
11116                         }
11117
11118                         /* Others are easier: run through the frag list and
11119                            check all windows */
11120                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11121                                 wnd_sum +=
11122                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11123
11124                                 if (unlikely(wnd_sum < lso_mss)) {
11125                                         to_copy = 1;
11126                                         break;
11127                                 }
11128                                 wnd_sum -=
11129                                         skb_shinfo(skb)->frags[wnd_idx].size;
11130                         }
11131                 } else {
11132                         /* in non-LSO too fragmented packet should always
11133                            be linearized */
11134                         to_copy = 1;
11135                 }
11136         }
11137
11138 exit_lbl:
11139         if (unlikely(to_copy))
11140                 DP(NETIF_MSG_TX_QUEUED,
11141                    "Linearization IS REQUIRED for %s packet. "
11142                    "num_frags %d  hlen %d  first_bd_sz %d\n",
11143                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11144                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11145
11146         return to_copy;
11147 }
11148 #endif
11149
11150 /* called with netif_tx_lock
11151  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
11152  * netif_wake_queue()
11153  */
11154 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11155 {
11156         struct bnx2x *bp = netdev_priv(dev);
11157         struct bnx2x_fastpath *fp;
11158         struct netdev_queue *txq;
11159         struct sw_tx_bd *tx_buf;
11160         struct eth_tx_start_bd *tx_start_bd;
11161         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
11162         struct eth_tx_parse_bd *pbd = NULL;
11163         u16 pkt_prod, bd_prod;
11164         int nbd, fp_index;
11165         dma_addr_t mapping;
11166         u32 xmit_type = bnx2x_xmit_type(bp, skb);
11167         int i;
11168         u8 hlen = 0;
11169         __le16 pkt_size = 0;
11170
11171 #ifdef BNX2X_STOP_ON_ERROR
11172         if (unlikely(bp->panic))
11173                 return NETDEV_TX_BUSY;
11174 #endif
11175
11176         fp_index = skb_get_queue_mapping(skb);
11177         txq = netdev_get_tx_queue(dev, fp_index);
11178
11179         fp = &bp->fp[fp_index];
11180
11181         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11182                 fp->eth_q_stats.driver_xoff++;
11183                 netif_tx_stop_queue(txq);
11184                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11185                 return NETDEV_TX_BUSY;
11186         }
11187
11188         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
11189            "  gso type %x  xmit_type %x\n",
11190            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11191            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11192
11193 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11194         /* First, check if we need to linearize the skb (due to FW
11195            restrictions). No need to check fragmentation if page size > 8K
11196            (there will be no violation to FW restrictions) */
11197         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11198                 /* Statistics of linearization */
11199                 bp->lin_cnt++;
11200                 if (skb_linearize(skb) != 0) {
11201                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11202                            "silently dropping this SKB\n");
11203                         dev_kfree_skb_any(skb);
11204                         return NETDEV_TX_OK;
11205                 }
11206         }
11207 #endif
11208
11209         /*
11210         Please read carefully. First we use one BD which we mark as start,
11211         then we have a parsing info BD (used for TSO or xsum),
11212         and only then we have the rest of the TSO BDs.
11213         (don't forget to mark the last one as last,
11214         and to unmap only AFTER you write to the BD ...)
11215         And above all, all pdb sizes are in words - NOT DWORDS!
11216         */
11217
11218         pkt_prod = fp->tx_pkt_prod++;
11219         bd_prod = TX_BD(fp->tx_bd_prod);
11220
11221         /* get a tx_buf and first BD */
11222         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11223         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11224
11225         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11226         tx_start_bd->general_data = (UNICAST_ADDRESS <<
11227                                      ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11228         /* header nbd */
11229         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11230
11231         /* remember the first BD of the packet */
11232         tx_buf->first_bd = fp->tx_bd_prod;
11233         tx_buf->skb = skb;
11234         tx_buf->flags = 0;
11235
11236         DP(NETIF_MSG_TX_QUEUED,
11237            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
11238            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
11239
11240 #ifdef BCM_VLAN
11241         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11242             (bp->flags & HW_VLAN_TX_FLAG)) {
11243                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11244                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
11245         } else
11246 #endif
11247                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11248
11249         /* turn on parsing and get a BD */
11250         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11251         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11252
11253         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11254
11255         if (xmit_type & XMIT_CSUM) {
11256                 hlen = (skb_network_header(skb) - skb->data) / 2;
11257
11258                 /* for now NS flag is not used in Linux */
11259                 pbd->global_data =
11260                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11261                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11262
11263                 pbd->ip_hlen = (skb_transport_header(skb) -
11264                                 skb_network_header(skb)) / 2;
11265
11266                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11267
11268                 pbd->total_hlen = cpu_to_le16(hlen);
11269                 hlen = hlen*2;
11270
11271                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11272
11273                 if (xmit_type & XMIT_CSUM_V4)
11274                         tx_start_bd->bd_flags.as_bitfield |=
11275                                                 ETH_TX_BD_FLAGS_IP_CSUM;
11276                 else
11277                         tx_start_bd->bd_flags.as_bitfield |=
11278                                                 ETH_TX_BD_FLAGS_IPV6;
11279
11280                 if (xmit_type & XMIT_CSUM_TCP) {
11281                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11282
11283                 } else {
11284                         s8 fix = SKB_CS_OFF(skb); /* signed! */
11285
11286                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11287
11288                         DP(NETIF_MSG_TX_QUEUED,
11289                            "hlen %d  fix %d  csum before fix %x\n",
11290                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11291
11292                         /* HW bug: fixup the CSUM */
11293                         pbd->tcp_pseudo_csum =
11294                                 bnx2x_csum_fix(skb_transport_header(skb),
11295                                                SKB_CS(skb), fix);
11296
11297                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11298                            pbd->tcp_pseudo_csum);
11299                 }
11300         }
11301
11302         mapping = pci_map_single(bp->pdev, skb->data,
11303                                  skb_headlen(skb), PCI_DMA_TODEVICE);
11304
11305         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11306         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11307         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11308         tx_start_bd->nbd = cpu_to_le16(nbd);
11309         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11310         pkt_size = tx_start_bd->nbytes;
11311
11312         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
11313            "  nbytes %d  flags %x  vlan %x\n",
11314            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11315            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11316            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11317
11318         if (xmit_type & XMIT_GSO) {
11319
11320                 DP(NETIF_MSG_TX_QUEUED,
11321                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
11322                    skb->len, hlen, skb_headlen(skb),
11323                    skb_shinfo(skb)->gso_size);
11324
11325                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11326
11327                 if (unlikely(skb_headlen(skb) > hlen))
11328                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11329                                                  hlen, bd_prod, ++nbd);
11330
11331                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11332                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11333                 pbd->tcp_flags = pbd_tcp_flags(skb);
11334
11335                 if (xmit_type & XMIT_GSO_V4) {
11336                         pbd->ip_id = swab16(ip_hdr(skb)->id);
11337                         pbd->tcp_pseudo_csum =
11338                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11339                                                           ip_hdr(skb)->daddr,
11340                                                           0, IPPROTO_TCP, 0));
11341
11342                 } else
11343                         pbd->tcp_pseudo_csum =
11344                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11345                                                         &ipv6_hdr(skb)->daddr,
11346                                                         0, IPPROTO_TCP, 0));
11347
11348                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11349         }
11350         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11351
11352         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11353                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11354
11355                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11356                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11357                 if (total_pkt_bd == NULL)
11358                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11359
11360                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11361                                        frag->size, PCI_DMA_TODEVICE);
11362
11363                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11364                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11365                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11366                 le16_add_cpu(&pkt_size, frag->size);
11367
11368                 DP(NETIF_MSG_TX_QUEUED,
11369                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
11370                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11371                    le16_to_cpu(tx_data_bd->nbytes));
11372         }
11373
11374         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11375
11376         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11377
11378         /* now send a tx doorbell, counting the next BD
11379          * if the packet contains or ends with it
11380          */
11381         if (TX_BD_POFF(bd_prod) < nbd)
11382                 nbd++;
11383
11384         if (total_pkt_bd != NULL)
11385                 total_pkt_bd->total_pkt_bytes = pkt_size;
11386
11387         if (pbd)
11388                 DP(NETIF_MSG_TX_QUEUED,
11389                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
11390                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
11391                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11392                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11393                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11394
11395         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
11396
11397         /*
11398          * Make sure that the BD data is updated before updating the producer
11399          * since FW might read the BD right after the producer is updated.
11400          * This is only applicable for weak-ordered memory model archs such
11401          * as IA-64. The following barrier is also mandatory since FW will
11402          * assumes packets must have BDs.
11403          */
11404         wmb();
11405
11406         fp->tx_db.data.prod += nbd;
11407         barrier();
11408         DOORBELL(bp, fp->index, fp->tx_db.raw);
11409
11410         mmiowb();
11411
11412         fp->tx_bd_prod += nbd;
11413
11414         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11415                 netif_tx_stop_queue(txq);
11416                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11417                    if we put Tx into XOFF state. */
11418                 smp_mb();
11419                 fp->eth_q_stats.driver_xoff++;
11420                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11421                         netif_tx_wake_queue(txq);
11422         }
11423         fp->tx_pkt++;
11424
11425         return NETDEV_TX_OK;
11426 }
11427
11428 /* called with rtnl_lock */
11429 static int bnx2x_open(struct net_device *dev)
11430 {
11431         struct bnx2x *bp = netdev_priv(dev);
11432
11433         netif_carrier_off(dev);
11434
11435         bnx2x_set_power_state(bp, PCI_D0);
11436
11437         return bnx2x_nic_load(bp, LOAD_OPEN);
11438 }
11439
11440 /* called with rtnl_lock */
11441 static int bnx2x_close(struct net_device *dev)
11442 {
11443         struct bnx2x *bp = netdev_priv(dev);
11444
11445         /* Unload the driver, release IRQs */
11446         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11447         if (atomic_read(&bp->pdev->enable_cnt) == 1)
11448                 if (!CHIP_REV_IS_SLOW(bp))
11449                         bnx2x_set_power_state(bp, PCI_D3hot);
11450
11451         return 0;
11452 }
11453
11454 /* called with netif_tx_lock from dev_mcast.c */
11455 static void bnx2x_set_rx_mode(struct net_device *dev)
11456 {
11457         struct bnx2x *bp = netdev_priv(dev);
11458         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11459         int port = BP_PORT(bp);
11460
11461         if (bp->state != BNX2X_STATE_OPEN) {
11462                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11463                 return;
11464         }
11465
11466         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11467
11468         if (dev->flags & IFF_PROMISC)
11469                 rx_mode = BNX2X_RX_MODE_PROMISC;
11470
11471         else if ((dev->flags & IFF_ALLMULTI) ||
11472                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11473                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11474
11475         else { /* some multicasts */
11476                 if (CHIP_IS_E1(bp)) {
11477                         int i, old, offset;
11478                         struct dev_mc_list *mclist;
11479                         struct mac_configuration_cmd *config =
11480                                                 bnx2x_sp(bp, mcast_config);
11481
11482                         for (i = 0, mclist = dev->mc_list;
11483                              mclist && (i < dev->mc_count);
11484                              i++, mclist = mclist->next) {
11485
11486                                 config->config_table[i].
11487                                         cam_entry.msb_mac_addr =
11488                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
11489                                 config->config_table[i].
11490                                         cam_entry.middle_mac_addr =
11491                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
11492                                 config->config_table[i].
11493                                         cam_entry.lsb_mac_addr =
11494                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
11495                                 config->config_table[i].cam_entry.flags =
11496                                                         cpu_to_le16(port);
11497                                 config->config_table[i].
11498                                         target_table_entry.flags = 0;
11499                                 config->config_table[i].target_table_entry.
11500                                         clients_bit_vector =
11501                                                 cpu_to_le32(1 << BP_L_ID(bp));
11502                                 config->config_table[i].
11503                                         target_table_entry.vlan_id = 0;
11504
11505                                 DP(NETIF_MSG_IFUP,
11506                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11507                                    config->config_table[i].
11508                                                 cam_entry.msb_mac_addr,
11509                                    config->config_table[i].
11510                                                 cam_entry.middle_mac_addr,
11511                                    config->config_table[i].
11512                                                 cam_entry.lsb_mac_addr);
11513                         }
11514                         old = config->hdr.length;
11515                         if (old > i) {
11516                                 for (; i < old; i++) {
11517                                         if (CAM_IS_INVALID(config->
11518                                                            config_table[i])) {
11519                                                 /* already invalidated */
11520                                                 break;
11521                                         }
11522                                         /* invalidate */
11523                                         CAM_INVALIDATE(config->
11524                                                        config_table[i]);
11525                                 }
11526                         }
11527
11528                         if (CHIP_REV_IS_SLOW(bp))
11529                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11530                         else
11531                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
11532
11533                         config->hdr.length = i;
11534                         config->hdr.offset = offset;
11535                         config->hdr.client_id = bp->fp->cl_id;
11536                         config->hdr.reserved1 = 0;
11537
11538                         bp->set_mac_pending++;
11539                         smp_wmb();
11540
11541                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11542                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11543                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11544                                       0);
11545                 } else { /* E1H */
11546                         /* Accept one or more multicasts */
11547                         struct dev_mc_list *mclist;
11548                         u32 mc_filter[MC_HASH_SIZE];
11549                         u32 crc, bit, regidx;
11550                         int i;
11551
11552                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11553
11554                         for (i = 0, mclist = dev->mc_list;
11555                              mclist && (i < dev->mc_count);
11556                              i++, mclist = mclist->next) {
11557
11558                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11559                                    mclist->dmi_addr);
11560
11561                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11562                                 bit = (crc >> 24) & 0xff;
11563                                 regidx = bit >> 5;
11564                                 bit &= 0x1f;
11565                                 mc_filter[regidx] |= (1 << bit);
11566                         }
11567
11568                         for (i = 0; i < MC_HASH_SIZE; i++)
11569                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11570                                        mc_filter[i]);
11571                 }
11572         }
11573
11574         bp->rx_mode = rx_mode;
11575         bnx2x_set_storm_rx_mode(bp);
11576 }
11577
11578 /* called with rtnl_lock */
11579 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11580 {
11581         struct sockaddr *addr = p;
11582         struct bnx2x *bp = netdev_priv(dev);
11583
11584         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11585                 return -EINVAL;
11586
11587         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11588         if (netif_running(dev)) {
11589                 if (CHIP_IS_E1(bp))
11590                         bnx2x_set_eth_mac_addr_e1(bp, 1);
11591                 else
11592                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
11593         }
11594
11595         return 0;
11596 }
11597
11598 /* called with rtnl_lock */
11599 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11600                            int devad, u16 addr)
11601 {
11602         struct bnx2x *bp = netdev_priv(netdev);
11603         u16 value;
11604         int rc;
11605         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11606
11607         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11608            prtad, devad, addr);
11609
11610         if (prtad != bp->mdio.prtad) {
11611                 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11612                    prtad, bp->mdio.prtad);
11613                 return -EINVAL;
11614         }
11615
11616         /* The HW expects different devad if CL22 is used */
11617         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11618
11619         bnx2x_acquire_phy_lock(bp);
11620         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11621                              devad, addr, &value);
11622         bnx2x_release_phy_lock(bp);
11623         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11624
11625         if (!rc)
11626                 rc = value;
11627         return rc;
11628 }
11629
11630 /* called with rtnl_lock */
11631 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11632                             u16 addr, u16 value)
11633 {
11634         struct bnx2x *bp = netdev_priv(netdev);
11635         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11636         int rc;
11637
11638         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11639                            " value 0x%x\n", prtad, devad, addr, value);
11640
11641         if (prtad != bp->mdio.prtad) {
11642                 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11643                    prtad, bp->mdio.prtad);
11644                 return -EINVAL;
11645         }
11646
11647         /* The HW expects different devad if CL22 is used */
11648         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11649
11650         bnx2x_acquire_phy_lock(bp);
11651         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11652                               devad, addr, value);
11653         bnx2x_release_phy_lock(bp);
11654         return rc;
11655 }
11656
11657 /* called with rtnl_lock */
11658 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11659 {
11660         struct bnx2x *bp = netdev_priv(dev);
11661         struct mii_ioctl_data *mdio = if_mii(ifr);
11662
11663         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11664            mdio->phy_id, mdio->reg_num, mdio->val_in);
11665
11666         if (!netif_running(dev))
11667                 return -EAGAIN;
11668
11669         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11670 }
11671
11672 /* called with rtnl_lock */
11673 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11674 {
11675         struct bnx2x *bp = netdev_priv(dev);
11676         int rc = 0;
11677
11678         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11679             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11680                 return -EINVAL;
11681
11682         /* This does not race with packet allocation
11683          * because the actual alloc size is
11684          * only updated as part of load
11685          */
11686         dev->mtu = new_mtu;
11687
11688         if (netif_running(dev)) {
11689                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11690                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11691         }
11692
11693         return rc;
11694 }
11695
11696 static void bnx2x_tx_timeout(struct net_device *dev)
11697 {
11698         struct bnx2x *bp = netdev_priv(dev);
11699
11700 #ifdef BNX2X_STOP_ON_ERROR
11701         if (!bp->panic)
11702                 bnx2x_panic();
11703 #endif
11704         /* This allows the netif to be shutdown gracefully before resetting */
11705         schedule_work(&bp->reset_task);
11706 }
11707
11708 #ifdef BCM_VLAN
11709 /* called with rtnl_lock */
11710 static void bnx2x_vlan_rx_register(struct net_device *dev,
11711                                    struct vlan_group *vlgrp)
11712 {
11713         struct bnx2x *bp = netdev_priv(dev);
11714
11715         bp->vlgrp = vlgrp;
11716
11717         /* Set flags according to the required capabilities */
11718         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11719
11720         if (dev->features & NETIF_F_HW_VLAN_TX)
11721                 bp->flags |= HW_VLAN_TX_FLAG;
11722
11723         if (dev->features & NETIF_F_HW_VLAN_RX)
11724                 bp->flags |= HW_VLAN_RX_FLAG;
11725
11726         if (netif_running(dev))
11727                 bnx2x_set_client_config(bp);
11728 }
11729
11730 #endif
11731
11732 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11733 static void poll_bnx2x(struct net_device *dev)
11734 {
11735         struct bnx2x *bp = netdev_priv(dev);
11736
11737         disable_irq(bp->pdev->irq);
11738         bnx2x_interrupt(bp->pdev->irq, dev);
11739         enable_irq(bp->pdev->irq);
11740 }
11741 #endif
11742
11743 static const struct net_device_ops bnx2x_netdev_ops = {
11744         .ndo_open               = bnx2x_open,
11745         .ndo_stop               = bnx2x_close,
11746         .ndo_start_xmit         = bnx2x_start_xmit,
11747         .ndo_set_multicast_list = bnx2x_set_rx_mode,
11748         .ndo_set_mac_address    = bnx2x_change_mac_addr,
11749         .ndo_validate_addr      = eth_validate_addr,
11750         .ndo_do_ioctl           = bnx2x_ioctl,
11751         .ndo_change_mtu         = bnx2x_change_mtu,
11752         .ndo_tx_timeout         = bnx2x_tx_timeout,
11753 #ifdef BCM_VLAN
11754         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
11755 #endif
11756 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11757         .ndo_poll_controller    = poll_bnx2x,
11758 #endif
11759 };
11760
11761 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11762                                     struct net_device *dev)
11763 {
11764         struct bnx2x *bp;
11765         int rc;
11766
11767         SET_NETDEV_DEV(dev, &pdev->dev);
11768         bp = netdev_priv(dev);
11769
11770         bp->dev = dev;
11771         bp->pdev = pdev;
11772         bp->flags = 0;
11773         bp->func = PCI_FUNC(pdev->devfn);
11774
11775         rc = pci_enable_device(pdev);
11776         if (rc) {
11777                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11778                 goto err_out;
11779         }
11780
11781         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11782                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11783                        " aborting\n");
11784                 rc = -ENODEV;
11785                 goto err_out_disable;
11786         }
11787
11788         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11789                 printk(KERN_ERR PFX "Cannot find second PCI device"
11790                        " base address, aborting\n");
11791                 rc = -ENODEV;
11792                 goto err_out_disable;
11793         }
11794
11795         if (atomic_read(&pdev->enable_cnt) == 1) {
11796                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11797                 if (rc) {
11798                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11799                                " aborting\n");
11800                         goto err_out_disable;
11801                 }
11802
11803                 pci_set_master(pdev);
11804                 pci_save_state(pdev);
11805         }
11806
11807         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11808         if (bp->pm_cap == 0) {
11809                 printk(KERN_ERR PFX "Cannot find power management"
11810                        " capability, aborting\n");
11811                 rc = -EIO;
11812                 goto err_out_release;
11813         }
11814
11815         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11816         if (bp->pcie_cap == 0) {
11817                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11818                        " aborting\n");
11819                 rc = -EIO;
11820                 goto err_out_release;
11821         }
11822
11823         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11824                 bp->flags |= USING_DAC_FLAG;
11825                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11826                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11827                                " failed, aborting\n");
11828                         rc = -EIO;
11829                         goto err_out_release;
11830                 }
11831
11832         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11833                 printk(KERN_ERR PFX "System does not support DMA,"
11834                        " aborting\n");
11835                 rc = -EIO;
11836                 goto err_out_release;
11837         }
11838
11839         dev->mem_start = pci_resource_start(pdev, 0);
11840         dev->base_addr = dev->mem_start;
11841         dev->mem_end = pci_resource_end(pdev, 0);
11842
11843         dev->irq = pdev->irq;
11844
11845         bp->regview = pci_ioremap_bar(pdev, 0);
11846         if (!bp->regview) {
11847                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11848                 rc = -ENOMEM;
11849                 goto err_out_release;
11850         }
11851
11852         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11853                                         min_t(u64, BNX2X_DB_SIZE,
11854                                               pci_resource_len(pdev, 2)));
11855         if (!bp->doorbells) {
11856                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11857                 rc = -ENOMEM;
11858                 goto err_out_unmap;
11859         }
11860
11861         bnx2x_set_power_state(bp, PCI_D0);
11862
11863         /* clean indirect addresses */
11864         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11865                                PCICFG_VENDOR_ID_OFFSET);
11866         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11867         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11868         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11869         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11870
11871         dev->watchdog_timeo = TX_TIMEOUT;
11872
11873         dev->netdev_ops = &bnx2x_netdev_ops;
11874         dev->ethtool_ops = &bnx2x_ethtool_ops;
11875         dev->features |= NETIF_F_SG;
11876         dev->features |= NETIF_F_HW_CSUM;
11877         if (bp->flags & USING_DAC_FLAG)
11878                 dev->features |= NETIF_F_HIGHDMA;
11879         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11880         dev->features |= NETIF_F_TSO6;
11881 #ifdef BCM_VLAN
11882         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11883         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11884
11885         dev->vlan_features |= NETIF_F_SG;
11886         dev->vlan_features |= NETIF_F_HW_CSUM;
11887         if (bp->flags & USING_DAC_FLAG)
11888                 dev->vlan_features |= NETIF_F_HIGHDMA;
11889         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11890         dev->vlan_features |= NETIF_F_TSO6;
11891 #endif
11892
11893         /* get_port_hwinfo() will set prtad and mmds properly */
11894         bp->mdio.prtad = MDIO_PRTAD_NONE;
11895         bp->mdio.mmds = 0;
11896         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11897         bp->mdio.dev = dev;
11898         bp->mdio.mdio_read = bnx2x_mdio_read;
11899         bp->mdio.mdio_write = bnx2x_mdio_write;
11900
11901         return 0;
11902
11903 err_out_unmap:
11904         if (bp->regview) {
11905                 iounmap(bp->regview);
11906                 bp->regview = NULL;
11907         }
11908         if (bp->doorbells) {
11909                 iounmap(bp->doorbells);
11910                 bp->doorbells = NULL;
11911         }
11912
11913 err_out_release:
11914         if (atomic_read(&pdev->enable_cnt) == 1)
11915                 pci_release_regions(pdev);
11916
11917 err_out_disable:
11918         pci_disable_device(pdev);
11919         pci_set_drvdata(pdev, NULL);
11920
11921 err_out:
11922         return rc;
11923 }
11924
11925 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11926                                                  int *width, int *speed)
11927 {
11928         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11929
11930         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11931
11932         /* return value of 1=2.5GHz 2=5GHz */
11933         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11934 }
11935
11936 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11937 {
11938         const struct firmware *firmware = bp->firmware;
11939         struct bnx2x_fw_file_hdr *fw_hdr;
11940         struct bnx2x_fw_file_section *sections;
11941         u32 offset, len, num_ops;
11942         u16 *ops_offsets;
11943         int i;
11944         const u8 *fw_ver;
11945
11946         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11947                 return -EINVAL;
11948
11949         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11950         sections = (struct bnx2x_fw_file_section *)fw_hdr;
11951
11952         /* Make sure none of the offsets and sizes make us read beyond
11953          * the end of the firmware data */
11954         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11955                 offset = be32_to_cpu(sections[i].offset);
11956                 len = be32_to_cpu(sections[i].len);
11957                 if (offset + len > firmware->size) {
11958                         printk(KERN_ERR PFX "Section %d length is out of "
11959                                             "bounds\n", i);
11960                         return -EINVAL;
11961                 }
11962         }
11963
11964         /* Likewise for the init_ops offsets */
11965         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11966         ops_offsets = (u16 *)(firmware->data + offset);
11967         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11968
11969         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11970                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11971                         printk(KERN_ERR PFX "Section offset %d is out of "
11972                                             "bounds\n", i);
11973                         return -EINVAL;
11974                 }
11975         }
11976
11977         /* Check FW version */
11978         offset = be32_to_cpu(fw_hdr->fw_version.offset);
11979         fw_ver = firmware->data + offset;
11980         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11981             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11982             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11983             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11984                 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11985                                     " Should be %d.%d.%d.%d\n",
11986                        fw_ver[0], fw_ver[1], fw_ver[2],
11987                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11988                        BCM_5710_FW_MINOR_VERSION,
11989                        BCM_5710_FW_REVISION_VERSION,
11990                        BCM_5710_FW_ENGINEERING_VERSION);
11991                 return -EINVAL;
11992         }
11993
11994         return 0;
11995 }
11996
11997 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11998 {
11999         const __be32 *source = (const __be32 *)_source;
12000         u32 *target = (u32 *)_target;
12001         u32 i;
12002
12003         for (i = 0; i < n/4; i++)
12004                 target[i] = be32_to_cpu(source[i]);
12005 }
12006
12007 /*
12008    Ops array is stored in the following format:
12009    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12010  */
12011 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12012 {
12013         const __be32 *source = (const __be32 *)_source;
12014         struct raw_op *target = (struct raw_op *)_target;
12015         u32 i, j, tmp;
12016
12017         for (i = 0, j = 0; i < n/8; i++, j += 2) {
12018                 tmp = be32_to_cpu(source[j]);
12019                 target[i].op = (tmp >> 24) & 0xff;
12020                 target[i].offset =  tmp & 0xffffff;
12021                 target[i].raw_data = be32_to_cpu(source[j+1]);
12022         }
12023 }
12024
12025 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12026 {
12027         const __be16 *source = (const __be16 *)_source;
12028         u16 *target = (u16 *)_target;
12029         u32 i;
12030
12031         for (i = 0; i < n/2; i++)
12032                 target[i] = be16_to_cpu(source[i]);
12033 }
12034
12035 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12036         do { \
12037                 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12038                 bp->arr = kmalloc(len, GFP_KERNEL); \
12039                 if (!bp->arr) { \
12040                         printk(KERN_ERR PFX "Failed to allocate %d bytes " \
12041                                             "for "#arr"\n", len); \
12042                         goto lbl; \
12043                 } \
12044                 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12045                      (u8 *)bp->arr, len); \
12046         } while (0)
12047
12048 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12049 {
12050         const char *fw_file_name;
12051         struct bnx2x_fw_file_hdr *fw_hdr;
12052         int rc;
12053
12054         if (CHIP_IS_E1(bp))
12055                 fw_file_name = FW_FILE_NAME_E1;
12056         else
12057                 fw_file_name = FW_FILE_NAME_E1H;
12058
12059         printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
12060
12061         rc = request_firmware(&bp->firmware, fw_file_name, dev);
12062         if (rc) {
12063                 printk(KERN_ERR PFX "Can't load firmware file %s\n",
12064                        fw_file_name);
12065                 goto request_firmware_exit;
12066         }
12067
12068         rc = bnx2x_check_firmware(bp);
12069         if (rc) {
12070                 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
12071                 goto request_firmware_exit;
12072         }
12073
12074         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12075
12076         /* Initialize the pointers to the init arrays */
12077         /* Blob */
12078         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12079
12080         /* Opcodes */
12081         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12082
12083         /* Offsets */
12084         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12085                             be16_to_cpu_n);
12086
12087         /* STORMs firmware */
12088         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12089                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12090         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
12091                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12092         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12093                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12094         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
12095                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
12096         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12097                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12098         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
12099                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12100         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12101                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12102         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
12103                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
12104
12105         return 0;
12106
12107 init_offsets_alloc_err:
12108         kfree(bp->init_ops);
12109 init_ops_alloc_err:
12110         kfree(bp->init_data);
12111 request_firmware_exit:
12112         release_firmware(bp->firmware);
12113
12114         return rc;
12115 }
12116
12117
12118 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12119                                     const struct pci_device_id *ent)
12120 {
12121         struct net_device *dev = NULL;
12122         struct bnx2x *bp;
12123         int pcie_width, pcie_speed;
12124         int rc;
12125
12126         /* dev zeroed in init_etherdev */
12127         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
12128         if (!dev) {
12129                 printk(KERN_ERR PFX "Cannot allocate net device\n");
12130                 return -ENOMEM;
12131         }
12132
12133         bp = netdev_priv(dev);
12134         bp->msglevel = debug;
12135
12136         pci_set_drvdata(pdev, dev);
12137
12138         rc = bnx2x_init_dev(pdev, dev);
12139         if (rc < 0) {
12140                 free_netdev(dev);
12141                 return rc;
12142         }
12143
12144         rc = bnx2x_init_bp(bp);
12145         if (rc)
12146                 goto init_one_exit;
12147
12148         /* Set init arrays */
12149         rc = bnx2x_init_firmware(bp, &pdev->dev);
12150         if (rc) {
12151                 printk(KERN_ERR PFX "Error loading firmware\n");
12152                 goto init_one_exit;
12153         }
12154
12155         rc = register_netdev(dev);
12156         if (rc) {
12157                 dev_err(&pdev->dev, "Cannot register net device\n");
12158                 goto init_one_exit;
12159         }
12160
12161         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12162         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
12163                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
12164                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12165                pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12166                dev->base_addr, bp->pdev->irq);
12167         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
12168
12169         return 0;
12170
12171 init_one_exit:
12172         if (bp->regview)
12173                 iounmap(bp->regview);
12174
12175         if (bp->doorbells)
12176                 iounmap(bp->doorbells);
12177
12178         free_netdev(dev);
12179
12180         if (atomic_read(&pdev->enable_cnt) == 1)
12181                 pci_release_regions(pdev);
12182
12183         pci_disable_device(pdev);
12184         pci_set_drvdata(pdev, NULL);
12185
12186         return rc;
12187 }
12188
12189 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12190 {
12191         struct net_device *dev = pci_get_drvdata(pdev);
12192         struct bnx2x *bp;
12193
12194         if (!dev) {
12195                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12196                 return;
12197         }
12198         bp = netdev_priv(dev);
12199
12200         unregister_netdev(dev);
12201
12202         kfree(bp->init_ops_offsets);
12203         kfree(bp->init_ops);
12204         kfree(bp->init_data);
12205         release_firmware(bp->firmware);
12206
12207         if (bp->regview)
12208                 iounmap(bp->regview);
12209
12210         if (bp->doorbells)
12211                 iounmap(bp->doorbells);
12212
12213         free_netdev(dev);
12214
12215         if (atomic_read(&pdev->enable_cnt) == 1)
12216                 pci_release_regions(pdev);
12217
12218         pci_disable_device(pdev);
12219         pci_set_drvdata(pdev, NULL);
12220 }
12221
12222 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12223 {
12224         struct net_device *dev = pci_get_drvdata(pdev);
12225         struct bnx2x *bp;
12226
12227         if (!dev) {
12228                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12229                 return -ENODEV;
12230         }
12231         bp = netdev_priv(dev);
12232
12233         rtnl_lock();
12234
12235         pci_save_state(pdev);
12236
12237         if (!netif_running(dev)) {
12238                 rtnl_unlock();
12239                 return 0;
12240         }
12241
12242         netif_device_detach(dev);
12243
12244         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12245
12246         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
12247
12248         rtnl_unlock();
12249
12250         return 0;
12251 }
12252
12253 static int bnx2x_resume(struct pci_dev *pdev)
12254 {
12255         struct net_device *dev = pci_get_drvdata(pdev);
12256         struct bnx2x *bp;
12257         int rc;
12258
12259         if (!dev) {
12260                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12261                 return -ENODEV;
12262         }
12263         bp = netdev_priv(dev);
12264
12265         rtnl_lock();
12266
12267         pci_restore_state(pdev);
12268
12269         if (!netif_running(dev)) {
12270                 rtnl_unlock();
12271                 return 0;
12272         }
12273
12274         bnx2x_set_power_state(bp, PCI_D0);
12275         netif_device_attach(dev);
12276
12277         rc = bnx2x_nic_load(bp, LOAD_OPEN);
12278
12279         rtnl_unlock();
12280
12281         return rc;
12282 }
12283
12284 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12285 {
12286         int i;
12287
12288         bp->state = BNX2X_STATE_ERROR;
12289
12290         bp->rx_mode = BNX2X_RX_MODE_NONE;
12291
12292         bnx2x_netif_stop(bp, 0);
12293
12294         del_timer_sync(&bp->timer);
12295         bp->stats_state = STATS_STATE_DISABLED;
12296         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12297
12298         /* Release IRQs */
12299         bnx2x_free_irq(bp);
12300
12301         if (CHIP_IS_E1(bp)) {
12302                 struct mac_configuration_cmd *config =
12303                                                 bnx2x_sp(bp, mcast_config);
12304
12305                 for (i = 0; i < config->hdr.length; i++)
12306                         CAM_INVALIDATE(config->config_table[i]);
12307         }
12308
12309         /* Free SKBs, SGEs, TPA pool and driver internals */
12310         bnx2x_free_skbs(bp);
12311         for_each_queue(bp, i)
12312                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12313         for_each_queue(bp, i)
12314                 netif_napi_del(&bnx2x_fp(bp, i, napi));
12315         bnx2x_free_mem(bp);
12316
12317         bp->state = BNX2X_STATE_CLOSED;
12318
12319         netif_carrier_off(bp->dev);
12320
12321         return 0;
12322 }
12323
12324 static void bnx2x_eeh_recover(struct bnx2x *bp)
12325 {
12326         u32 val;
12327
12328         mutex_init(&bp->port.phy_mutex);
12329
12330         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12331         bp->link_params.shmem_base = bp->common.shmem_base;
12332         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12333
12334         if (!bp->common.shmem_base ||
12335             (bp->common.shmem_base < 0xA0000) ||
12336             (bp->common.shmem_base >= 0xC0000)) {
12337                 BNX2X_DEV_INFO("MCP not active\n");
12338                 bp->flags |= NO_MCP_FLAG;
12339                 return;
12340         }
12341
12342         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12343         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12344                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12345                 BNX2X_ERR("BAD MCP validity signature\n");
12346
12347         if (!BP_NOMCP(bp)) {
12348                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12349                               & DRV_MSG_SEQ_NUMBER_MASK);
12350                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12351         }
12352 }
12353
12354 /**
12355  * bnx2x_io_error_detected - called when PCI error is detected
12356  * @pdev: Pointer to PCI device
12357  * @state: The current pci connection state
12358  *
12359  * This function is called after a PCI bus error affecting
12360  * this device has been detected.
12361  */
12362 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12363                                                 pci_channel_state_t state)
12364 {
12365         struct net_device *dev = pci_get_drvdata(pdev);
12366         struct bnx2x *bp = netdev_priv(dev);
12367
12368         rtnl_lock();
12369
12370         netif_device_detach(dev);
12371
12372         if (state == pci_channel_io_perm_failure) {
12373                 rtnl_unlock();
12374                 return PCI_ERS_RESULT_DISCONNECT;
12375         }
12376
12377         if (netif_running(dev))
12378                 bnx2x_eeh_nic_unload(bp);
12379
12380         pci_disable_device(pdev);
12381
12382         rtnl_unlock();
12383
12384         /* Request a slot reset */
12385         return PCI_ERS_RESULT_NEED_RESET;
12386 }
12387
12388 /**
12389  * bnx2x_io_slot_reset - called after the PCI bus has been reset
12390  * @pdev: Pointer to PCI device
12391  *
12392  * Restart the card from scratch, as if from a cold-boot.
12393  */
12394 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12395 {
12396         struct net_device *dev = pci_get_drvdata(pdev);
12397         struct bnx2x *bp = netdev_priv(dev);
12398
12399         rtnl_lock();
12400
12401         if (pci_enable_device(pdev)) {
12402                 dev_err(&pdev->dev,
12403                         "Cannot re-enable PCI device after reset\n");
12404                 rtnl_unlock();
12405                 return PCI_ERS_RESULT_DISCONNECT;
12406         }
12407
12408         pci_set_master(pdev);
12409         pci_restore_state(pdev);
12410
12411         if (netif_running(dev))
12412                 bnx2x_set_power_state(bp, PCI_D0);
12413
12414         rtnl_unlock();
12415
12416         return PCI_ERS_RESULT_RECOVERED;
12417 }
12418
12419 /**
12420  * bnx2x_io_resume - called when traffic can start flowing again
12421  * @pdev: Pointer to PCI device
12422  *
12423  * This callback is called when the error recovery driver tells us that
12424  * its OK to resume normal operation.
12425  */
12426 static void bnx2x_io_resume(struct pci_dev *pdev)
12427 {
12428         struct net_device *dev = pci_get_drvdata(pdev);
12429         struct bnx2x *bp = netdev_priv(dev);
12430
12431         rtnl_lock();
12432
12433         bnx2x_eeh_recover(bp);
12434
12435         if (netif_running(dev))
12436                 bnx2x_nic_load(bp, LOAD_NORMAL);
12437
12438         netif_device_attach(dev);
12439
12440         rtnl_unlock();
12441 }
12442
12443 static struct pci_error_handlers bnx2x_err_handler = {
12444         .error_detected = bnx2x_io_error_detected,
12445         .slot_reset     = bnx2x_io_slot_reset,
12446         .resume         = bnx2x_io_resume,
12447 };
12448
12449 static struct pci_driver bnx2x_pci_driver = {
12450         .name        = DRV_MODULE_NAME,
12451         .id_table    = bnx2x_pci_tbl,
12452         .probe       = bnx2x_init_one,
12453         .remove      = __devexit_p(bnx2x_remove_one),
12454         .suspend     = bnx2x_suspend,
12455         .resume      = bnx2x_resume,
12456         .err_handler = &bnx2x_err_handler,
12457 };
12458
12459 static int __init bnx2x_init(void)
12460 {
12461         int ret;
12462
12463         printk(KERN_INFO "%s", version);
12464
12465         bnx2x_wq = create_singlethread_workqueue("bnx2x");
12466         if (bnx2x_wq == NULL) {
12467                 printk(KERN_ERR PFX "Cannot create workqueue\n");
12468                 return -ENOMEM;
12469         }
12470
12471         ret = pci_register_driver(&bnx2x_pci_driver);
12472         if (ret) {
12473                 printk(KERN_ERR PFX "Cannot register driver\n");
12474                 destroy_workqueue(bnx2x_wq);
12475         }
12476         return ret;
12477 }
12478
12479 static void __exit bnx2x_cleanup(void)
12480 {
12481         pci_unregister_driver(&bnx2x_pci_driver);
12482
12483         destroy_workqueue(bnx2x_wq);
12484 }
12485
12486 module_init(bnx2x_init);
12487 module_exit(bnx2x_cleanup);
12488
12489 #ifdef BCM_CNIC
12490
12491 /* count denotes the number of new completions we have seen */
12492 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12493 {
12494         struct eth_spe *spe;
12495
12496 #ifdef BNX2X_STOP_ON_ERROR
12497         if (unlikely(bp->panic))
12498                 return;
12499 #endif
12500
12501         spin_lock_bh(&bp->spq_lock);
12502         bp->cnic_spq_pending -= count;
12503
12504         for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
12505              bp->cnic_spq_pending++) {
12506
12507                 if (!bp->cnic_kwq_pending)
12508                         break;
12509
12510                 spe = bnx2x_sp_get_next(bp);
12511                 *spe = *bp->cnic_kwq_cons;
12512
12513                 bp->cnic_kwq_pending--;
12514
12515                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
12516                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12517
12518                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12519                         bp->cnic_kwq_cons = bp->cnic_kwq;
12520                 else
12521                         bp->cnic_kwq_cons++;
12522         }
12523         bnx2x_sp_prod_update(bp);
12524         spin_unlock_bh(&bp->spq_lock);
12525 }
12526
12527 static int bnx2x_cnic_sp_queue(struct net_device *dev,
12528                                struct kwqe_16 *kwqes[], u32 count)
12529 {
12530         struct bnx2x *bp = netdev_priv(dev);
12531         int i;
12532
12533 #ifdef BNX2X_STOP_ON_ERROR
12534         if (unlikely(bp->panic))
12535                 return -EIO;
12536 #endif
12537
12538         spin_lock_bh(&bp->spq_lock);
12539
12540         for (i = 0; i < count; i++) {
12541                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12542
12543                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12544                         break;
12545
12546                 *bp->cnic_kwq_prod = *spe;
12547
12548                 bp->cnic_kwq_pending++;
12549
12550                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
12551                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
12552                    spe->data.mac_config_addr.hi,
12553                    spe->data.mac_config_addr.lo,
12554                    bp->cnic_kwq_pending);
12555
12556                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12557                         bp->cnic_kwq_prod = bp->cnic_kwq;
12558                 else
12559                         bp->cnic_kwq_prod++;
12560         }
12561
12562         spin_unlock_bh(&bp->spq_lock);
12563
12564         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12565                 bnx2x_cnic_sp_post(bp, 0);
12566
12567         return i;
12568 }
12569
12570 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12571 {
12572         struct cnic_ops *c_ops;
12573         int rc = 0;
12574
12575         mutex_lock(&bp->cnic_mutex);
12576         c_ops = bp->cnic_ops;
12577         if (c_ops)
12578                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12579         mutex_unlock(&bp->cnic_mutex);
12580
12581         return rc;
12582 }
12583
12584 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12585 {
12586         struct cnic_ops *c_ops;
12587         int rc = 0;
12588
12589         rcu_read_lock();
12590         c_ops = rcu_dereference(bp->cnic_ops);
12591         if (c_ops)
12592                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12593         rcu_read_unlock();
12594
12595         return rc;
12596 }
12597
12598 /*
12599  * for commands that have no data
12600  */
12601 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12602 {
12603         struct cnic_ctl_info ctl = {0};
12604
12605         ctl.cmd = cmd;
12606
12607         return bnx2x_cnic_ctl_send(bp, &ctl);
12608 }
12609
12610 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12611 {
12612         struct cnic_ctl_info ctl;
12613
12614         /* first we tell CNIC and only then we count this as a completion */
12615         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12616         ctl.data.comp.cid = cid;
12617
12618         bnx2x_cnic_ctl_send_bh(bp, &ctl);
12619         bnx2x_cnic_sp_post(bp, 1);
12620 }
12621
12622 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12623 {
12624         struct bnx2x *bp = netdev_priv(dev);
12625         int rc = 0;
12626
12627         switch (ctl->cmd) {
12628         case DRV_CTL_CTXTBL_WR_CMD: {
12629                 u32 index = ctl->data.io.offset;
12630                 dma_addr_t addr = ctl->data.io.dma_addr;
12631
12632                 bnx2x_ilt_wr(bp, index, addr);
12633                 break;
12634         }
12635
12636         case DRV_CTL_COMPLETION_CMD: {
12637                 int count = ctl->data.comp.comp_count;
12638
12639                 bnx2x_cnic_sp_post(bp, count);
12640                 break;
12641         }
12642
12643         /* rtnl_lock is held.  */
12644         case DRV_CTL_START_L2_CMD: {
12645                 u32 cli = ctl->data.ring.client_id;
12646
12647                 bp->rx_mode_cl_mask |= (1 << cli);
12648                 bnx2x_set_storm_rx_mode(bp);
12649                 break;
12650         }
12651
12652         /* rtnl_lock is held.  */
12653         case DRV_CTL_STOP_L2_CMD: {
12654                 u32 cli = ctl->data.ring.client_id;
12655
12656                 bp->rx_mode_cl_mask &= ~(1 << cli);
12657                 bnx2x_set_storm_rx_mode(bp);
12658                 break;
12659         }
12660
12661         default:
12662                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12663                 rc = -EINVAL;
12664         }
12665
12666         return rc;
12667 }
12668
12669 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12670 {
12671         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12672
12673         if (bp->flags & USING_MSIX_FLAG) {
12674                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12675                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12676                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12677         } else {
12678                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12679                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12680         }
12681         cp->irq_arr[0].status_blk = bp->cnic_sb;
12682         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12683         cp->irq_arr[1].status_blk = bp->def_status_blk;
12684         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12685
12686         cp->num_irq = 2;
12687 }
12688
12689 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12690                                void *data)
12691 {
12692         struct bnx2x *bp = netdev_priv(dev);
12693         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12694
12695         if (ops == NULL)
12696                 return -EINVAL;
12697
12698         if (atomic_read(&bp->intr_sem) != 0)
12699                 return -EBUSY;
12700
12701         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12702         if (!bp->cnic_kwq)
12703                 return -ENOMEM;
12704
12705         bp->cnic_kwq_cons = bp->cnic_kwq;
12706         bp->cnic_kwq_prod = bp->cnic_kwq;
12707         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
12708
12709         bp->cnic_spq_pending = 0;
12710         bp->cnic_kwq_pending = 0;
12711
12712         bp->cnic_data = data;
12713
12714         cp->num_irq = 0;
12715         cp->drv_state = CNIC_DRV_STATE_REGD;
12716
12717         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
12718
12719         bnx2x_setup_cnic_irq_info(bp);
12720         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
12721         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
12722         rcu_assign_pointer(bp->cnic_ops, ops);
12723
12724         return 0;
12725 }
12726
12727 static int bnx2x_unregister_cnic(struct net_device *dev)
12728 {
12729         struct bnx2x *bp = netdev_priv(dev);
12730         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12731
12732         mutex_lock(&bp->cnic_mutex);
12733         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
12734                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
12735                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
12736         }
12737         cp->drv_state = 0;
12738         rcu_assign_pointer(bp->cnic_ops, NULL);
12739         mutex_unlock(&bp->cnic_mutex);
12740         synchronize_rcu();
12741         kfree(bp->cnic_kwq);
12742         bp->cnic_kwq = NULL;
12743
12744         return 0;
12745 }
12746
12747 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12748 {
12749         struct bnx2x *bp = netdev_priv(dev);
12750         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12751
12752         cp->drv_owner = THIS_MODULE;
12753         cp->chip_id = CHIP_ID(bp);
12754         cp->pdev = bp->pdev;
12755         cp->io_base = bp->regview;
12756         cp->io_base2 = bp->doorbells;
12757         cp->max_kwqe_pending = 8;
12758         cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12759         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12760         cp->ctx_tbl_len = CNIC_ILT_LINES;
12761         cp->starting_cid = BCM_CNIC_CID_START;
12762         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12763         cp->drv_ctl = bnx2x_drv_ctl;
12764         cp->drv_register_cnic = bnx2x_register_cnic;
12765         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12766
12767         return cp;
12768 }
12769 EXPORT_SYMBOL(bnx2x_cnic_probe);
12770
12771 #endif /* BCM_CNIC */
12772