/* bnx2x.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Eliezer Tamir <eliezert@broadcom.com>
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */

/* define this to make the driver freeze on error
 * to allow getting debug info
 * (you will need to reboot afterwards)
 */
/*#define BNX2X_STOP_ON_ERROR*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/version.h>
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x.h"
#include "bnx2x_init.h"
#define DRV_MODULE_VERSION	"0.40.15"
#define DRV_MODULE_RELDATE	"$DateTime: 2007/11/15 07:28:37 $"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_INFO(cvs_version, "$Revision: #404 $");
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(onefunc, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(onefunc, "enable only first function");
MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
MODULE_PARM_DESC(debug, "default debug msglevel");

module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
enum bnx2x_board_type {
	BCM57710 = 0,
};

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/
/* locking is done by mcp */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
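/* Usage sketch (illustrative, not part of the original driver): the pair
 * above tunnels GRC register accesses through the PCI configuration window,
 * so it works even before the device BARs are ioremapped; the window is
 * parked back at PCICFG_VENDOR_ID_OFFSET so that an unrelated config cycle
 * cannot hit a live register.  The GRC offset below is made up.
 */
#if 0
	u32 val = bnx2x_reg_rd_ind(bp, 0x1000);	/* 0x1000: hypothetical */

	bnx2x_reg_wr_ind(bp, 0x1000, val | 0x1);
#endif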
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		/* DP(NETIF_MSG_DMAE, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); */
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
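/* The command image is copied into the engine's command memory one dword at
 * a time; writing 1 to the per-channel GO register then kicks channel 'idx'.
 * Usage sketch (mirrors the callers below, which use channel port * 8):
 */
#if 0
	bnx2x_post_dmae(bp, dmae, bp->port * 8);
#endif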
static void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
			     u32 dst_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->dmae;
	int port = bp->port;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int timeout = 200;

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = BNX2X_WB_COMP_VAL;

	DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
	   "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, port * 8);

	udelay(5);
	/* adjust timeout for emulation/FPGA */
	if (CHIP_REV_IS_SLOW(bp))
		timeout *= 100;
	while (*wb_comp != BNX2X_WB_COMP_VAL) {
		/* DP(NETIF_MSG_DMAE, "wb_comp 0x%08x\n", *wb_comp); */
		udelay(5);
		if (!timeout) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		timeout--;
	}
}
static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->dmae;
	int port = bp->port;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int timeout = 200;

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = BNX2X_WB_COMP_VAL;

	DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
	   "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, port * 8);

	udelay(5);
	while (*wb_comp != BNX2X_WB_COMP_VAL) {
		udelay(5);
		if (!timeout) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		timeout--;
	}

	DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
}
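/* Completion is signalled by the engine DMA-ing BNX2X_WB_COMP_VAL into the
 * slowpath write-back slot; read-side callers then pick the data up from
 * the wb_data area, the same way bnx2x_bmac_rx_disable() does further down.
 * Illustrative caller sketch (the GRC address is hypothetical):
 */
#if 0
	u32 lo, hi;

	bnx2x_read_dmae(bp, 0x8000 /* made-up GRC address */, 2);
	lo = *bnx2x_sp(bp, wb_data[0]);
	hi = *bnx2x_sp(bp, wb_data[1]);
#endif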
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, j, rc = 0;
	const char storm[] = {"XTCU"};
	const u32 intmem_base[] = { BAR_XSTRORM_INTMEM, BAR_TSTRORM_INTMEM,
				    BAR_CSTRORM_INTMEM, BAR_USTRORM_INTMEM };

	/* Go through all instances of all SEMIs */
	for (i = 0; i < 4; i++) {
		last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
				   intmem_base[i]);
		if (last_idx)
			BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
				  storm[i], last_idx);

		/* print the asserts */
		for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
			u32 row0, row1, row2, row3;

			row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
				      intmem_base[i]);
			row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
				      intmem_base[i]);
			row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
				      intmem_base[i]);
			row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
				      intmem_base[i]);

			if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
				BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x ="
					  " 0x%08x 0x%08x 0x%08x 0x%08x\n",
					  storm[i], j, row3, row2, row1, row0);
				rc++;
			} else
				break;
		}
	}
	return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)"
			  " *rx_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) fp_c_idx(%x) fp_u_idx(%x)"
			  " bd data (%x,%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
			  fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
			  fp->fp_u_idx, hw_prods->packets_prod,
			  hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[0], rx_bd[1], sw_bd->skb);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	BNX2X_ERR("end crash dump -----------------\n");

	bp->stats_state = STATS_STATE_DISABLE;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
}
static void bnx2x_enable_int(struct bnx2x *bp)
{
	int port = bp->port;
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) msi %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);
}
static void bnx2x_disable_int(struct bnx2x *bp)
{
	int port = bp->port;
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_disable_int_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_disable_int(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}
/*
 * general service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}
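/* The ack is a single dword write that packs the status block id, storm,
 * new consumer index and interrupt mode into one igu_ack_register image.
 * Example (as used by the MSI-X fast-path handler below): mask further
 * interrupts from a status block without updating its index:
 *
 *	bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);
 */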
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);

	/* the last CQE of a page is a "next page" pointer, skip it */
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	if ((rx_cons_sb != fp->rx_comp_cons) ||
	    (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
		return 1;
	return 0;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

	/* DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
	   result, BAR_IGU_INTMEM + igu_addr); */

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
	if (result == 0) {
		BNX2X_ERR("read %x from IGU\n", result);
		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
	}
#endif

	return result;
}
/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = tx_buf->first_bd;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("bad nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		nbd--;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			nbd--;
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {
		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return bd_idx;
}
static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod, cons;

	/* Tell compiler that prod and cons can change */
	barrier();
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
		(cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));

	if (prod >= cons) {
		/* used = prod - cons - prod/size + cons/size */
		used -= NUM_TX_BD - NUM_TX_RINGS;
	}

	BUG_TRAP(used <= fp->bp->tx_ring_size);
	BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);

	return (fp->bp->tx_ring_size - used);
}
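/* Each page of the TX ring donates its last descriptor to a "next page"
 * pointer, so NUM_TX_BD - NUM_TX_RINGS is the usable total and the
 * prod/TX_DESC_CNT and cons/TX_DESC_CNT terms subtract the page-crossing
 * descriptors between the two indices.  Worked example with hypothetical
 * values: prod = 300, cons = 100, TX_DESC_CNT = 256; prod has crossed one
 * page boundary and cons none, so
 *
 *	used = 300 - 100 - (300/256 - 100/256) = 200 - 1 = 199
 *
 * i.e. one of the 200 consumed slots is a link descriptor, not a packet bd.
 */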
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %d\n",
		   hw_cons, sw_cons, pkt_cons);

		/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		   } */
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(NETIF_MSG_RX_STATUS,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply(%d) state is %x\n",
				  command, fp->state);
		}
		mb(); /* force bnx2x_wait_ramrod to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n",
		   cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected ramrod (%d) state is %x\n",
			  command, bp->state);
	}
	mb(); /* force bnx2x_wait_ramrod to see the change */
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;
	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}
	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);
	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		unsigned int len, pad;
		struct sw_rx_bd *rx_buf;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];

		DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u sw_comp_cons %u"
		   " comp_ring (%u) bd_ring (%u,%u)\n",
		   hw_comp_cons, sw_comp_cons,
		   comp_ring_cons, bd_prod, bd_cons);
		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %x\n",
		   cqe->fast_path_cqe.type,
		   cqe->fast_path_cqe.error_type_flags,
		   cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);

		/* is this a slowpath msg? */
		if (unlikely(cqe->fast_path_cqe.type)) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;

			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe->fast_path_cqe.error_type_flags &
				     ETH_RX_ERROR_FALGS)) {
			/* do we sometimes forward error packets anyway? */
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags(%u) Rx packet(%u)\n",
				   cqe->fast_path_cqe.error_type_flags,
				   sw_comp_cons);
				/* TBD make sure MC counts this as a drop */
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					/* TBD count this as a drop? */
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_use_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* TBD do we pass bad csum packets in promisc */
		}

#ifdef BCM_VLAN
		if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
		     & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
		    && (bp->vlgrp != NULL))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
		rx_pkt++;

		if (rx_pkt == budget)
			break;
	}

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);

	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;

	return rx_pkt;
}
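/* Sketch of the expected caller (the NAPI poll routine itself is outside
 * this excerpt, so the names below are assumptions): rx work is bounded by
 * the budget, and interrupts are re-enabled through the IGU only once the
 * queue has gone idle.
 */
#if 0
	work_done = bnx2x_rx_int(fp, budget);
	if (work_done < budget)
		bnx2x_ack_sb(bp, fp->index, USTORM_ID,
			     fp->fp_u_idx, IGU_INT_ENABLE, 1);
#endif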
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = fp->index;

	DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
	bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);

	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}

	DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is shared and is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	if (status & 0x2) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~0x2;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
	   status);

	return IRQ_HANDLED;
}
/* end of fast path */


/*
 * General service functions
 */

static void bnx2x_leds_set(struct bnx2x *bp, unsigned int speed)
{
	int port = bp->port;

	NIG_WR(NIG_REG_LED_MODE_P0 + port*4,
	       ((bp->hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
		SHARED_HW_CFG_LED_MODE_SHIFT));
	NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);

	/* Set blinking rate to ~15.9Hz */
	NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
	       LED_BLINK_RATE_VAL);
	NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1);

	/* On Ax chip versions for speeds less than 10G
	   LED scheme is different */
	if ((CHIP_REV(bp) == CHIP_REV_Ax) && (speed < SPEED_10000)) {
		NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 1);
		NIG_WR(NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4, 0);
		NIG_WR(NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + port*4, 1);
	}
}

static void bnx2x_leds_unset(struct bnx2x *bp)
{
	int port = bp->port;

	NIG_WR(NIG_REG_LED_10G_P0 + port*4, 0);
	NIG_WR(NIG_REG_LED_MODE_P0 + port*4, SHARED_HW_CFG_LED_MAC1);
}
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
	u32 val = REG_RD(bp, reg);

	val |= bits;
	REG_WR(bp, reg, val);
	return val;
}

static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
{
	u32 val = REG_RD(bp, reg);

	val &= ~bits;
	REG_WR(bp, reg, val);
	return val;
}
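/* Example (as the link interrupt code below does): mask all three NIG
 * link-status interrupt sources for a port with one read-modify-write:
 *
 *	bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
 *		       (NIG_STATUS_XGXS0_LINK10G |
 *			NIG_STATUS_XGXS0_LINK_STATUS |
 *			NIG_STATUS_SERDES0_LINK_STATUS));
 */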
static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 cnt;
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = bp->port;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8 + 4,
		       resource_bit);
		lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = bp->port;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8, resource_bit);
	return 0;
}
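/* Usage sketch: a resource lock brackets any multi-register sequence that
 * both ports (or the MCP firmware) may touch concurrently, as the GPIO and
 * SPIO accessors below do:
 *
 *	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	(read-modify-write of MISC_REG_GPIO goes here)
 *	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
 */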
static int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
{
	int port = bp->port;
	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
	u32 tmp;
	int i, rc = 0;

	/* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x val 0x%08x\n",
	   bp->phy_addr, reg, val); */

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
		tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
		REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
		udelay(40);
	}

	tmp = ((bp->phy_addr << 21) | (reg << 16) |
	       (val & EMAC_MDIO_COMM_DATA) |
	       EMAC_MDIO_COMM_COMMAND_WRITE_22 |
	       EMAC_MDIO_COMM_START_BUSY);
	EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);

	for (i = 0; i < 50; i++) {
		udelay(10);

		tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
		if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}
	if (tmp & EMAC_MDIO_COMM_START_BUSY) {
		BNX2X_ERR("write phy register failed\n");
		rc = -EBUSY;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
		tmp |= EMAC_MDIO_MODE_AUTO_POLL;
		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
	}

	return rc;
}
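/* The MDIO_COMM dword written above carries a complete clause 22 frame:
 * PHY address in bits 25:21, register in bits 20:16, data in bits 15:0,
 * plus the command code and the START_BUSY flag.  Completion is detected
 * by polling START_BUSY until the EMAC clears it; auto-polling has to be
 * parked first because it otherwise owns the MDIO bus.
 */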
static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
{
	int port = bp->port;
	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
	u32 val;
	int i, rc = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
		val &= ~EMAC_MDIO_MODE_AUTO_POLL;
		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
		REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
		udelay(40);
	}

	val = ((bp->phy_addr << 21) | (reg << 16) |
	       EMAC_MDIO_COMM_COMMAND_READ_22 |
	       EMAC_MDIO_COMM_START_BUSY);
	EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
		if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
			val &= EMAC_MDIO_COMM_DATA;
			break;
		}
	}
	if (val & EMAC_MDIO_COMM_START_BUSY) {
		BNX2X_ERR("read phy register failed\n");
		*ret_val = 0;
		rc = -EBUSY;
	} else {
		*ret_val = val;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
		val |= EMAC_MDIO_MODE_AUTO_POLL;
		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
	}

	/* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x ret_val 0x%08x\n",
	   bp->phy_addr, reg, *ret_val); */

	return rc;
}
static int bnx2x_mdio45_ctrl_write(struct bnx2x *bp, u32 mdio_ctrl,
				   u32 phy_addr, u32 reg, u32 addr, u32 val)
{
	u32 tmp;
	int i, rc = 0;

	/* set clause 45 mode, slow down the MDIO clock to 2.5MHz
	 * (a value of 49==0x31) and make sure that the AUTO poll is off
	 */
	tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
	tmp &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
	tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
		(49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
	REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
	udelay(40);

	/* address */
	tmp = ((phy_addr << 21) | (reg << 16) | addr |
	       EMAC_MDIO_COMM_COMMAND_ADDRESS |
	       EMAC_MDIO_COMM_START_BUSY);
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);

	for (i = 0; i < 50; i++) {
		udelay(10);

		tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
		if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}
	if (tmp & EMAC_MDIO_COMM_START_BUSY) {
		BNX2X_ERR("write phy register failed\n");
		rc = -EBUSY;
	} else {
		/* data */
		tmp = ((phy_addr << 21) | (reg << 16) | val |
		       EMAC_MDIO_COMM_COMMAND_WRITE_45 |
		       EMAC_MDIO_COMM_START_BUSY);
		REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);

		for (i = 0; i < 50; i++) {
			udelay(10);

			tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
			if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
				udelay(5);
				break;
			}
		}
		if (tmp & EMAC_MDIO_COMM_START_BUSY) {
			BNX2X_ERR("write phy register failed\n");
			rc = -EBUSY;
		}
	}

	/* unset clause 45 mode, set the MDIO clock to a faster value
	 * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
	 */
	tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
	tmp &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
	tmp |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
		tmp |= EMAC_MDIO_MODE_AUTO_POLL;
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);

	return rc;
}
static int bnx2x_mdio45_write(struct bnx2x *bp, u32 phy_addr, u32 reg,
			      u32 addr, u32 val)
{
	u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;

	return bnx2x_mdio45_ctrl_write(bp, emac_base, phy_addr,
				       reg, addr, val);
}
static int bnx2x_mdio45_ctrl_read(struct bnx2x *bp, u32 mdio_ctrl,
				  u32 phy_addr, u32 reg, u32 addr,
				  u32 *ret_val)
{
	u32 val;
	int i, rc = 0;

	/* set clause 45 mode, slow down the MDIO clock to 2.5MHz
	 * (a value of 49==0x31) and make sure that the AUTO poll is off
	 */
	val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
	val &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
	val |= (EMAC_MDIO_MODE_CLAUSE_45 |
		(49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
	REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
	udelay(40);

	/* address */
	val = ((phy_addr << 21) | (reg << 16) | addr |
	       EMAC_MDIO_COMM_COMMAND_ADDRESS |
	       EMAC_MDIO_COMM_START_BUSY);
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
		if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}
	if (val & EMAC_MDIO_COMM_START_BUSY) {
		BNX2X_ERR("read phy register failed\n");
		*ret_val = 0;
		rc = -EBUSY;
	} else {
		/* data */
		val = ((phy_addr << 21) | (reg << 16) |
		       EMAC_MDIO_COMM_COMMAND_READ_45 |
		       EMAC_MDIO_COMM_START_BUSY);
		REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);

		for (i = 0; i < 50; i++) {
			udelay(10);

			val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
			if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
				val &= EMAC_MDIO_COMM_DATA;
				break;
			}
		}
		if (val & EMAC_MDIO_COMM_START_BUSY) {
			BNX2X_ERR("read phy register failed\n");
			val = 0;
			rc = -EBUSY;
		}
		*ret_val = val;
	}

	/* unset clause 45 mode, set the MDIO clock to a faster value
	 * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
	 */
	val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
	val &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
	val |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
		val |= EMAC_MDIO_MODE_AUTO_POLL;
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);

	return rc;
}
static int bnx2x_mdio45_read(struct bnx2x *bp, u32 phy_addr, u32 reg,
			     u32 addr, u32 *ret_val)
{
	u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;

	return bnx2x_mdio45_ctrl_read(bp, emac_base, phy_addr,
				      reg, addr, ret_val);
}

static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 phy_addr, u32 reg,
			       u32 addr, u32 val)
{
	int i;
	u32 rd_val;

	might_sleep();
	for (i = 0; i < 10; i++) {
		bnx2x_mdio45_write(bp, phy_addr, reg, addr, val);
		msleep(5);
		bnx2x_mdio45_read(bp, phy_addr, reg, addr, &rd_val);
		/* if the read value is not the same as the value we wrote,
		   we should write it again */
		if (rd_val == val)
			return 0;
	}
	BNX2X_ERR("MDIO write in CL45 failed\n");
	return -EBUSY;
}
static void bnx2x_pause_resolve(struct bnx2x *bp, u32 pause_result)
{
	switch (pause_result) {			/* ASYM P ASYM P */
	case 0xb:				/*   1  0   1  1 */
		bp->flow_ctrl = FLOW_CTRL_TX;
		break;

	case 0xe:				/*   1  1   1  0 */
		bp->flow_ctrl = FLOW_CTRL_RX;
		break;

	case 0x5:				/*   0  1   0  1 */
	case 0x7:				/*   0  1   1  1 */
	case 0xd:				/*   1  1   0  1 */
	case 0xf:				/*   1  1   1  1 */
		bp->flow_ctrl = FLOW_CTRL_BOTH;
		break;

	default:
		break;
	}
}
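/* pause_result is a 4-bit truth table: bits 3:2 are the local ASYM_PAUSE
 * and PAUSE advertisements, bits 1:0 the link partner's.  E.g. 0xb means
 * "we advertise asymmetric pause only, the partner advertises both", which
 * IEEE 802.3 annex 28B resolves to TX-only flow control; whenever both
 * sides advertise PAUSE (0x5/0x7/0xd/0xf) the result is symmetric.
 */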
static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x *bp)
{
	u32 ext_phy_addr;
	u32 ld_pause;		/* local */
	u32 lp_pause;		/* link partner */
	u32 an_complete;	/* AN complete */
	u32 pause_result;
	u8 ret = 0;

	ext_phy_addr = ((bp->ext_phy_config &
			 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
			PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);

	/* read twice to clear the latched status */
	bnx2x_mdio45_read(bp, ext_phy_addr,
			  EXT_PHY_KR_AUTO_NEG_DEVAD,
			  EXT_PHY_KR_STATUS, &an_complete);
	bnx2x_mdio45_read(bp, ext_phy_addr,
			  EXT_PHY_KR_AUTO_NEG_DEVAD,
			  EXT_PHY_KR_STATUS, &an_complete);

	if (an_complete & EXT_PHY_KR_AUTO_NEG_COMPLETE) {
		ret = 1;
		bnx2x_mdio45_read(bp, ext_phy_addr,
				  EXT_PHY_KR_AUTO_NEG_DEVAD,
				  EXT_PHY_KR_AUTO_NEG_ADVERT, &ld_pause);
		bnx2x_mdio45_read(bp, ext_phy_addr,
				  EXT_PHY_KR_AUTO_NEG_DEVAD,
				  EXT_PHY_KR_LP_AUTO_NEG, &lp_pause);
		pause_result = (ld_pause &
				EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 8;
		pause_result |= (lp_pause &
				 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 10;
		DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
		   pause_result);
		bnx2x_pause_resolve(bp, pause_result);
	}
	return ret;
}
static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
{
	u32 ld_pause;	/* local driver */
	u32 lp_pause;	/* link partner */
	u32 pause_result;

	bp->flow_ctrl = 0;

	/* resolve from gp_status in case of AN complete and not sgmii */
	if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
	    (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
	    (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
	    (XGXS_EXT_PHY_TYPE(bp) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {

		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
		bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
				  &ld_pause);
		bnx2x_mdio22_read(bp,
			MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
				  &lp_pause);
		pause_result = (ld_pause &
				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
		pause_result |= (lp_pause &
				 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
		DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
		bnx2x_pause_resolve(bp, pause_result);

	} else if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) ||
		   !(bnx2x_ext_phy_resolve_fc(bp))) {

		if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
			switch (bp->req_flow_ctrl) {
			case FLOW_CTRL_AUTO:
				if (bp->dev->mtu <= 4500)
					bp->flow_ctrl = FLOW_CTRL_BOTH;
				else
					bp->flow_ctrl = FLOW_CTRL_TX;
				break;

			case FLOW_CTRL_TX:
				bp->flow_ctrl = FLOW_CTRL_TX;
				break;

			case FLOW_CTRL_RX:
				if (bp->dev->mtu <= 4500)
					bp->flow_ctrl = FLOW_CTRL_RX;
				break;

			case FLOW_CTRL_BOTH:
				if (bp->dev->mtu <= 4500)
					bp->flow_ctrl = FLOW_CTRL_BOTH;
				else
					bp->flow_ctrl = FLOW_CTRL_TX;
				break;

			case FLOW_CTRL_NONE:
			default:
				break;
			}

		} else { /* forced mode */
			switch (bp->req_flow_ctrl) {
			case FLOW_CTRL_AUTO:
				DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
				   " req_autoneg 0x%x\n",
				   bp->req_flow_ctrl, bp->req_autoneg);
				break;

			case FLOW_CTRL_TX:
			case FLOW_CTRL_RX:
			case FLOW_CTRL_BOTH:
				bp->flow_ctrl = bp->req_flow_ctrl;
				break;

			case FLOW_CTRL_NONE:
			default:
				break;
			}
		}
	}
	DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
}
static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
{
	bp->link_status = 0;

	if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
		DP(NETIF_MSG_LINK, "phy link up\n");

		bp->phy_link_up = 1;
		bp->link_status |= LINK_STATUS_LINK_UP;

		if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
			bp->duplex = DUPLEX_FULL;
		else
			bp->duplex = DUPLEX_HALF;

		bnx2x_flow_ctrl_resolve(bp, gp_status);

		switch (gp_status & GP_STATUS_SPEED_MASK) {
		case GP_STATUS_10M:
			bp->line_speed = SPEED_10;
			if (bp->duplex == DUPLEX_FULL)
				bp->link_status |= LINK_10TFD;
			else
				bp->link_status |= LINK_10THD;
			break;

		case GP_STATUS_100M:
			bp->line_speed = SPEED_100;
			if (bp->duplex == DUPLEX_FULL)
				bp->link_status |= LINK_100TXFD;
			else
				bp->link_status |= LINK_100TXHD;
			break;

		case GP_STATUS_1G_KX:
			bp->line_speed = SPEED_1000;
			if (bp->duplex == DUPLEX_FULL)
				bp->link_status |= LINK_1000TFD;
			else
				bp->link_status |= LINK_1000THD;
			break;

		case GP_STATUS_2_5G:
			bp->line_speed = SPEED_2500;
			if (bp->duplex == DUPLEX_FULL)
				bp->link_status |= LINK_2500TFD;
			else
				bp->link_status |= LINK_2500THD;
			break;

		case GP_STATUS_5G:
		case GP_STATUS_6G:
			BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
				  gp_status);
			break;

		case GP_STATUS_10G_KX4:
		case GP_STATUS_10G_HIG:
		case GP_STATUS_10G_CX4:
			bp->line_speed = SPEED_10000;
			bp->link_status |= LINK_10GTFD;
			break;

		case GP_STATUS_12G_HIG:
			bp->line_speed = SPEED_12000;
			bp->link_status |= LINK_12GTFD;
			break;

		case GP_STATUS_12_5G:
			bp->line_speed = SPEED_12500;
			bp->link_status |= LINK_12_5GTFD;
			break;

		case GP_STATUS_13G:
			bp->line_speed = SPEED_13000;
			bp->link_status |= LINK_13GTFD;
			break;

		case GP_STATUS_15G:
			bp->line_speed = SPEED_15000;
			bp->link_status |= LINK_15GTFD;
			break;

		case GP_STATUS_16G:
			bp->line_speed = SPEED_16000;
			bp->link_status |= LINK_16GTFD;
			break;

		default:
			BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
				  gp_status);
			break;
		}

		bp->link_status |= LINK_STATUS_SERDES_LINK;

		if (bp->req_autoneg & AUTONEG_SPEED) {
			bp->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;

			if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
				bp->link_status |=
					LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;

			if (bp->autoneg & AUTONEG_PARALLEL)
				bp->link_status |=
					LINK_STATUS_PARALLEL_DETECTION_USED;
		}

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			bp->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;

		if (bp->flow_ctrl & FLOW_CTRL_RX)
			bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;

	} else { /* link_down */
		DP(NETIF_MSG_LINK, "phy link down\n");

		bp->phy_link_up = 0;

		bp->line_speed = 0;
		bp->duplex = DUPLEX_FULL;
		bp->flow_ctrl = 0;
	}

	DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %d\n"
	   DP_LEVEL " line_speed %d duplex %d flow_ctrl 0x%x"
	   " link_status 0x%x\n",
	   gp_status, bp->phy_link_up, bp->line_speed, bp->duplex,
	   bp->flow_ctrl, bp->link_status);
}
static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
{
	int port = bp->port;

	/* first reset all status
	 * we assume only one line will change at a time */
	bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
		       (NIG_STATUS_XGXS0_LINK10G |
			NIG_STATUS_XGXS0_LINK_STATUS |
			NIG_STATUS_SERDES0_LINK_STATUS));
	if (bp->phy_link_up) {
		if (is_10g) {
			/* Disable the 10G link interrupt
			 * by writing 1 to the status register
			 */
			DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
			bnx2x_bits_en(bp,
				      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
				      NIG_STATUS_XGXS0_LINK10G);

		} else if (bp->phy_flags & PHY_XGXS_FLAG) {
			/* Disable the link interrupt
			 * by writing 1 to the relevant lane
			 * in the status register
			 */
			DP(NETIF_MSG_LINK, "1G XGXS phy link up\n");
			bnx2x_bits_en(bp,
				      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
				      ((1 << bp->ser_lane) <<
				       NIG_STATUS_XGXS0_LINK_STATUS_SIZE));

		} else { /* SerDes */
			DP(NETIF_MSG_LINK, "SerDes phy link up\n");
			/* Disable the link interrupt
			 * by writing 1 to the status register
			 */
			bnx2x_bits_en(bp,
				      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
				      NIG_STATUS_SERDES0_LINK_STATUS);
		}

	} else { /* link_down */
	}
}
static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
{
	u32 ext_phy_type;
	u32 ext_phy_addr;
	u32 val1 = 0, val2;
	u32 rx_sd, pcs_status;

	if (bp->phy_flags & PHY_XGXS_FLAG) {
		ext_phy_addr = ((bp->ext_phy_config &
				 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
				PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);

		ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			DP(NETIF_MSG_LINK, "XGXS Direct\n");
			val1 = 1;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			DP(NETIF_MSG_LINK, "XGXS 8705\n");
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_WIS_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val1);
			DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);

			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_WIS_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val1);
			DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
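			/* The LASI status register is latched, which is why
			 * it is read twice here and in the other PHY cases
			 * below: the first read clears the sticky bit from a
			 * past event, the second reports the current state.
			 */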
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
			DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
			val1 = (rx_sd & 0x1);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			DP(NETIF_MSG_LINK, "XGXS 8706\n");
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val1);
			DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);

			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val1);
			DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);

			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PCS_DEVAD,
					  EXT_PHY_OPT_PCS_STATUS, &pcs_status);
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_AUTO_NEG_DEVAD,
					  EXT_PHY_OPT_AN_LINK_STATUS, &val2);

			DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
			   " pcs_status 0x%x 1Gbps link_status 0x%x 0x%x\n",
			   rx_sd, pcs_status, val2, (val2 & (1<<1)));
			/* link is up if both bit 0 of pmd_rx_sd and
			 * bit 0 of pcs_status are set, or if the autoneg bit
			 * 1 is set
			 */
			val1 = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

			/* clear the interrupt LASI status register */
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_KR_PCS_DEVAD,
					       EXT_PHY_KR_LASI_STATUS, &val2);
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_KR_PCS_DEVAD,
					       EXT_PHY_KR_LASI_STATUS, &val1);
			DP(NETIF_MSG_LINK, "KR LASI status 0x%x->0x%x\n",
			   val2, val1);
			/* Check the LASI */
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_KR_PMA_PMD_DEVAD,
					       0x9003, &val2);
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_KR_PMA_PMD_DEVAD,
					       0x9003, &val1);
			DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n",
			   val2, val1);
			/* Check the link status */
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_KR_PCS_DEVAD,
					       EXT_PHY_KR_PCS_STATUS, &val2);
			DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
			/* Check the link status on 1.1.2 */
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_OPT_PMA_PMD_DEVAD,
					       EXT_PHY_KR_STATUS, &val2);
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_OPT_PMA_PMD_DEVAD,
					       EXT_PHY_KR_STATUS, &val1);
			DP(NETIF_MSG_LINK,
			   "KR PMA status 0x%x->0x%x\n", val2, val1);
			val1 = ((val1 & 4) == 4);
			/* If 1G was requested assume the link is up */
			if (!(bp->req_autoneg & AUTONEG_SPEED) &&
			    (bp->req_line_speed == SPEED_1000))
				val1 = 1;
			bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val2);
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val1);
			DP(NETIF_MSG_LINK,
			   "10G-base-T LASI status 0x%x->0x%x\n", val2, val1);
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_KR_STATUS, &val2);
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_KR_STATUS, &val1);
			DP(NETIF_MSG_LINK,
			   "10G-base-T PMA status 0x%x->0x%x\n", val2, val1);
			val1 = ((val1 & 4) == 4);
			/* print the AN outcome of the SFX7101 PHY */
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_KR_AUTO_NEG_DEVAD,
					  0x21, &val2);
			DP(NETIF_MSG_LINK,
			   "SFX7101 AN status 0x%x->%s\n", val2,
			   (val2 & (1<<14)) ? "Master" : "Slave");
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->ext_phy_config);
			break;
		}

	} else { /* SerDes */
		ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			DP(NETIF_MSG_LINK, "SerDes Direct\n");
			val1 = 1;
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			DP(NETIF_MSG_LINK, "SerDes 5482\n");
			val1 = 1;
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
			   bp->ext_phy_config);
			break;
		}
	}

	return val1;
}
static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
{
	int port = bp->port;
	u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
			       NIG_REG_INGRESS_BMAC0_MEM;
	u32 wb_write[2];
	u32 val;

	DP(NETIF_MSG_LINK, "enabling BigMAC\n");
	/* reset and unreset the BigMac */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
	msleep(1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));

	/* enable access for bmac registers */
	NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);

	/* XGXS control */
	wb_write[0] = 0x3c;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
		    wb_write, 2);

	/* tx MAC SA */
	wb_write[0] = ((bp->dev->dev_addr[2] << 24) |
		       (bp->dev->dev_addr[3] << 16) |
		       (bp->dev->dev_addr[4] << 8) |
			bp->dev->dev_addr[5]);
	wb_write[1] = ((bp->dev->dev_addr[0] << 8) |
			bp->dev->dev_addr[1]);
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
		    wb_write, 2);

	/* tx control */
	val = 0xc0;
	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= 0x800000;
	wb_write[0] = val;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_write, 2);

	/* set tx mtu */
	wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -CRC */
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_write, 2);

	/* mac control */
	val = 0x3;
	if (is_lb) {
		val |= 0x4;
		DP(NETIF_MSG_LINK, "enable bmac loopback\n");
	}
	wb_write[0] = val;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
		    wb_write, 2);

	/* rx control set to don't strip crc */
	val = 0x14;
	if (bp->flow_ctrl & FLOW_CTRL_RX)
		val |= 0x20;
	wb_write[0] = val;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_write, 2);

	/* set rx mtu */
	wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_write, 2);

	/* set cnt max size */
	wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -VLAN */
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
		    wb_write, 2);

	/* configure safc */
	wb_write[0] = 0x1000200;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
		    wb_write, 2);

	/* fix for emulation */
	if (CHIP_REV(bp) == CHIP_REV_EMUL) {
		wb_write[0] = 0xf000;
		wb_write[1] = 0;
		REG_WR_DMAE(bp,
			    bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
			    wb_write, 2);
	}

	/* reset old bmac stats */
	memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));

	NIG_WR(NIG_REG_XCM0_OUT_EN + port*4, 0x0);

	/* select XGXS */
	NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
	NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);

	/* disable the NIG in/out to the emac */
	NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x0);
	NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
	NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);

	/* enable the NIG in/out to the bmac */
	NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);

	NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x1);
	val = 0;
	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val = 1;
	NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
	NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x1);

	bp->phy_flags |= PHY_BMAC_FLAG;

	bp->stats_state = STATS_STATE_ENABLE;
}
static void bnx2x_bmac_rx_disable(struct bnx2x *bp)
{
	int port = bp->port;
	u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
			       NIG_REG_INGRESS_BMAC0_MEM;
	u32 wb_write[2];

	/* Only if the bmac is out of reset */
	if (REG_RD(bp, MISC_REG_RESET_REG_2) &
	    (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)) {
		/* Clear Rx Enable bit in BMAC_CONTROL register */
#ifdef BNX2X_DMAE_RD
		bnx2x_read_dmae(bp, bmac_addr +
				BIGMAC_REGISTER_BMAC_CONTROL, 2);
		wb_write[0] = *bnx2x_sp(bp, wb_data[0]);
		wb_write[1] = *bnx2x_sp(bp, wb_data[1]);
#else
		wb_write[0] = REG_RD(bp,
				bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL);
		wb_write[1] = REG_RD(bp,
				bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL + 4);
#endif
		wb_write[0] &= ~BMAC_CONTROL_RX_ENABLE;
		REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
			    wb_write, 2);
	}
}
2304 static void bnx2x_emac_enable(struct bnx2x *bp)
2306 int port = bp->port;
2307 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
2311 DP(NETIF_MSG_LINK, "enabling EMAC\n");
2312 /* reset and unreset the emac core */
2313 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2314 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2316 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2317 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2319 /* enable emac and not bmac */
2320 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
2323 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
2324 /* Use lane 1 (of lanes 0-3) */
2325 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2326 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2329 else if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2330 /* Use lane 1 (of lanes 0-3) */
2331 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2332 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2336 if (bp->phy_flags & PHY_XGXS_FLAG) {
2337 DP(NETIF_MSG_LINK, "XGXS\n");
2338 /* select the master lanes (out of 0-3) */
2339 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4,
2342 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2344 } else { /* SerDes */
2345 DP(NETIF_MSG_LINK, "SerDes\n");
2347 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2352 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 1);
2354 /* init emac - use read-modify-write */
2355 /* self clear reset */
2356 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2357 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
2360 while (val & EMAC_MODE_RESET) {
2361 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2362 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2364 BNX2X_ERR("EMAC timeout!\n");
2371 EMAC_WR(EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET);
2374 while (val & EMAC_TX_MODE_RESET) {
2375 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_TX_MODE);
2376 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2378 BNX2X_ERR("EMAC timeout!\n");
2384 if (CHIP_REV_IS_SLOW(bp)) {
2385 /* config GMII mode */
2386 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2387 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
2390 /* pause enable/disable */
2391 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2392 EMAC_RX_MODE_FLOW_EN);
2393 if (bp->flow_ctrl & FLOW_CTRL_RX)
2394 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2395 EMAC_RX_MODE_FLOW_EN);
2397 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2398 EMAC_TX_MODE_EXT_PAUSE_EN);
2399 if (bp->flow_ctrl & FLOW_CTRL_TX)
2400 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2401 EMAC_TX_MODE_EXT_PAUSE_EN);
2404 /* KEEP_VLAN_TAG, promiscuous */
2405 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
2406 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
2407 EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
2409 /* identify magic packets */
2410 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2411 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_MPKT));
2413 /* enable emac for jumbo packets */
2414 EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
2415 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
2416 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); /* -VLAN */
2419 NIG_WR(NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
2421 val = ((bp->dev->dev_addr[0] << 8) |
2422 bp->dev->dev_addr[1]);
2423 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
2425 val = ((bp->dev->dev_addr[2] << 24) |
2426 (bp->dev->dev_addr[3] << 16) |
2427 (bp->dev->dev_addr[4] << 8) |
2428 bp->dev->dev_addr[5]);
2429 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
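/* Illustrative example of the packing above: for a (hypothetical)
 * dev_addr of 00:10:18:01:02:03 the two writes are
 *   EMAC_MAC_MATCH     = (0x00 << 8) | 0x10 = 0x00000010
 *   EMAC_MAC_MATCH + 4 = (0x18 << 24) | (0x01 << 16) |
 *                        (0x02 << 8) | 0x03 = 0x18010203
 * i.e. the first register holds the two high bytes of the MAC address
 * and the second register the remaining four.
 */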
2431 /* disable the NIG in/out to the bmac */
2432 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x0);
2433 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
2434 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
2436 /* enable the NIG in/out to the emac */
2437 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x1);
2439 if (bp->flow_ctrl & FLOW_CTRL_TX)
2441 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
2442 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
2444 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2445 /* take the BigMac out of reset */
2446 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2447 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2449 /* enable access for bmac registers */
2450 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2453 bp->phy_flags |= PHY_EMAC_FLAG;
2455 bp->stats_state = STATS_STATE_ENABLE;
2458 static void bnx2x_emac_program(struct bnx2x *bp)
2461 int port = bp->port;
2463 DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2464 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2465 (EMAC_MODE_25G_MODE |
2466 EMAC_MODE_PORT_MII_10M |
2467 EMAC_MODE_HALF_DUPLEX));
2468 switch (bp->line_speed) {
2470 mode |= EMAC_MODE_PORT_MII_10M;
2474 mode |= EMAC_MODE_PORT_MII;
2478 mode |= EMAC_MODE_PORT_GMII;
2482 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
2486 /* 10G not valid for EMAC */
2487 BNX2X_ERR("Invalid line_speed 0x%x\n", bp->line_speed);
2491 if (bp->duplex == DUPLEX_HALF)
2492 mode |= EMAC_MODE_HALF_DUPLEX;
2493 bnx2x_bits_en(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2496 bnx2x_leds_set(bp, bp->line_speed);
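/* bnx2x_emac_program() above is a plain read-modify-write of EMAC_MODE:
 * all speed/duplex bits are cleared first via bnx2x_bits_dis() and only
 * the bits matching bp->line_speed and bp->duplex are set back, so a
 * previous link's settings cannot leak into the new one.
 */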
2499 static void bnx2x_set_sgmii_tx_driver(struct bnx2x *bp)
2505 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2506 bnx2x_mdio22_read(bp, MDIO_OVER_1G_LP_UP2, &lp_up2);
2508 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_TX0);
2509 bnx2x_mdio22_read(bp, MDIO_TX0_TX_DRIVER, &tx_driver);
2511 /* bits [10:7] of lp_up2, repositioned to [15:12] */
2512 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
2513 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
2514 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
2516 if ((lp_up2 != 0) &&
2517 (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK))) {
2518 /* replace tx_driver bits [15:12] */
2519 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2520 tx_driver |= lp_up2;
2521 bnx2x_mdio22_write(bp, MDIO_TX0_TX_DRIVER, tx_driver);
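/* The update above copies the link partner's requested pre-emphasis
 * (LP_UP2 bits [10:7]) into TX_DRIVER bits [15:12], and skips the MDIO
 * write when the request is zero or already matches the current value.
 */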
2525 static void bnx2x_pbf_update(struct bnx2x *bp)
2527 int port = bp->port;
2533 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2535 /* wait for init credit */
2536 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
2537 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2538 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
2540 while ((init_crd != crd) && count) {
2543 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2546 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2547 if (init_crd != crd)
2548 BNX2X_ERR("BUG! init_crd 0x%x != crd 0x%x\n", init_crd, crd);
2550 if (bp->flow_ctrl & FLOW_CTRL_RX)
2552 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
2554 /* update threshold */
2555 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
2556 /* update init credit */
2557 init_crd = 778; /* (800-18-4) */
2560 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16;
2562 /* update threshold */
2563 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
2564 /* update init credit */
2565 switch (bp->line_speed) {
2569 init_crd = thresh + 55 - 22;
2573 init_crd = thresh + 138 - 22;
2577 init_crd = thresh + 553 - 22;
2581 BNX2X_ERR("Invalid line_speed 0x%x\n",
2586 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
2587 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
2588 bp->line_speed, init_crd);
2590 /* probe the credit changes */
2591 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
2593 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
2596 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
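/* Credit arithmetic sketch for the code above (illustrative numbers):
 * the threshold is (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16 in
 * 16-byte credit units and init_crd = thresh + K - 22, where K grows
 * with line speed (55, 138 or 553 above). Assuming a 9600 byte jumbo
 * packet and 22 bytes of overhead: thresh = 9622/16 = 601, so the 10G
 * init credit is 601 + 553 - 22 = 1132.
 */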
2599 static void bnx2x_update_mng(struct bnx2x *bp)
2602 SHMEM_WR(bp, port_mb[bp->port].link_status,
2606 static void bnx2x_link_report(struct bnx2x *bp)
2609 netif_carrier_on(bp->dev);
2610 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2612 printk("%d Mbps ", bp->line_speed);
2614 if (bp->duplex == DUPLEX_FULL)
2615 printk("full duplex");
2617 printk("half duplex");
2619 if (bp->flow_ctrl) {
2620 if (bp->flow_ctrl & FLOW_CTRL_RX) {
2621 printk(", receive ");
2622 if (bp->flow_ctrl & FLOW_CTRL_TX)
2623 printk("& transmit ");
2625 printk(", transmit ");
2627 printk("flow control ON");
2631 } else { /* link_down */
2632 netif_carrier_off(bp->dev);
2633 printk(KERN_INFO PFX "%s NIC Link is Down\n", bp->dev->name);
2637 static void bnx2x_link_up(struct bnx2x *bp)
2639 int port = bp->port;
2642 bnx2x_pbf_update(bp);
2645 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
2647 /* update shared memory */
2648 bnx2x_update_mng(bp);
2650 /* indicate link up */
2651 bnx2x_link_report(bp);
2654 static void bnx2x_link_down(struct bnx2x *bp)
2656 int port = bp->port;
2659 if (bp->stats_state != STATS_STATE_DISABLE) {
2660 bp->stats_state = STATS_STATE_STOP;
2661 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
2664 /* indicate no mac active */
2665 bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);
2667 /* update shared memory */
2668 bnx2x_update_mng(bp);
2670 /* activate nig drain */
2671 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
2674 bnx2x_bmac_rx_disable(bp);
2675 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2676 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2678 /* indicate link down */
2679 bnx2x_link_report(bp);
2682 static void bnx2x_init_mac_stats(struct bnx2x *bp);
2684 /* This function is called upon link interrupt */
2685 static void bnx2x_link_update(struct bnx2x *bp)
2687 int port = bp->port;
2692 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
2693 " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
2694 " 10G %x, XGXS_LINK %x\n", port,
2695 (bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
2696 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
2697 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
2698 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
2699 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
2700 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
2701 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
2705 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
2706 /* avoid fast toggling */
2707 for (i = 0; i < 10; i++) {
2709 bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
2713 bnx2x_link_settings_status(bp, gp_status);
2715 /* anything 10G and over uses the bmac */
2716 link_10g = ((bp->line_speed >= SPEED_10000) &&
2717 (bp->line_speed <= SPEED_16000));
2719 bnx2x_link_int_ack(bp, link_10g);
2721 /* link is up only if both local phy and external phy are up */
2722 bp->link_up = (bp->phy_link_up && bnx2x_ext_phy_is_link_up(bp));
2725 bnx2x_bmac_enable(bp, 0);
2726 bnx2x_leds_set(bp, SPEED_10000);
2729 bnx2x_emac_enable(bp);
2730 bnx2x_emac_program(bp);
2733 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
2734 if (!(bp->phy_flags & PHY_SGMII_FLAG))
2735 bnx2x_set_sgmii_tx_driver(bp);
2740 } else { /* link down */
2741 bnx2x_leds_unset(bp);
2742 bnx2x_link_down(bp);
2745 bnx2x_init_mac_stats(bp);
2749 * Init service functions
2752 static void bnx2x_set_aer_mmd(struct bnx2x *bp)
2754 u16 offset = (bp->phy_flags & PHY_XGXS_FLAG) ?
2755 (bp->phy_addr + bp->ser_lane) : 0;
2757 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
2758 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
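/* bnx2x_set_aer_mmd() programs the Address Expansion Register so that
 * following MDIO clause 22 accesses hit the right lane: the offset
 * added to the 0x3800 base is phy_addr + ser_lane for XGXS and 0 for
 * SerDes, exactly as computed above.
 */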
2761 static void bnx2x_set_master_ln(struct bnx2x *bp)
2765 /* set the master_ln for AN */
2766 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2767 bnx2x_mdio22_read(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2769 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2770 (new_master_ln | bp->ser_lane));
2773 static void bnx2x_reset_unicore(struct bnx2x *bp)
2778 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2779 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
2780 /* reset the unicore */
2781 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2782 (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
2784 /* wait for the reset to self clear */
2785 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
2788 /* the reset erased the previous bank value */
2789 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2790 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2793 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
2799 BNX2X_ERR("BUG! %s (0x%x) is still in reset!\n",
2800 (bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
2804 static void bnx2x_set_swap_lanes(struct bnx2x *bp)
2806 /* Each two bits represents a lane number:
2807 no swap is 0123 => 0x1b, so there is no need to enable the swap */
2809 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2810 if (bp->rx_lane_swap != 0x1b) {
2811 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP,
2813 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
2814 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
2816 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
2819 if (bp->tx_lane_swap != 0x1b) {
2820 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP,
2822 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
2824 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
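/* Lane-swap encoding example: each 2-bit field holds the physical lane
 * mapped to that logical lane, so the identity mapping 0,1,2,3 packs as
 * 00b|01b|10b|11b = 0x1b, which is why 0x1b means "no swap" above and
 * the swap-enable bits are written only for any other value.
 */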
2828 static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2832 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2833 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2836 if (bp->autoneg & AUTONEG_PARALLEL) {
2837 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2839 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2841 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2844 if (bp->phy_flags & PHY_XGXS_FLAG) {
2845 DP(NETIF_MSG_LINK, "XGXS\n");
2846 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
2848 bnx2x_mdio22_write(bp,
2849 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
2850 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
2852 bnx2x_mdio22_read(bp,
2853 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2856 if (bp->autoneg & AUTONEG_PARALLEL) {
2858 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2861 ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2863 bnx2x_mdio22_write(bp,
2864 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2867 /* Disable parallel detection of HiGig */
2868 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2869 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
2870 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
2871 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
2875 static void bnx2x_set_autoneg(struct bnx2x *bp)
2880 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2881 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2882 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2883 (bp->autoneg & AUTONEG_CL37)) {
2884 /* CL37 Autoneg Enabled */
2885 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
2887 /* CL37 Autoneg Disabled */
2888 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2889 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
2891 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2893 /* Enable/Disable Autodetection */
2894 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2895 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
2896 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
2898 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2899 (bp->autoneg & AUTONEG_SGMII_FIBER_AUTODET)) {
2900 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2902 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2904 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
2906 /* Enable TetonII and BAM autoneg */
2907 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_BAM_NEXT_PAGE);
2908 bnx2x_mdio22_read(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2910 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2911 (bp->autoneg & AUTONEG_CL37) && (bp->autoneg & AUTONEG_BAM)) {
2912 /* Enable BAM aneg Mode and TetonII aneg Mode */
2913 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2914 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2916 /* TetonII and BAM Autoneg Disabled */
2917 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2918 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2920 bnx2x_mdio22_write(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2923 /* Enable Clause 73 Aneg */
2924 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2925 (bp->autoneg & AUTONEG_CL73)) {
2926 /* Enable BAM Station Manager */
2927 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_USERB0);
2928 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL1,
2929 (MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
2930 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
2931 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN));
2933 /* Merge CL73 and CL37 aneg resolution */
2934 bnx2x_mdio22_read(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2936 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2938 MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR));
2940 /* Set the CL73 AN speed */
2941 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB1);
2942 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB1_AN_ADV2, &reg_val);
2943 /* In the SerDes we support only 1G.
2944 In the XGXS we support 10G KX4,
2945 but we currently do not support KR */
2946 if (bp->phy_flags & PHY_XGXS_FLAG) {
2947 DP(NETIF_MSG_LINK, "XGXS\n");
2949 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
2951 DP(NETIF_MSG_LINK, "SerDes\n");
2953 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
2955 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB1_AN_ADV2, reg_val);
2957 /* CL73 Autoneg Enabled */
2958 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
2960 /* CL73 Autoneg Disabled */
2963 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
2964 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
2967 /* program SerDes, forced speed */
2968 static void bnx2x_program_serdes(struct bnx2x *bp)
2972 /* program duplex, disable autoneg */
2973 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2974 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2975 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2976 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN);
2977 if (bp->req_duplex == DUPLEX_FULL)
2978 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2979 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2982 - needed only if the speed is greater than 1G (2.5G or 10G) */
2983 if (bp->req_line_speed > SPEED_1000) {
2984 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2985 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2986 /* clearing the speed value before setting the right speed */
2987 reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
2988 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
2989 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
2990 if (bp->req_line_speed == SPEED_10000)
2992 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
2993 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_MISC1, reg_val);
2997 static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
3001 /* configure the 48 bits for BAM AN */
3002 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
3004 /* set extended capabilities */
3005 if (bp->advertising & ADVERTISED_2500baseX_Full)
3006 val |= MDIO_OVER_1G_UP1_2_5G;
3007 if (bp->advertising & ADVERTISED_10000baseT_Full)
3008 val |= MDIO_OVER_1G_UP1_10G;
3009 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP1, val);
3011 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP3, 0);
3014 static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
3018 /* for AN, we are always publishing full duplex */
3019 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
3021 /* resolve pause mode and advertisement
3022 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
3023 if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
3024 switch (bp->req_flow_ctrl) {
3025 case FLOW_CTRL_AUTO:
3026 if (bp->dev->mtu <= 4500) {
3028 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3029 bp->advertising |= (ADVERTISED_Pause |
3030 ADVERTISED_Asym_Pause);
3033 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3034 bp->advertising |= ADVERTISED_Asym_Pause;
3040 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3041 bp->advertising |= ADVERTISED_Asym_Pause;
3045 if (bp->dev->mtu <= 4500) {
3047 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3048 bp->advertising |= (ADVERTISED_Pause |
3049 ADVERTISED_Asym_Pause);
3052 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3053 bp->advertising &= ~(ADVERTISED_Pause |
3054 ADVERTISED_Asym_Pause);
3058 case FLOW_CTRL_BOTH:
3059 if (bp->dev->mtu <= 4500) {
3061 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3062 bp->advertising |= (ADVERTISED_Pause |
3063 ADVERTISED_Asym_Pause);
3066 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3067 bp->advertising |= ADVERTISED_Asym_Pause;
3071 case FLOW_CTRL_NONE:
3073 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3074 bp->advertising &= ~(ADVERTISED_Pause |
3075 ADVERTISED_Asym_Pause);
3078 } else { /* forced mode */
3079 switch (bp->req_flow_ctrl) {
3080 case FLOW_CTRL_AUTO:
3081 DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
3082 " req_autoneg 0x%x\n",
3083 bp->req_flow_ctrl, bp->req_autoneg);
3088 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3089 bp->advertising |= ADVERTISED_Asym_Pause;
3093 case FLOW_CTRL_BOTH:
3094 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3095 bp->advertising |= (ADVERTISED_Pause |
3096 ADVERTISED_Asym_Pause);
3099 case FLOW_CTRL_NONE:
3101 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3102 bp->advertising &= ~(ADVERTISED_Pause |
3103 ADVERTISED_Asym_Pause);
3108 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3109 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
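/* Pause advertisement recap (per Table 28B-3 referenced above): the
 * symmetric PAUSE bit is offered only while dev->mtu <= 4500,
 * presumably because receive flow control with larger jumbo frames
 * cannot stop the sender before the RX buffers overflow; otherwise
 * only asymmetric (TX-side) pause is advertised.
 */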
3112 static void bnx2x_restart_autoneg(struct bnx2x *bp)
3114 if (bp->autoneg & AUTONEG_CL73) {
3115 /* enable and restart clause 73 aneg */
3118 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3119 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3121 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3123 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
3124 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
3127 /* Enable and restart BAM/CL37 aneg */
3130 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3131 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3133 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3135 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3136 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
3140 static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
3144 /* in SGMII mode, the unicore is always slave */
3145 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
3146 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3148 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
3149 /* set sgmii mode (and not fiber) */
3150 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
3151 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
3152 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
3153 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3156 /* if forced speed */
3157 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3158 /* set speed, disable autoneg */
3161 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3162 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3164 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3165 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
3166 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
3168 switch (bp->req_line_speed) {
3171 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
3175 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
3178 /* there is nothing to set for 10M */
3181 /* invalid speed for SGMII */
3182 DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
3183 bp->req_line_speed);
3187 /* setting the full duplex */
3188 if (bp->req_duplex == DUPLEX_FULL)
3190 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
3191 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3194 } else { /* AN mode */
3195 /* enable and restart AN */
3196 bnx2x_restart_autoneg(bp);
3200 static void bnx2x_link_int_enable(struct bnx2x *bp)
3202 int port = bp->port;
3206 /* setting the status to report on link up
3207 for either XGXS or SerDes */
3208 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
3209 (NIG_STATUS_XGXS0_LINK10G |
3210 NIG_STATUS_XGXS0_LINK_STATUS |
3211 NIG_STATUS_SERDES0_LINK_STATUS));
3213 if (bp->phy_flags & PHY_XGXS_FLAG) {
3214 mask = (NIG_MASK_XGXS0_LINK10G |
3215 NIG_MASK_XGXS0_LINK_STATUS);
3216 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
3217 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3218 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3219 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3221 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
3222 mask |= NIG_MASK_MI_INT;
3223 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3226 } else { /* SerDes */
3227 mask = NIG_MASK_SERDES0_LINK_STATUS;
3228 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
3229 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3230 if ((ext_phy_type !=
3231 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
3233 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
3234 mask |= NIG_MASK_MI_INT;
3235 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3239 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3241 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
3242 " int_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
3243 " 10G %x, XGXS_LINK %x\n", port,
3244 (bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
3245 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
3246 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
3247 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
3248 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
3249 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
3250 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
3254 static void bnx2x_bcm8072_external_rom_boot(struct bnx2x *bp)
3256 u32 ext_phy_addr = ((bp->ext_phy_config &
3257 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3258 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3259 u32 fw_ver1, fw_ver2;
3261 /* Need to wait 200ms after reset */
3263 /* Boot port from external ROM
3264 * Set ser_boot_ctl bit in the MISC_CTRL1 register
3266 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3267 EXT_PHY_KR_PMA_PMD_DEVAD,
3268 EXT_PHY_KR_MISC_CTRL1, 0x0001);
3270 /* Reset internal microprocessor */
3271 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3272 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3273 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3274 /* set micro reset = 0 */
3275 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3276 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3277 EXT_PHY_KR_ROM_MICRO_RESET);
3278 /* Reset internal microprocessor */
3279 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3280 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3281 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3282 /* wait for 100ms for code download via SPI port */
3285 /* Clear ser_boot_ctl bit */
3286 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3287 EXT_PHY_KR_PMA_PMD_DEVAD,
3288 EXT_PHY_KR_MISC_CTRL1, 0x0000);
3292 /* Print the PHY FW version */
3293 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3294 EXT_PHY_KR_PMA_PMD_DEVAD,
3296 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3297 EXT_PHY_KR_PMA_PMD_DEVAD,
3300 "8072 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
3303 static void bnx2x_bcm8072_force_10G(struct bnx2x *bp)
3305 u32 ext_phy_addr = ((bp->ext_phy_config &
3306 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3307 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3309 /* Force KR or KX */
3310 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3311 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL,
3313 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3314 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL2,
3316 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3317 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_PMD_CTRL,
3319 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3320 EXT_PHY_KR_AUTO_NEG_DEVAD, EXT_PHY_KR_CTRL,
3324 static void bnx2x_ext_phy_init(struct bnx2x *bp)
3332 if (bp->phy_flags & PHY_XGXS_FLAG) {
3333 ext_phy_addr = ((bp->ext_phy_config &
3334 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3335 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3337 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3338 /* Make sure that the soft reset is off (except for the 8072:
3339 * due to the lock, it will be done inside the specific
3342 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3343 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3344 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
3345 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)) {
3346 /* Wait up to 1 sec for the soft reset to clear */
3347 for (cnt = 0; cnt < 1000; cnt++) {
3348 bnx2x_mdio45_read(bp, ext_phy_addr,
3349 EXT_PHY_OPT_PMA_PMD_DEVAD,
3350 EXT_PHY_OPT_CNTL, &ctrl);
3351 if (!(ctrl & (1<<15)))
3356 "control reg 0x%x (after %d ms)\n", ctrl, cnt);
3359 switch (ext_phy_type) {
3360 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3361 DP(NETIF_MSG_LINK, "XGXS Direct\n");
3364 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3365 DP(NETIF_MSG_LINK, "XGXS 8705\n");
3367 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3368 EXT_PHY_OPT_PMA_PMD_DEVAD,
3369 EXT_PHY_OPT_PMD_MISC_CNTL,
3371 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3372 EXT_PHY_OPT_PMA_PMD_DEVAD,
3373 EXT_PHY_OPT_PHY_IDENTIFIER,
3375 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3376 EXT_PHY_OPT_PMA_PMD_DEVAD,
3377 EXT_PHY_OPT_CMU_PLL_BYPASS,
3379 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3380 EXT_PHY_OPT_WIS_DEVAD,
3381 EXT_PHY_OPT_LASI_CNTL, 0x1);
3384 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3385 DP(NETIF_MSG_LINK, "XGXS 8706\n");
3387 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3389 if (bp->req_line_speed == SPEED_10000) {
3391 "XGXS 8706 force 10Gbps\n");
3392 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3393 EXT_PHY_OPT_PMA_PMD_DEVAD,
3394 EXT_PHY_OPT_PMD_DIGITAL_CNT,
3399 "XGXS 8706 force 1Gbps\n");
3401 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3402 EXT_PHY_OPT_PMA_PMD_DEVAD,
3406 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3407 EXT_PHY_OPT_PMA_PMD_DEVAD,
3413 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3414 EXT_PHY_OPT_PMA_PMD_DEVAD,
3415 EXT_PHY_OPT_LASI_CNTL,
3419 /* Allow CL37 through CL73 */
3420 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
3421 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3422 EXT_PHY_AUTO_NEG_DEVAD,
3423 EXT_PHY_OPT_AN_CL37_CL73,
3426 /* Enable Full-Duplex advertisement on CL37 */
3427 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3428 EXT_PHY_AUTO_NEG_DEVAD,
3429 EXT_PHY_OPT_AN_CL37_FD,
3431 /* Enable CL37 AN */
3432 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3433 EXT_PHY_AUTO_NEG_DEVAD,
3434 EXT_PHY_OPT_AN_CL37_AN,
3436 /* Advertise 10G/1G support */
3437 if (bp->advertising &
3438 ADVERTISED_1000baseT_Full)
3440 if (bp->advertising &
3441 ADVERTISED_10000baseT_Full)
3444 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3445 EXT_PHY_AUTO_NEG_DEVAD,
3446 EXT_PHY_OPT_AN_ADV, val);
3448 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3449 EXT_PHY_OPT_PMA_PMD_DEVAD,
3450 EXT_PHY_OPT_LASI_CNTL,
3453 /* Enable clause 73 AN */
3454 bnx2x_mdio45_write(bp, ext_phy_addr,
3455 EXT_PHY_AUTO_NEG_DEVAD,
3461 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3462 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3463 /* Wait up to 1 sec for the soft reset to clear */
3464 for (cnt = 0; cnt < 1000; cnt++) {
3465 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
3467 EXT_PHY_OPT_PMA_PMD_DEVAD,
3468 EXT_PHY_OPT_CNTL, &ctrl);
3469 if (!(ctrl & (1<<15)))
3474 "8072 control reg 0x%x (after %d ms)\n",
3477 bnx2x_bcm8072_external_rom_boot(bp);
3478 DP(NETIF_MSG_LINK, "Finished loading 8072 KR ROM\n");
3481 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3483 EXT_PHY_KR_PMA_PMD_DEVAD,
3485 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3487 EXT_PHY_KR_PMA_PMD_DEVAD,
3488 EXT_PHY_KR_LASI_CNTL, 0x0004);
3490 /* If this is forced speed, set to KR or KX
3491 * (all other are not supported)
3493 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3494 if (bp->req_line_speed == SPEED_10000) {
3495 bnx2x_bcm8072_force_10G(bp);
3497 "Forced speed 10G on 8072\n");
3500 HW_LOCK_RESOURCE_8072_MDIO);
3506 /* Advertise 10G/1G support */
3507 if (bp->advertising &
3508 ADVERTISED_1000baseT_Full)
3510 if (bp->advertising &
3511 ADVERTISED_10000baseT_Full)
3514 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3516 EXT_PHY_KR_AUTO_NEG_DEVAD,
3518 /* Add support for CL37 ( passive mode ) I */
3519 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3521 EXT_PHY_KR_AUTO_NEG_DEVAD,
3523 /* Add support for CL37 ( passive mode ) II */
3524 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3526 EXT_PHY_KR_AUTO_NEG_DEVAD,
3528 /* Add support for CL37 ( passive mode ) III */
3529 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3531 EXT_PHY_KR_AUTO_NEG_DEVAD,
3533 /* Restart autoneg */
3535 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3537 EXT_PHY_KR_AUTO_NEG_DEVAD,
3538 EXT_PHY_KR_CTRL, 0x1200);
3539 DP(NETIF_MSG_LINK, "8072 Autoneg Restart: "
3540 "1G %ssupported 10G %ssupported\n",
3541 (val & (1<<5)) ? "" : "not ",
3542 (val & (1<<7)) ? "" : "not ");
3545 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3548 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3550 "Setting the SFX7101 LASI indication\n");
3551 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3552 EXT_PHY_OPT_PMA_PMD_DEVAD,
3553 EXT_PHY_OPT_LASI_CNTL, 0x1);
3555 "Setting the SFX7101 LED to blink on traffic\n");
3556 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3557 EXT_PHY_OPT_PMA_PMD_DEVAD,
3560 /* read-modify-write the pause advertisement */
3561 bnx2x_mdio45_read(bp, ext_phy_addr,
3562 EXT_PHY_KR_AUTO_NEG_DEVAD,
3563 EXT_PHY_KR_AUTO_NEG_ADVERT, &val);
3564 val &= ~EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH;
3565 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3566 if (bp->advertising & ADVERTISED_Pause)
3567 val |= EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE;
3569 if (bp->advertising & ADVERTISED_Asym_Pause) {
3571 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC;
3573 DP(NETIF_MSG_LINK, "SFX7101 AN advertise 0x%x\n", val);
3574 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3575 EXT_PHY_KR_AUTO_NEG_DEVAD,
3576 EXT_PHY_KR_AUTO_NEG_ADVERT, val);
3577 /* Restart autoneg */
3578 bnx2x_mdio45_read(bp, ext_phy_addr,
3579 EXT_PHY_KR_AUTO_NEG_DEVAD,
3580 EXT_PHY_KR_CTRL, &val);
3582 bnx2x_mdio45_write(bp, ext_phy_addr,
3583 EXT_PHY_KR_AUTO_NEG_DEVAD,
3584 EXT_PHY_KR_CTRL, val);
3588 BNX2X_ERR("BAD XGXS ext_phy_config 0x%x\n",
3589 bp->ext_phy_config);
3593 } else { /* SerDes */
3594 /* ext_phy_addr = ((bp->ext_phy_config &
3595 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
3596 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
3598 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3599 switch (ext_phy_type) {
3600 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3601 DP(NETIF_MSG_LINK, "SerDes Direct\n");
3604 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3605 DP(NETIF_MSG_LINK, "SerDes 5482\n");
3609 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3610 bp->ext_phy_config);
3616 static void bnx2x_ext_phy_reset(struct bnx2x *bp)
3619 u32 ext_phy_addr = ((bp->ext_phy_config &
3620 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3621 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3622 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3624 /* The PHY reset is controlled by GPIO 1
3625 * Give it 1ms of reset pulse
3627 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3628 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3629 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3630 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3632 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3633 MISC_REGISTERS_GPIO_OUTPUT_HIGH);
3636 if (bp->phy_flags & PHY_XGXS_FLAG) {
3637 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3638 switch (ext_phy_type) {
3639 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3640 DP(NETIF_MSG_LINK, "XGXS Direct\n");
3643 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3644 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3645 DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
3646 bnx2x_mdio45_write(bp, ext_phy_addr,
3647 EXT_PHY_OPT_PMA_PMD_DEVAD,
3648 EXT_PHY_OPT_CNTL, 0xa040);
3651 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3652 DP(NETIF_MSG_LINK, "XGXS 8072\n");
3653 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3654 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3656 EXT_PHY_KR_PMA_PMD_DEVAD,
3658 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3661 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3662 DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
3666 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
3667 bp->ext_phy_config);
3671 } else { /* SerDes */
3672 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3673 switch (ext_phy_type) {
3674 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3675 DP(NETIF_MSG_LINK, "SerDes Direct\n");
3678 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3679 DP(NETIF_MSG_LINK, "SerDes 5482\n");
3683 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3684 bp->ext_phy_config);
3690 static void bnx2x_link_initialize(struct bnx2x *bp)
3692 int port = bp->port;
3694 /* disable attentions */
3695 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3696 (NIG_MASK_XGXS0_LINK_STATUS |
3697 NIG_MASK_XGXS0_LINK10G |
3698 NIG_MASK_SERDES0_LINK_STATUS |
3701 /* Activate the external PHY */
3702 bnx2x_ext_phy_reset(bp);
3704 bnx2x_set_aer_mmd(bp);
3706 if (bp->phy_flags & PHY_XGXS_FLAG)
3707 bnx2x_set_master_ln(bp);
3709 /* reset the SerDes and wait for reset bit return low */
3710 bnx2x_reset_unicore(bp);
3712 bnx2x_set_aer_mmd(bp);
3714 /* setting the masterLn_def again after the reset */
3715 if (bp->phy_flags & PHY_XGXS_FLAG) {
3716 bnx2x_set_master_ln(bp);
3717 bnx2x_set_swap_lanes(bp);
3720 /* Set Parallel Detect */
3721 if (bp->req_autoneg & AUTONEG_SPEED)
3722 bnx2x_set_parallel_detection(bp);
3724 if (bp->phy_flags & PHY_XGXS_FLAG) {
3725 if (bp->req_line_speed &&
3726 bp->req_line_speed < SPEED_1000) {
3727 bp->phy_flags |= PHY_SGMII_FLAG;
3729 bp->phy_flags &= ~PHY_SGMII_FLAG;
3733 if (!(bp->phy_flags & PHY_SGMII_FLAG)) {
3736 rx_eq = ((bp->serdes_config &
3737 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
3738 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
3740 DP(NETIF_MSG_LINK, "setting rx eq to %d\n", rx_eq);
3741 for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
3742 bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0)) {
3743 MDIO_SET_REG_BANK(bp, bank);
3744 bnx2x_mdio22_write(bp, MDIO_RX0_RX_EQ_BOOST,
3746 MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
3747 MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
3750 /* forced speed requested? */
3751 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3752 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
3754 /* disable autoneg */
3755 bnx2x_set_autoneg(bp);
3757 /* program speed and duplex */
3758 bnx2x_program_serdes(bp);
3760 } else { /* AN_mode */
3761 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
3764 bnx2x_set_brcm_cl37_advertisment(bp);
3766 /* program duplex & pause advertisement (for aneg) */
3767 bnx2x_set_ieee_aneg_advertisment(bp);
3769 /* enable autoneg */
3770 bnx2x_set_autoneg(bp);
3772 /* enable and restart AN */
3773 bnx2x_restart_autoneg(bp);
3776 } else { /* SGMII mode */
3777 DP(NETIF_MSG_LINK, "SGMII\n");
3779 bnx2x_initialize_sgmii_process(bp);
3782 /* init ext phy and enable link state int */
3783 bnx2x_ext_phy_init(bp);
3785 /* enable the interrupt */
3786 bnx2x_link_int_enable(bp);
3789 static void bnx2x_phy_deassert(struct bnx2x *bp)
3791 int port = bp->port;
3794 if (bp->phy_flags & PHY_XGXS_FLAG) {
3795 DP(NETIF_MSG_LINK, "XGXS\n");
3796 val = XGXS_RESET_BITS;
3798 } else { /* SerDes */
3799 DP(NETIF_MSG_LINK, "SerDes\n");
3800 val = SERDES_RESET_BITS;
3803 val = val << (port*16);
3805 /* reset and unreset the SerDes/XGXS */
3806 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3808 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
3811 static int bnx2x_phy_init(struct bnx2x *bp)
3813 DP(NETIF_MSG_LINK, "started\n");
3814 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
3815 bp->phy_flags |= PHY_EMAC_FLAG;
3817 bp->line_speed = SPEED_10000;
3818 bp->duplex = DUPLEX_FULL;
3819 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3820 bnx2x_emac_enable(bp);
3821 bnx2x_link_report(bp);
3824 } else if (CHIP_REV(bp) == CHIP_REV_EMUL) {
3825 bp->phy_flags |= PHY_BMAC_FLAG;
3827 bp->line_speed = SPEED_10000;
3828 bp->duplex = DUPLEX_FULL;
3829 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3830 bnx2x_bmac_enable(bp, 0);
3831 bnx2x_link_report(bp);
3835 bnx2x_phy_deassert(bp);
3836 bnx2x_link_initialize(bp);
3842 static void bnx2x_link_reset(struct bnx2x *bp)
3844 int port = bp->port;
3845 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3847 /* update shared memory */
3848 bp->link_status = 0;
3849 bnx2x_update_mng(bp);
3851 /* disable attentions */
3852 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3853 (NIG_MASK_XGXS0_LINK_STATUS |
3854 NIG_MASK_XGXS0_LINK10G |
3855 NIG_MASK_SERDES0_LINK_STATUS |
3858 /* activate nig drain */
3859 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
3861 /* disable nig egress interface */
3862 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
3863 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
3865 /* Stop BigMac rx */
3866 bnx2x_bmac_rx_disable(bp);
3869 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);
3873 /* The PHY reset is controlled by GPIO 1
3874 * Hold it as output low
3876 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3877 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3878 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3879 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3880 DP(NETIF_MSG_LINK, "reset external PHY\n");
3883 /* reset the SerDes/XGXS */
3884 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
3885 (0x1ff << (port*16)));
3888 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
3889 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
3891 /* disable nig ingress interface */
3892 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
3893 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);
3899 #ifdef BNX2X_XGXS_LB
3900 static void bnx2x_set_xgxs_loopback(struct bnx2x *bp, int is_10g)
3902 int port = bp->port;
3907 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
3909 /* change the uni_phy_addr in the nig */
3910 REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18),
3912 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
3914 /* change the aer mmd */
3915 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
3916 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x2800);
3918 /* config combo IEEE0 control reg for loopback */
3919 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3920 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3923 /* set aer mmd back */
3924 bnx2x_set_aer_mmd(bp);
3927 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
3932 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
3934 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3935 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3937 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3939 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
3944 /* end of PHY/MAC */
3949 * General service functions
3952 /* the slow path queue is odd since completions arrive on the fastpath ring */
3953 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3954 u32 data_hi, u32 data_lo, int common)
3956 int port = bp->port;
3959 "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
3960 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
3961 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
3962 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
3964 #ifdef BNX2X_STOP_ON_ERROR
3965 if (unlikely(bp->panic))
3969 spin_lock(&bp->spq_lock);
3971 if (!bp->spq_left) {
3972 BNX2X_ERR("BUG! SPQ ring full!\n");
3973 spin_unlock(&bp->spq_lock);
3978 /* CID needs the port number to be encoded in it */
3979 bp->spq_prod_bd->hdr.conn_and_cmd_data =
3980 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
3982 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
3984 bp->spq_prod_bd->hdr.type |=
3985 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
3987 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
3988 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
3992 if (bp->spq_prod_bd == bp->spq_last_bd) {
3993 bp->spq_prod_bd = bp->spq;
3994 bp->spq_prod_idx = 0;
3995 DP(NETIF_MSG_TIMER, "end of spq\n");
4002 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
4005 spin_unlock(&bp->spq_lock);
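/* Usage sketch for bnx2x_sp_post(): the caller supplies the ramrod
 * command, the CID and a 64-bit cookie split into data_hi/data_lo;
 * the element is queued under spq_lock, a full ring is rejected up
 * front, and the XSTORM producer write above is what actually hands
 * the entry to the chip.
 */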
4009 /* acquire split MCP access lock register */
4010 static int bnx2x_lock_alr(struct bnx2x *bp)
4017 for (j = 0; j < i*10; j++) {
4019 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4020 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
4021 if (val & (1L << 31))
4027 if (!(val & (1L << 31))) {
4028 BNX2X_ERR("Cannot acquire the MCP access lock register\n");
4036 /* Release split MCP access lock register */
4037 static void bnx2x_unlock_alr(struct bnx2x *bp)
4041 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4044 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
4046 struct host_def_status_block *def_sb = bp->def_status_blk;
4049 barrier(); /* status block is written to by the chip */
4051 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
4052 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
4055 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
4056 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
4059 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
4060 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
4063 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
4064 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
4067 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
4068 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
4075 * slow path service functions
4078 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
4080 int port = bp->port;
4081 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
4082 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4083 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4084 u32 nig_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
4085 NIG_REG_MASK_INTERRUPT_PORT0;
4087 if (~bp->aeu_mask & (asserted & 0xff))
4088 BNX2X_ERR("IGU ERROR\n");
4089 if (bp->attn_state & asserted)
4090 BNX2X_ERR("IGU ERROR\n");
4092 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
4093 bp->aeu_mask, asserted);
4094 bp->aeu_mask &= ~(asserted & 0xff);
4095 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
4097 REG_WR(bp, aeu_addr, bp->aeu_mask);
4099 bp->attn_state |= asserted;
4101 if (asserted & ATTN_HARD_WIRED_MASK) {
4102 if (asserted & ATTN_NIG_FOR_FUNC) {
4103 u32 nig_status_port;
4104 u32 nig_int_addr = port ?
4105 NIG_REG_STATUS_INTERRUPT_PORT1 :
4106 NIG_REG_STATUS_INTERRUPT_PORT0;
4108 bp->nig_mask = REG_RD(bp, nig_mask_addr);
4109 REG_WR(bp, nig_mask_addr, 0);
4111 nig_status_port = REG_RD(bp, nig_int_addr);
4112 bnx2x_link_update(bp);
4114 /* handle unicore attn? */
4116 if (asserted & ATTN_SW_TIMER_4_FUNC)
4117 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4119 if (asserted & GPIO_2_FUNC)
4120 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4122 if (asserted & GPIO_3_FUNC)
4123 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4125 if (asserted & GPIO_4_FUNC)
4126 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4129 if (asserted & ATTN_GENERAL_ATTN_1) {
4130 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4131 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4133 if (asserted & ATTN_GENERAL_ATTN_2) {
4134 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4135 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4137 if (asserted & ATTN_GENERAL_ATTN_3) {
4138 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4139 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4142 if (asserted & ATTN_GENERAL_ATTN_4) {
4143 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4144 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4146 if (asserted & ATTN_GENERAL_ATTN_5) {
4147 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4148 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4150 if (asserted & ATTN_GENERAL_ATTN_6) {
4151 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4152 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4156 } /* if hardwired */
4158 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
4159 asserted, BAR_IGU_INTMEM + igu_addr);
4160 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
4162 /* now set back the mask */
4163 if (asserted & ATTN_NIG_FOR_FUNC)
4164 REG_WR(bp, nig_mask_addr, bp->nig_mask);
4167 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4169 int port = bp->port;
4171 struct attn_route attn;
4172 struct attn_route group_mask;
4176 /* need to take HW lock because MCP or other port might also
4177 try to handle this event */
4180 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4181 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4182 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4183 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
4184 DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);
4186 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4187 if (deasserted & (1 << index)) {
4188 group_mask = bp->attn_group[index];
4190 DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
4191 (unsigned long long)group_mask.sig[0]);
4193 if (attn.sig[3] & group_mask.sig[3] &
4194 EVEREST_GEN_ATTN_IN_USE_MASK) {
4196 if (attn.sig[3] & BNX2X_MC_ASSERT_BITS) {
4198 BNX2X_ERR("MC assert!\n");
4201 } else if (attn.sig[3] & BNX2X_MCP_ASSERT) {
4203 BNX2X_ERR("MCP assert!\n");
4205 MISC_REG_AEU_GENERAL_ATTN_11, 0);
4206 bnx2x_mc_assert(bp);
4209 BNX2X_ERR("UNKNOWN HW ASSERT!\n");
4213 if (attn.sig[1] & group_mask.sig[1] &
4214 BNX2X_DOORQ_ASSERT) {
4216 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4217 BNX2X_ERR("DB hw attention 0x%x\n", val);
4218 /* DORQ discard attention */
4220 BNX2X_ERR("FATAL error from DORQ\n");
4223 if (attn.sig[2] & group_mask.sig[2] &
4224 AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4226 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4227 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4228 /* CFC error attention */
4230 BNX2X_ERR("FATAL error from CFC\n");
4233 if (attn.sig[2] & group_mask.sig[2] &
4234 AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4236 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4237 BNX2X_ERR("PXP hw attention 0x%x\n", val);
4238 /* RQ_USDMDP_FIFO_OVERFLOW */
4240 BNX2X_ERR("FATAL error from PXP\n");
4243 if (attn.sig[3] & group_mask.sig[3] &
4244 EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4246 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4248 DP(NETIF_MSG_HW, "got latched bits 0x%x\n",
4252 if ((attn.sig[0] & group_mask.sig[0] &
4253 HW_INTERRUT_ASSERT_SET_0) ||
4254 (attn.sig[1] & group_mask.sig[1] &
4255 HW_INTERRUT_ASSERT_SET_1) ||
4256 (attn.sig[2] & group_mask.sig[2] &
4257 HW_INTERRUT_ASSERT_SET_2))
4258 BNX2X_ERR("FATAL HW block attention\n");
4260 if ((attn.sig[0] & group_mask.sig[0] &
4261 HW_PRTY_ASSERT_SET_0) ||
4262 (attn.sig[1] & group_mask.sig[1] &
4263 HW_PRTY_ASSERT_SET_1) ||
4264 (attn.sig[2] & group_mask.sig[2] &
4265 HW_PRTY_ASSERT_SET_2))
4266 BNX2X_ERR("FATAL HW block parity attention\n");
4270 bnx2x_unlock_alr(bp);
4272 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;
4275 /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
4276 val, BAR_IGU_INTMEM + reg_addr); */
4277 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
4279 if (bp->aeu_mask & (deasserted & 0xff))
4280 BNX2X_ERR("IGU BUG\n");
4281 if (~bp->attn_state & deasserted)
4282 BNX2X_ERR("IGU BUG\n");
4284 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4285 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4287 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
4288 bp->aeu_mask |= (deasserted & 0xff);
4290 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
4291 REG_WR(bp, reg_addr, bp->aeu_mask);
4293 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4294 bp->attn_state &= ~deasserted;
4295 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4298 static void bnx2x_attn_int(struct bnx2x *bp)
4300 /* read local copy of bits */
4301 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
4302 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
4303 u32 attn_state = bp->attn_state;
4305 /* look for changed bits */
4306 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
4307 u32 deasserted = ~attn_bits & attn_ack & attn_state;
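/* Bookkeeping above: a bit is "asserted" when the chip raised it but we
 * have neither acked nor recorded it, and "deasserted" when the chip
 * dropped it while we still hold both the ack and the recorded state.
 * A bit whose state disagrees with attn_bits although the ack already
 * matches is the "bad attention state" trapped below.
 */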
4310 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
4311 attn_bits, attn_ack, asserted, deasserted);
4313 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4314 BNX2X_ERR("bad attention state\n");
4316 /* handle bits that were raised */
4318 bnx2x_attn_int_asserted(bp, asserted);
4321 bnx2x_attn_int_deasserted(bp, deasserted);
4324 static void bnx2x_sp_task(struct work_struct *work)
4326 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
4329 /* Return here if interrupt is disabled */
4330 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
4331 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
4335 status = bnx2x_update_dsb_idx(bp);
4337 BNX2X_ERR("spurious slowpath interrupt!\n");
4339 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
4346 /* CStorm events: query_stats, cfc delete ramrods */
4348 bp->stat_pending = 0;
4350 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
4352 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
4354 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
4356 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
4358 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
4362 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
4364 struct net_device *dev = dev_instance;
4365 struct bnx2x *bp = netdev_priv(dev);
4367 /* Return here if interrupt is disabled */
4368 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
4369 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
4373 bnx2x_ack_sb(bp, 16, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
4375 #ifdef BNX2X_STOP_ON_ERROR
4376 if (unlikely(bp->panic))
4380 schedule_work(&bp->sp_task);
4385 /* end of slow path */
4389 /****************************************************************************
4391 ****************************************************************************/
4393 #define UPDATE_STAT(s, t) \
4395 estats->t += new->s - old->s; \
4399 /* sum[hi:lo] += add[hi:lo] */
4400 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
4403 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
4406 /* difference = minuend - subtrahend */
4407 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
4409 if (m_lo < s_lo) { /* underflow */ \
4410 d_hi = m_hi - s_hi; \
4411 if (d_hi > 0) { /* we can 'loan' 1 */ \
4413 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
4414 } else { /* m_hi <= s_hi */ \
4418 } else { /* m_lo >= s_lo */ \
4419 if (m_hi < s_hi) { \
4422 } else { /* m_hi >= s_hi */ \
4423 d_hi = m_hi - s_hi; \
4424 d_lo = m_lo - s_lo; \
4429 /* minuend -= subtrahend */
4430 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
4432 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
4435 #define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
4437 DIFF_64(diff.hi, new->s_hi, old->s_hi, \
4438 diff.lo, new->s_lo, old->s_lo); \
4439 old->s_hi = new->s_hi; \
4440 old->s_lo = new->s_lo; \
4441 ADD_64(estats->t_hi, diff.hi, \
4442 estats->t_lo, diff.lo); \
4445 /* sum[hi:lo] += add */
4446 #define ADD_EXTEND_64(s_hi, s_lo, a) \
4449 s_hi += (s_lo < a) ? 1 : 0; \
4452 #define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
4454 ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
4457 #define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
4459 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
4460 old_tclient->s = le32_to_cpu(tclient->s); \
4461 ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
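/* Worked example for the 64-bit helpers above (illustrative values):
 * ADD_64 on 0x00000001:0xffffffff += 0x00000000:0x00000001 wraps s_lo
 * to 0, the (s_lo < a_lo) test sees 0 < 1 and carries 1 into s_hi,
 * giving 0x00000002:0x00000000. DIFF_64 runs the same idea backwards:
 * when m_lo < s_lo it borrows from the high word via
 * d_lo = m_lo + (UINT_MAX - s_lo) + 1.
 */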
4465 * General service functions
4468 static inline long bnx2x_hilo(u32 *hiref)
4470 u32 lo = *(hiref + 1);
4471 #if (BITS_PER_LONG == 64)
4472 u32 hi = *hiref;
4474 return HILO_U64(hi, lo);
4481 * Init service functions
4484 static void bnx2x_init_mac_stats(struct bnx2x *bp)
4486 struct dmae_command *dmae;
4487 int port = bp->port;
4488 int loader_idx = port * 8;
4492 bp->executer_idx = 0;
4495 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4496 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4498 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4500 DMAE_CMD_ENDIANITY_DW_SWAP |
4502 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4505 opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);
4507 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4508 dmae->opcode = opcode;
4509 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
4511 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
4513 dmae->dst_addr_lo = bp->fw_mb >> 2;
4514 dmae->dst_addr_hi = 0;
4515 dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
4518 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4519 dmae->comp_addr_hi = 0;
4522 dmae->comp_addr_lo = 0;
4523 dmae->comp_addr_hi = 0;
4529 /* no need to collect statistics while the link is down */
4533 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4534 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
4535 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4537 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4539 DMAE_CMD_ENDIANITY_DW_SWAP |
4541 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4543 if (bp->phy_flags & PHY_BMAC_FLAG) {
4545 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4546 NIG_REG_INGRESS_BMAC0_MEM);
4548 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4549 BIGMAC_REGISTER_TX_STAT_GTBYT */
4550 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4551 dmae->opcode = opcode;
4552 dmae->src_addr_lo = (mac_addr +
4553 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4554 dmae->src_addr_hi = 0;
4555 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4556 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4557 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4558 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4559 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4560 dmae->comp_addr_hi = 0;
4563 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4564 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4565 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4566 dmae->opcode = opcode;
4567 dmae->src_addr_lo = (mac_addr +
4568 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4569 dmae->src_addr_hi = 0;
4570 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4571 offsetof(struct bmac_stats, rx_gr64));
4572 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4573 offsetof(struct bmac_stats, rx_gr64));
4574 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4575 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4576 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4577 dmae->comp_addr_hi = 0;
4580 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
4582 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4584 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4585 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4586 dmae->opcode = opcode;
4587 dmae->src_addr_lo = (mac_addr +
4588 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4589 dmae->src_addr_hi = 0;
4590 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4591 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4592 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4593 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4594 dmae->comp_addr_hi = 0;
4595 dmae->comp_val = 1;
4597 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4598 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4599 dmae->opcode = opcode;
4600 dmae->src_addr_lo = (mac_addr +
4601 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4602 dmae->src_addr_hi = 0;
4603 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4604 offsetof(struct emac_stats,
4605 rx_falsecarriererrors));
4606 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4607 offsetof(struct emac_stats,
4608 rx_falsecarriererrors));
4609 dmae->len = 1;
4610 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4611 dmae->comp_addr_hi = 0;
4612 dmae->comp_val = 1;
4614 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4615 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4616 dmae->opcode = opcode;
4617 dmae->src_addr_lo = (mac_addr +
4618 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4619 dmae->src_addr_hi = 0;
4620 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4621 offsetof(struct emac_stats,
4622 tx_ifhcoutoctets));
4623 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4624 offsetof(struct emac_stats,
4625 tx_ifhcoutoctets));
4626 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4627 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4628 dmae->comp_addr_hi = 0;
4629 dmae->comp_val = 1;
4630 }
4633 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4634 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4635 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4636 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4637 #ifdef __BIG_ENDIAN
4638 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4639 #else
4640 DMAE_CMD_ENDIANITY_DW_SWAP |
4641 #endif
4642 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4643 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4644 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4645 dmae->src_addr_hi = 0;
4646 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
4647 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
4648 dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
4649 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
4650 offsetof(struct nig_stats, done));
4651 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
4652 offsetof(struct nig_stats, done));
4653 dmae->comp_val = 0xffffffff;
4654 }
4656 static void bnx2x_init_stats(struct bnx2x *bp)
4658 int port = bp->port;
4660 bp->stats_state = STATS_STATE_DISABLE;
4661 bp->executer_idx = 0;
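4662 /* snapshot the BRB discard counter so later deltas start from zero */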
4663 bp->old_brb_discard = REG_RD(bp,
4664 NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4666 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
4667 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
4668 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4670 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
4671 REG_WR(bp, BAR_XSTRORM_INTMEM +
4672 XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4674 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
4675 REG_WR(bp, BAR_TSTRORM_INTMEM +
4676 TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
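4677 /* CSTORM statistics collection stays disabled (flag written as 0 below) */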
4678 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
4679 REG_WR(bp, BAR_CSTRORM_INTMEM +
4680 CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4682 REG_WR(bp, BAR_XSTRORM_INTMEM +
4683 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
4684 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4685 REG_WR(bp, BAR_XSTRORM_INTMEM +
4686 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
4687 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4689 REG_WR(bp, BAR_TSTRORM_INTMEM +
4690 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
4691 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4692 REG_WR(bp, BAR_TSTRORM_INTMEM +
4693 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
4694 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4697 static void bnx2x_stop_stats(struct bnx2x *bp)
4700 if (bp->stats_state != STATS_STATE_DISABLE) {
4701 int timeout = 10;
4703 bp->stats_state = STATS_STATE_STOP;
4704 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
4706 while (bp->stats_state != STATS_STATE_DISABLE) {
4707 if (!timeout) {
4708 BNX2X_ERR("timeout waiting for stats stop\n");
4709 break;
4710 }
4711 timeout--;
4712 msleep(100);
4713 }
4714 }
4715 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
4719 * Statistics service functions
4722 static void bnx2x_update_bmac_stats(struct bnx2x *bp)
4726 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
4727 struct bmac_stats *old = &bp->old_bmac;
4728 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4733 UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
4734 tx_gtbyt.lo, total_bytes_transmitted_lo);
4736 UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
4737 tx_gtmca.lo, total_multicast_packets_transmitted_lo);
4738 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
4740 UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
4741 tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
4742 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
4744 UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
4745 tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
4746 SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
4747 estats->total_unicast_packets_transmitted_lo, sum.lo);
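4748 /* tx_gtpkt counted all good packets; subtracting mcast + bcast (sum) leaves unicast */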
4749 UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
4750 UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
4751 UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
4752 UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
4753 UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
4754 UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
4755 UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
4756 UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
4757 UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
4758 UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
4759 UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);
4761 UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
4762 UPDATE_STAT(rx_grund.lo, runt_packets_received);
4763 UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
4764 UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
4765 UPDATE_STAT(rx_grxcf.lo, control_frames_received);
4766 /* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
4767 UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
4768 UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);
4770 UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
4771 rx_grerb.lo, stat_IfHCInBadOctets_lo);
4772 UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
4773 tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
4774 UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
4775 /* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
4776 estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
4779 static void bnx2x_update_emac_stats(struct bnx2x *bp)
4781 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
4782 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4784 UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
4785 total_bytes_transmitted_lo);
4786 UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
4787 total_unicast_packets_transmitted_hi,
4788 total_unicast_packets_transmitted_lo);
4789 UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
4790 total_multicast_packets_transmitted_hi,
4791 total_multicast_packets_transmitted_lo);
4792 UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
4793 total_broadcast_packets_transmitted_hi,
4794 total_broadcast_packets_transmitted_lo);
4796 estats->pause_xon_frames_transmitted += new->tx_outxonsent;
4797 estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
4798 estats->single_collision_transmit_frames +=
4799 new->tx_dot3statssinglecollisionframes;
4800 estats->multiple_collision_transmit_frames +=
4801 new->tx_dot3statsmultiplecollisionframes;
4802 estats->late_collision_frames += new->tx_dot3statslatecollisions;
4803 estats->excessive_collision_frames +=
4804 new->tx_dot3statsexcessivecollisions;
4805 estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
4806 estats->frames_transmitted_65_127_bytes +=
4807 new->tx_etherstatspkts65octetsto127octets;
4808 estats->frames_transmitted_128_255_bytes +=
4809 new->tx_etherstatspkts128octetsto255octets;
4810 estats->frames_transmitted_256_511_bytes +=
4811 new->tx_etherstatspkts256octetsto511octets;
4812 estats->frames_transmitted_512_1023_bytes +=
4813 new->tx_etherstatspkts512octetsto1023octets;
4814 estats->frames_transmitted_1024_1522_bytes +=
4815 new->tx_etherstatspkts1024octetsto1522octet;
4816 estats->frames_transmitted_1523_9022_bytes +=
4817 new->tx_etherstatspktsover1522octets;
4819 estats->crc_receive_errors += new->rx_dot3statsfcserrors;
4820 estats->alignment_errors += new->rx_dot3statsalignmenterrors;
4821 estats->false_carrier_detections += new->rx_falsecarriererrors;
4822 estats->runt_packets_received += new->rx_etherstatsundersizepkts;
4823 estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
4824 estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
4825 estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
4826 estats->control_frames_received += new->rx_maccontrolframesreceived;
4827 estats->error_runt_packets_received += new->rx_etherstatsfragments;
4828 estats->error_jabber_packets_received += new->rx_etherstatsjabbers;
4830 UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
4831 stat_IfHCInBadOctets_lo);
4832 UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
4833 stat_IfHCOutBadOctets_lo);
4834 estats->stat_Dot3statsInternalMacTransmitErrors +=
4835 new->tx_dot3statsinternalmactransmiterrors;
4836 estats->stat_Dot3StatsCarrierSenseErrors +=
4837 new->rx_dot3statscarriersenseerrors;
4838 estats->stat_Dot3StatsDeferredTransmissions +=
4839 new->tx_dot3statsdeferredtransmissions;
4840 estats->stat_FlowControlDone += new->tx_flowcontroldone;
4841 estats->stat_XoffStateEntered += new->rx_xoffstateentered;
4844 static int bnx2x_update_storm_stats(struct bnx2x *bp)
4846 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4847 struct tstorm_common_stats *tstats = &stats->tstorm_common;
4848 struct tstorm_per_client_stats *tclient =
4849 &tstats->client_statistics[0];
4850 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
4851 struct xstorm_common_stats *xstats = &stats->xstorm_common;
4852 struct nig_stats *nstats = bnx2x_sp(bp, nig);
4853 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4854 u32 diff;
4856 /* are DMAE stats valid? */
4857 if (nstats->done != 0xffffffff) {
4858 DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
4859 return -1;
4860 }
4862 /* are storm stats valid? */
4863 if (tstats->done.hi != 0xffffffff) {
4864 DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
4865 return -2;
4866 }
4867 if (xstats->done.hi != 0xffffffff) {
4868 DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
4869 return -3;
4870 }
4872 estats->total_bytes_received_hi =
4873 estats->valid_bytes_received_hi =
4874 le32_to_cpu(tclient->total_rcv_bytes.hi);
4875 estats->total_bytes_received_lo =
4876 estats->valid_bytes_received_lo =
4877 le32_to_cpu(tclient->total_rcv_bytes.lo);
4878 ADD_64(estats->total_bytes_received_hi,
4879 le32_to_cpu(tclient->rcv_error_bytes.hi),
4880 estats->total_bytes_received_lo,
4881 le32_to_cpu(tclient->rcv_error_bytes.lo));
4883 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4884 total_unicast_packets_received_hi,
4885 total_unicast_packets_received_lo);
4886 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4887 total_multicast_packets_received_hi,
4888 total_multicast_packets_received_lo);
4889 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4890 total_broadcast_packets_received_hi,
4891 total_broadcast_packets_received_lo);
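4892 /* per-size RX histograms exist only in the MAC blocks, not in the storm stats */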
4893 estats->frames_received_64_bytes = MAC_STX_NA;
4894 estats->frames_received_65_127_bytes = MAC_STX_NA;
4895 estats->frames_received_128_255_bytes = MAC_STX_NA;
4896 estats->frames_received_256_511_bytes = MAC_STX_NA;
4897 estats->frames_received_512_1023_bytes = MAC_STX_NA;
4898 estats->frames_received_1024_1522_bytes = MAC_STX_NA;
4899 estats->frames_received_1523_9022_bytes = MAC_STX_NA;
4901 estats->x_total_sent_bytes_hi =
4902 le32_to_cpu(xstats->total_sent_bytes.hi);
4903 estats->x_total_sent_bytes_lo =
4904 le32_to_cpu(xstats->total_sent_bytes.lo);
4905 estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);
4907 estats->t_rcv_unicast_bytes_hi =
4908 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
4909 estats->t_rcv_unicast_bytes_lo =
4910 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
4911 estats->t_rcv_broadcast_bytes_hi =
4912 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4913 estats->t_rcv_broadcast_bytes_lo =
4914 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4915 estats->t_rcv_multicast_bytes_hi =
4916 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
4917 estats->t_rcv_multicast_bytes_lo =
4918 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
4919 estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);
4921 estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
4922 estats->packets_too_big_discard =
4923 le32_to_cpu(tclient->packets_too_big_discard);
4924 estats->jabber_packets_received = estats->packets_too_big_discard +
4925 estats->stat_Dot3statsFramesTooLong;
4926 estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
4927 estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
4928 estats->mac_discard = le32_to_cpu(tclient->mac_discard);
4929 estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
4930 estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
4931 estats->brb_truncate_discard =
4932 le32_to_cpu(tstats->brb_truncate_discard);
4934 estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
4935 bp->old_brb_discard = nstats->brb_discard;
4937 estats->brb_packet = nstats->brb_packet;
4938 estats->brb_truncate = nstats->brb_truncate;
4939 estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
4940 estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
4941 estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
4942 estats->mng_discard = nstats->mng_discard;
4943 estats->mng_octet_inp = nstats->mng_octet_inp;
4944 estats->mng_octet_out = nstats->mng_octet_out;
4945 estats->mng_packet_inp = nstats->mng_packet_inp;
4946 estats->mng_packet_out = nstats->mng_packet_out;
4947 estats->pbf_octets = nstats->pbf_octets;
4948 estats->pbf_packet = nstats->pbf_packet;
4949 estats->safc_inp = nstats->safc_inp;
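4950 /* re-arm the done markers so stale storm data is detected on the next pass */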
4951 xstats->done.hi = 0;
4952 tstats->done.hi = 0;
4954 return 0;
4955 }
4958 static void bnx2x_update_net_stats(struct bnx2x *bp)
4960 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4961 struct net_device_stats *nstats = &bp->dev->stats;
4963 nstats->rx_packets =
4964 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4965 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4966 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4968 nstats->tx_packets =
4969 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4970 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4971 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4973 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4975 nstats->tx_bytes =
4976 bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4978 nstats->rx_dropped = estats->checksum_discard +
4979 estats->mac_discard;
4980 nstats->tx_dropped = 0;
4982 nstats->multicast =
4983 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
4985 nstats->collisions =
4986 estats->single_collision_transmit_frames +
4987 estats->multiple_collision_transmit_frames +
4988 estats->late_collision_frames +
4989 estats->excessive_collision_frames;
4991 nstats->rx_length_errors = estats->runt_packets_received +
4992 estats->jabber_packets_received;
4993 nstats->rx_over_errors = estats->no_buff_discard;
4994 nstats->rx_crc_errors = estats->crc_receive_errors;
4995 nstats->rx_frame_errors = estats->alignment_errors;
4996 nstats->rx_fifo_errors = estats->brb_discard +
4997 estats->brb_truncate_discard;
4998 nstats->rx_missed_errors = estats->xxoverflow_discard;
5000 nstats->rx_errors = nstats->rx_length_errors +
5001 nstats->rx_over_errors +
5002 nstats->rx_crc_errors +
5003 nstats->rx_frame_errors +
5004 nstats->rx_fifo_errors;
5006 nstats->tx_aborted_errors = estats->late_collision_frames +
5007 estats->excessive_collision_frames;
5008 nstats->tx_carrier_errors = estats->false_carrier_detections;
5009 nstats->tx_fifo_errors = 0;
5010 nstats->tx_heartbeat_errors = 0;
5011 nstats->tx_window_errors = 0;
5013 nstats->tx_errors = nstats->tx_aborted_errors +
5014 nstats->tx_carrier_errors;
5016 estats->mac_stx_start = ++estats->mac_stx_end;
5019 static void bnx2x_update_stats(struct bnx2x *bp)
5023 if (!bnx2x_update_storm_stats(bp)) {
5025 if (bp->phy_flags & PHY_BMAC_FLAG) {
5026 bnx2x_update_bmac_stats(bp);
5028 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
5029 bnx2x_update_emac_stats(bp);
5031 } else { /* unreached */
5032 BNX2X_ERR("no MAC active\n");
5033 return;
5034 }
5036 bnx2x_update_net_stats(bp);
5037 }
5039 if (bp->msglevel & NETIF_MSG_TIMER) {
5040 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
5041 struct net_device_stats *nstats = &bp->dev->stats;
5043 printk(KERN_DEBUG "%s:\n", bp->dev->name);
5044 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
5045 " tx pkt (%lx)\n",
5046 bnx2x_tx_avail(bp->fp),
5047 *bp->fp->tx_cons_sb, nstats->tx_packets);
5048 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
5049 " rx pkt (%lx)\n",
5050 (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
5051 *bp->fp->rx_cons_sb, nstats->rx_packets);
5052 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
5053 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
5054 estats->driver_xoff, estats->brb_discard);
5055 printk(KERN_DEBUG "tstats: checksum_discard %u "
5056 "packets_too_big_discard %u no_buff_discard %u "
5057 "mac_discard %u mac_filter_discard %u "
5058 "xxovrflow_discard %u brb_truncate_discard %u "
5059 "ttl0_discard %u\n",
5060 estats->checksum_discard,
5061 estats->packets_too_big_discard,
5062 estats->no_buff_discard, estats->mac_discard,
5063 estats->mac_filter_discard, estats->xxoverflow_discard,
5064 estats->brb_truncate_discard, estats->ttl0_discard);
5066 for_each_queue(bp, i) {
5067 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
5068 bnx2x_fp(bp, i, tx_pkt),
5069 bnx2x_fp(bp, i, rx_pkt),
5070 bnx2x_fp(bp, i, rx_calls));
5074 if (bp->state != BNX2X_STATE_OPEN) {
5075 DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
5076 return;
5077 }
5079 #ifdef BNX2X_STOP_ON_ERROR
5080 if (unlikely(bp->panic))
5081 return;
5082 #endif
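5084 /* post a sub-DMAE that copies the prepared command chain into the loader slot */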
5085 if (bp->executer_idx) {
5086 struct dmae_command *dmae = &bp->dmae;
5087 int port = bp->port;
5088 int loader_idx = port * 8;
5090 memset(dmae, 0, sizeof(struct dmae_command));
5092 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
5093 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
5094 DMAE_CMD_DST_RESET |
5095 #ifdef __BIG_ENDIAN
5096 DMAE_CMD_ENDIANITY_B_DW_SWAP |
5097 #else
5098 DMAE_CMD_ENDIANITY_DW_SWAP |
5099 #endif
5100 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
5101 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
5102 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
5103 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
5104 sizeof(struct dmae_command) *
5105 (loader_idx + 1)) >> 2;
5106 dmae->dst_addr_hi = 0;
5107 dmae->len = sizeof(struct dmae_command) >> 2;
5108 dmae->len--; /* !!! for A0/1 only */
5109 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
5110 dmae->comp_addr_hi = 0;
5111 dmae->comp_val = 1;
5113 bnx2x_post_dmae(bp, dmae, loader_idx);
5114 }
5116 if (bp->stats_state != STATS_STATE_ENABLE) {
5117 bp->stats_state = STATS_STATE_DISABLE;
5118 return;
5119 }
5121 if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
5122 /* stats ramrod has its own slot on the spe */
5124 bp->stat_pending = 1;
5128 static void bnx2x_timer(unsigned long data)
5130 struct bnx2x *bp = (struct bnx2x *) data;
5132 if (!netif_running(bp->dev))
5133 return;
5135 if (atomic_read(&bp->intr_sem) != 0)
5136 goto timer_restart;
5138 if (poll) {
5139 struct bnx2x_fastpath *fp = &bp->fp[0];
5140 int rc;
5142 bnx2x_tx_int(fp, 1000);
5143 rc = bnx2x_rx_int(fp, 1000);
5144 }
5146 if (!nomcp) {
5147 int port = bp->port;
5148 u32 drv_pulse;
5149 u32 mcp_pulse;
5151 ++bp->fw_drv_pulse_wr_seq;
5152 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5153 /* TBD - add SYSTEM_TIME */
5154 drv_pulse = bp->fw_drv_pulse_wr_seq;
5155 SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);
5157 mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
5158 MCP_PULSE_SEQ_MASK);
5159 /* The delta between driver pulse and mcp response
5160 * should be 1 (before mcp response) or 0 (after mcp response)
5162 if ((drv_pulse != mcp_pulse) &&
5163 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5164 /* someone lost a heartbeat... */
5165 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5166 drv_pulse, mcp_pulse);
5170 if (bp->stats_state == STATS_STATE_DISABLE)
5171 goto timer_restart;
5173 bnx2x_update_stats(bp);
5175 timer_restart:
5176 mod_timer(&bp->timer, jiffies + bp->current_interval);
5177 }
5179 /* end of Statistics */
5184 * nic init service functions
5187 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5188 dma_addr_t mapping, int id)
5190 int port = bp->port;
5195 section = ((u64)mapping) + offsetof(struct host_status_block,
5196 u_status_block);
5197 sb->u_status_block.status_block_id = id;
5199 REG_WR(bp, BAR_USTRORM_INTMEM +
5200 USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5201 REG_WR(bp, BAR_USTRORM_INTMEM +
5202 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5203 U64_HI(section));
5205 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5206 REG_WR16(bp, BAR_USTRORM_INTMEM +
5207 USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5210 section = ((u64)mapping) + offsetof(struct host_status_block,
5211 c_status_block);
5212 sb->c_status_block.status_block_id = id;
5214 REG_WR(bp, BAR_CSTRORM_INTMEM +
5215 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5216 REG_WR(bp, BAR_CSTRORM_INTMEM +
5217 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5218 U64_HI(section));
5220 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5221 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5222 CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
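5223 /* the loops above park every index disabled; bnx2x_update_coalesce() re-enables the live ones */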
5224 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5227 static void bnx2x_init_def_sb(struct bnx2x *bp,
5228 struct host_def_status_block *def_sb,
5229 dma_addr_t mapping, int id)
5231 int port = bp->port;
5232 int index, val, reg_offset;
5236 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5237 atten_status_block);
5238 def_sb->atten_status_block.status_block_id = id;
5240 bp->def_att_idx = 0;
5243 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5244 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5246 for (index = 0; index < 3; index++) {
5247 bp->attn_group[index].sig[0] = REG_RD(bp,
5248 reg_offset + 0x10*index);
5249 bp->attn_group[index].sig[1] = REG_RD(bp,
5250 reg_offset + 0x4 + 0x10*index);
5251 bp->attn_group[index].sig[2] = REG_RD(bp,
5252 reg_offset + 0x8 + 0x10*index);
5253 bp->attn_group[index].sig[3] = REG_RD(bp,
5254 reg_offset + 0xc + 0x10*index);
5257 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5258 MISC_REG_AEU_MASK_ATTN_FUNC_0));
5260 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5261 HC_REG_ATTN_MSG0_ADDR_L);
5263 REG_WR(bp, reg_offset, U64_LO(section));
5264 REG_WR(bp, reg_offset + 4, U64_HI(section));
5266 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5268 val = REG_RD(bp, reg_offset);
5270 REG_WR(bp, reg_offset, val);
5273 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5274 u_def_status_block);
5275 def_sb->u_def_status_block.status_block_id = id;
5279 REG_WR(bp, BAR_USTRORM_INTMEM +
5280 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5281 REG_WR(bp, BAR_USTRORM_INTMEM +
5282 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5283 U64_HI(section));
5284 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
5285 BNX2X_BTR);
5287 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5288 REG_WR16(bp, BAR_USTRORM_INTMEM +
5289 USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5292 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5293 c_def_status_block);
5294 def_sb->c_def_status_block.status_block_id = id;
5298 REG_WR(bp, BAR_CSTRORM_INTMEM +
5299 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5300 REG_WR(bp, BAR_CSTRORM_INTMEM +
5301 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5302 U64_HI(section));
5303 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
5304 BNX2X_BTR);
5306 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5307 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5308 CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5311 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5312 t_def_status_block);
5313 def_sb->t_def_status_block.status_block_id = id;
5317 REG_WR(bp, BAR_TSTRORM_INTMEM +
5318 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5319 REG_WR(bp, BAR_TSTRORM_INTMEM +
5320 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5321 U64_HI(section));
5322 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
5323 BNX2X_BTR);
5325 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5326 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5327 TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5330 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5331 x_def_status_block);
5332 def_sb->x_def_status_block.status_block_id = id;
5336 REG_WR(bp, BAR_XSTRORM_INTMEM +
5337 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5338 REG_WR(bp, BAR_XSTRORM_INTMEM +
5339 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5340 U64_HI(section));
5341 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
5342 BNX2X_BTR);
5344 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5345 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5346 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5348 bp->stat_pending = 0;
5350 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5353 static void bnx2x_update_coalesce(struct bnx2x *bp)
5355 int port = bp->port;
5358 for_each_queue(bp, i) {
5360 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5361 REG_WR8(bp, BAR_USTRORM_INTMEM +
5362 USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5363 HC_INDEX_U_ETH_RX_CQ_CONS),
5364 bp->rx_ticks_int/12);
5365 REG_WR16(bp, BAR_USTRORM_INTMEM +
5366 USTORM_SB_HC_DISABLE_OFFSET(port, i,
5367 HC_INDEX_U_ETH_RX_CQ_CONS),
5368 bp->rx_ticks_int ? 0 : 1);
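5369 /* rx_ticks_int == 0 leaves the index disabled (writes 1 to HC_DISABLE) */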
5370 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5371 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5372 CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5373 HC_INDEX_C_ETH_TX_CQ_CONS),
5374 bp->tx_ticks_int/12);
5375 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5376 CSTORM_SB_HC_DISABLE_OFFSET(port, i,
5377 HC_INDEX_C_ETH_TX_CQ_CONS),
5378 bp->tx_ticks_int ? 0 : 1);
5382 static void bnx2x_init_rx_rings(struct bnx2x *bp)
5386 int port = bp->port;
5388 bp->rx_buf_use_size = bp->dev->mtu;
5390 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
5391 bp->rx_buf_size = bp->rx_buf_use_size + 64;
5393 for_each_queue(bp, j) {
5394 struct bnx2x_fastpath *fp = &bp->fp[j];
5397 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5399 for (i = 1; i <= NUM_RX_RINGS; i++) {
5400 struct eth_rx_bd *rx_bd;
5402 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5403 rx_bd->addr_hi =
5404 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5405 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5406 rx_bd->addr_lo =
5407 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5408 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5412 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5413 struct eth_rx_cqe_next_page *nextpg;
5415 nextpg = (struct eth_rx_cqe_next_page *)
5416 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5417 nextpg->addr_hi =
5418 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5419 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5420 nextpg->addr_lo =
5421 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5422 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5425 /* rx completion queue */
5426 fp->rx_comp_cons = ring_prod = 0;
5428 for (i = 0; i < bp->rx_ring_size; i++) {
5429 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5430 BNX2X_ERR("was only able to allocate "
5434 ring_prod = NEXT_RX_IDX(ring_prod);
5435 BUG_TRAP(ring_prod > i);
5438 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
5439 fp->rx_pkt = fp->rx_calls = 0;
5441 /* Warning! This will generate an interrupt (to the TSTORM) */
5442 /* must only be done when the chip is initialized */
5443 REG_WR(bp, BAR_TSTRORM_INTMEM +
5444 TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
5446 if (j != 0)
5447 continue;
5448 REG_WR(bp, BAR_USTRORM_INTMEM +
5449 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
5450 U64_LO(fp->rx_comp_mapping));
5451 REG_WR(bp, BAR_USTRORM_INTMEM +
5452 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
5453 U64_HI(fp->rx_comp_mapping));
5457 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5461 for_each_queue(bp, j) {
5462 struct bnx2x_fastpath *fp = &bp->fp[j];
5464 for (i = 1; i <= NUM_TX_RINGS; i++) {
5465 struct eth_tx_bd *tx_bd =
5466 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
5468 tx_bd->addr_hi =
5469 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5470 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5471 tx_bd->addr_lo =
5472 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5473 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5476 fp->tx_pkt_prod = 0;
5477 fp->tx_pkt_cons = 0;
5478 fp->tx_bd_prod = 0;
5479 fp->tx_bd_cons = 0;
5480 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5485 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5487 int port = bp->port;
5489 spin_lock_init(&bp->spq_lock);
5491 bp->spq_left = MAX_SPQ_PENDING;
5492 bp->spq_prod_idx = 0;
5493 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5494 bp->spq_prod_bd = bp->spq;
5495 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5497 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port),
5498 U64_LO(bp->spq_mapping));
5499 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4,
5500 U64_HI(bp->spq_mapping));
5502 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port),
5503 bp->spq_prod_idx);
5504 }
5506 static void bnx2x_init_context(struct bnx2x *bp)
5510 for_each_queue(bp, i) {
5511 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5512 struct bnx2x_fastpath *fp = &bp->fp[i];
5514 context->xstorm_st_context.tx_bd_page_base_hi =
5515 U64_HI(fp->tx_desc_mapping);
5516 context->xstorm_st_context.tx_bd_page_base_lo =
5517 U64_LO(fp->tx_desc_mapping);
5518 context->xstorm_st_context.db_data_addr_hi =
5519 U64_HI(fp->tx_prods_mapping);
5520 context->xstorm_st_context.db_data_addr_lo =
5521 U64_LO(fp->tx_prods_mapping);
5523 context->ustorm_st_context.rx_bd_page_base_hi =
5524 U64_HI(fp->rx_desc_mapping);
5525 context->ustorm_st_context.rx_bd_page_base_lo =
5526 U64_LO(fp->rx_desc_mapping);
5527 context->ustorm_st_context.status_block_id = i;
5528 context->ustorm_st_context.sb_index_number =
5529 HC_INDEX_U_ETH_RX_CQ_CONS;
5530 context->ustorm_st_context.rcq_base_address_hi =
5531 U64_HI(fp->rx_comp_mapping);
5532 context->ustorm_st_context.rcq_base_address_lo =
5533 U64_LO(fp->rx_comp_mapping);
5534 context->ustorm_st_context.flags =
5535 USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
5536 context->ustorm_st_context.mc_alignment_size = 64;
5537 context->ustorm_st_context.num_rss = bp->num_queues;
5539 context->cstorm_st_context.sb_index_number =
5540 HC_INDEX_C_ETH_TX_CQ_CONS;
5541 context->cstorm_st_context.status_block_id = i;
5543 context->xstorm_ag_context.cdu_reserved =
5544 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5545 CDU_REGION_NUMBER_XCM_AG,
5546 ETH_CONNECTION_TYPE);
5547 context->ustorm_ag_context.cdu_usage =
5548 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5549 CDU_REGION_NUMBER_UCM_AG,
5550 ETH_CONNECTION_TYPE);
5554 static void bnx2x_init_ind_table(struct bnx2x *bp)
5556 int port = bp->port;
5557 int i;
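5559 /* spread the indirection table entries round-robin across the active queues */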
5562 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5563 REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
5564 i % bp->num_queues);
5566 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5569 static void bnx2x_set_client_config(struct bnx2x *bp)
5572 int mode = bp->rx_mode;
5574 int i, port = bp->port;
5575 struct tstorm_eth_client_config tstorm_client = {0};
5577 tstorm_client.mtu = bp->dev->mtu;
5578 tstorm_client.statistics_counter_id = 0;
5579 tstorm_client.config_flags =
5580 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
5582 if (mode && bp->vlgrp) {
5583 tstorm_client.config_flags |=
5584 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
5585 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5588 if (mode != BNX2X_RX_MODE_PROMISC)
5589 tstorm_client.drop_flags =
5590 TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR;
5592 for_each_queue(bp, i) {
5593 REG_WR(bp, BAR_TSTRORM_INTMEM +
5594 TSTORM_CLIENT_CONFIG_OFFSET(port, i),
5595 ((u32 *)&tstorm_client)[0]);
5596 REG_WR(bp, BAR_TSTRORM_INTMEM +
5597 TSTORM_CLIENT_CONFIG_OFFSET(port, i) + 4,
5598 ((u32 *)&tstorm_client)[1]);
5601 /* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
5602 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
5605 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5607 int mode = bp->rx_mode;
5608 int port = bp->port;
5609 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5612 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
5614 switch (mode) {
5615 case BNX2X_RX_MODE_NONE: /* no Rx */
5616 tstorm_mac_filter.ucast_drop_all = 1;
5617 tstorm_mac_filter.mcast_drop_all = 1;
5618 tstorm_mac_filter.bcast_drop_all = 1;
5619 break;
5620 case BNX2X_RX_MODE_NORMAL:
5621 tstorm_mac_filter.bcast_accept_all = 1;
5622 break;
5623 case BNX2X_RX_MODE_ALLMULTI:
5624 tstorm_mac_filter.mcast_accept_all = 1;
5625 tstorm_mac_filter.bcast_accept_all = 1;
5626 break;
5627 case BNX2X_RX_MODE_PROMISC:
5628 tstorm_mac_filter.ucast_accept_all = 1;
5629 tstorm_mac_filter.mcast_accept_all = 1;
5630 tstorm_mac_filter.bcast_accept_all = 1;
5631 break;
5632 default:
5633 BNX2X_ERR("bad rx mode (%d)\n", mode);
5634 break;
5635 }
5636 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5637 REG_WR(bp, BAR_TSTRORM_INTMEM +
5638 TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4,
5639 ((u32 *)&tstorm_mac_filter)[i]);
5641 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5642 ((u32 *)&tstorm_mac_filter)[i]); */
5645 if (mode != BNX2X_RX_MODE_NONE)
5646 bnx2x_set_client_config(bp);
5649 static void bnx2x_init_internal(struct bnx2x *bp)
5651 int port = bp->port;
5652 struct tstorm_eth_function_common_config tstorm_config = {0};
5653 struct stats_indication_flags stats_flags = {0};
5656 tstorm_config.config_flags = MULTI_FLAGS;
5657 tstorm_config.rss_result_mask = MULTI_MASK;
5660 REG_WR(bp, BAR_TSTRORM_INTMEM +
5661 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port),
5662 (*(u32 *)&tstorm_config));
5664 /* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
5665 (*(u32 *)&tstorm_config)); */
5667 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5668 bnx2x_set_storm_rx_mode(bp);
5670 stats_flags.collect_eth = cpu_to_le32(1);
5672 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
5673 ((u32 *)&stats_flags)[0]);
5674 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
5675 ((u32 *)&stats_flags)[1]);
5677 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
5678 ((u32 *)&stats_flags)[0]);
5679 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
5680 ((u32 *)&stats_flags)[1]);
5682 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
5683 ((u32 *)&stats_flags)[0]);
5684 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
5685 ((u32 *)&stats_flags)[1]);
5687 /* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
5688 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
5691 static void bnx2x_nic_init(struct bnx2x *bp)
5695 for_each_queue(bp, i) {
5696 struct bnx2x_fastpath *fp = &bp->fp[i];
5698 fp->state = BNX2X_FP_STATE_CLOSED;
5699 DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
5700 bp, fp->status_blk, i);
5702 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i);
5705 bnx2x_init_def_sb(bp, bp->def_status_blk,
5706 bp->def_status_blk_mapping, 0x10);
5707 bnx2x_update_coalesce(bp);
5708 bnx2x_init_rx_rings(bp);
5709 bnx2x_init_tx_ring(bp);
5710 bnx2x_init_sp_ring(bp);
5711 bnx2x_init_context(bp);
5712 bnx2x_init_internal(bp);
5713 bnx2x_init_stats(bp);
5714 bnx2x_init_ind_table(bp);
5715 bnx2x_enable_int(bp);
5719 /* end of nic init */
5722 * gzip service functions
5725 static int bnx2x_gunzip_init(struct bnx2x *bp)
5727 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5728 &bp->gunzip_mapping);
5729 if (bp->gunzip_buf == NULL)
5732 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5733 if (bp->strm == NULL)
5736 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5738 if (bp->strm->workspace == NULL)
5748 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5749 bp->gunzip_mapping);
5750 bp->gunzip_buf = NULL;
5753 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5754 " decompression\n", bp->dev->name);
5758 static void bnx2x_gunzip_end(struct bnx2x *bp)
5760 kfree(bp->strm->workspace);
5765 if (bp->gunzip_buf) {
5766 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5767 bp->gunzip_mapping);
5768 bp->gunzip_buf = NULL;
5772 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5776 /* check gzip header */
5777 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5778 return -EINVAL;
5780 n = 10;
5782 #define FNAME 0x8
5784 if (zbuf[3] & FNAME)
5785 while ((zbuf[n++] != 0) && (n < len));
5787 bp->strm->next_in = zbuf + n;
5788 bp->strm->avail_in = len - n;
5789 bp->strm->next_out = bp->gunzip_buf;
5790 bp->strm->avail_out = FW_BUF_SIZE;
5792 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5796 rc = zlib_inflate(bp->strm, Z_FINISH);
5797 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5798 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5799 bp->dev->name, bp->strm->msg);
5801 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5802 if (bp->gunzip_outlen & 0x3)
5803 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5804 " gunzip_outlen (%d) not aligned\n",
5805 bp->dev->name, bp->gunzip_outlen);
5806 bp->gunzip_outlen >>= 2;
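5807 /* gunzip_outlen is consumed as a DWORD count by the init code from here on */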
5808 zlib_inflateEnd(bp->strm);
5810 if (rc == Z_STREAM_END)
5816 /* nic load/unload */
5819 * general service functions
5822 /* send a NIG loopback debug packet */
5823 static void bnx2x_lb_pckt(struct bnx2x *bp)
5829 /* Ethernet source and destination addresses */
5831 wb_write[0] = 0x55555555;
5832 wb_write[1] = 0x55555555;
5833 wb_write[2] = 0x20; /* SOP */
5834 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5836 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
5837 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5839 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
5842 /* NON-IP protocol */
5844 wb_write[0] = 0x09000000;
5845 wb_write[1] = 0x55555555;
5846 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5847 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5849 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
5850 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5851 /* EOP, eop_bvalid = 0 */
5852 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
5856 /* some of the internal memories
5857 * are not directly readable from the driver;
5858 * to test them, we send debug packets
5859 */
5860 static int bnx2x_int_mem_test(struct bnx2x *bp)
5866 switch (CHIP_REV(bp)) {
5878 DP(NETIF_MSG_HW, "start part1\n");
5880 /* Disable inputs of parser neighbor blocks */
5881 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5882 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5883 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5884 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5886 /* Write 0 to parser credits for CFC search request */
5887 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5889 /* send Ethernet packet */
5892 /* TODO: do we reset the NIG statistics? */
5893 /* Wait until NIG register shows 1 packet of size 0x10 */
5894 count = 1000 * factor;
5896 #ifdef BNX2X_DMAE_RD
5897 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5898 val = *bnx2x_sp(bp, wb_data[0]);
5900 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5901 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5910 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5914 /* Wait until PRS register shows 1 packet */
5915 count = 1000 * factor;
5917 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5926 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5930 /* Reset and init BRB, PRS */
5931 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
5933 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
5935 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5936 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5938 DP(NETIF_MSG_HW, "part2\n");
5940 /* Disable inputs of parser neighbor blocks */
5941 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5942 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5943 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5944 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5946 /* Write 0 to parser credits for CFC search request */
5947 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5949 /* send 10 Ethernet packets */
5950 for (i = 0; i < 10; i++)
5953 /* Wait until NIG register shows 10 + 1
5954 packets of size 11*0x10 = 0xb0 */
5955 count = 1000 * factor;
5957 #ifdef BNX2X_DMAE_RD
5958 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5959 val = *bnx2x_sp(bp, wb_data[0]);
5961 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5962 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5971 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5975 /* Wait until PRS register shows 2 packets */
5976 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5978 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5980 /* Write 1 to parser credits for CFC search request */
5981 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5983 /* Wait until PRS register shows 3 packets */
5984 msleep(10 * factor);
5985 /* Wait until NIG register shows 1 packet of size 0x10 */
5986 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5988 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5990 /* clear NIG EOP FIFO */
5991 for (i = 0; i < 11; i++)
5992 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5993 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5995 BNX2X_ERR("clear of NIG failed\n");
5999 /* Reset and init BRB, PRS, NIG */
6000 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6002 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6004 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6005 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6008 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6011 /* Enable inputs of parser neighbor blocks */
6012 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6013 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6014 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6015 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
6017 DP(NETIF_MSG_HW, "done\n");
6022 static void enable_blocks_attention(struct bnx2x *bp)
6024 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6025 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6026 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6027 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6028 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6029 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6030 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6031 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6032 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6033 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6034 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6035 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6036 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6037 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6038 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6039 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6040 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6041 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6042 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6043 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6044 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6045 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6046 REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000);
6047 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6048 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6049 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6050 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6051 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6052 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6053 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6054 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6055 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
6058 static int bnx2x_function_init(struct bnx2x *bp, int mode)
6060 int func = bp->port;
6061 int port = func ? PORT1 : PORT0;
6067 DP(BNX2X_MSG_MCP, "function is %d mode is %x\n", func, mode);
6068 if ((func != 0) && (func != 1)) {
6069 BNX2X_ERR("BAD function number (%d)\n", func);
6073 bnx2x_gunzip_init(bp);
6075 if (mode & 0x1) { /* init common */
6076 DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n",
6078 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6080 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6082 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
6084 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6086 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6088 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
6089 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
6093 if (CHIP_REV(bp) == CHIP_REV_Ax) {
6094 /* enable HW interrupt from PXP on USDM
6095 overflow bit 16 on INT_MASK_0 */
6096 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6100 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6101 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6102 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6103 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6104 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6105 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
6107 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6108 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6109 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6110 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6111 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6116 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6119 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5);
6121 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6122 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6123 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6126 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
6128 /* let the HW do its magic ... */
6129 msleep(100);
6130 /* finish PXP init
6131 (can be moved up if we want to use the DMAE) */
6132 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6134 BNX2X_ERR("PXP2 CFG failed\n");
6138 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6140 BNX2X_ERR("PXP2 RD_INIT failed\n");
6144 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6145 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6147 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6149 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
6150 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
6151 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
6152 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
6154 #ifdef BNX2X_DMAE_RD
6155 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6156 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6157 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6158 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6160 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER);
6161 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4);
6162 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8);
6163 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER);
6164 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4);
6165 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8);
6166 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER);
6167 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4);
6168 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8);
6169 REG_RD(bp, USEM_REG_PASSIVE_BUFFER);
6170 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4);
6171 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
6173 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
6174 /* soft reset pulse */
6175 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6176 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6179 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
6181 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
6182 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS);
6183 if (CHIP_REV(bp) == CHIP_REV_Ax) {
6184 /* enable hw interrupt from doorbell Q */
6185 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6188 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6190 if (CHIP_REV_IS_SLOW(bp)) {
6191 /* fix for emulation and FPGA for no pause */
6192 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
6193 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
6194 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
6195 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
6198 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6200 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
6201 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
6202 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
6203 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
6205 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6206 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6207 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6208 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6210 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
6211 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
6212 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
6213 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
6216 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6218 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6221 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
6222 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
6223 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
6225 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6226 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6227 REG_WR(bp, i, 0xc0cac01a);
6228 /* TODO: replace with something meaningful */
6230 /* SRCH COMMON comes here */
6231 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6233 if (sizeof(union cdu_context) != 1024) {
6234 /* we currently assume that a context is 1024 bytes */
6235 printk(KERN_ALERT PFX "please adjust the size of"
6236 " cdu_context(%ld)\n",
6237 (long)sizeof(union cdu_context));
6239 val = (4 << 24) + (0 << 12) + 1024;
6240 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6241 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
6243 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
6244 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6246 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
6247 bnx2x_init_block(bp, MISC_AEU_COMMON_START,
6248 MISC_AEU_COMMON_END);
6249 /* RXPCS COMMON comes here */
6250 /* EMAC0 COMMON comes here */
6251 /* EMAC1 COMMON comes here */
6252 /* DBU COMMON comes here */
6253 /* DBG COMMON comes here */
6254 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
6256 if (CHIP_REV_IS_SLOW(bp))
6259 /* finish CFC init */
6260 val = REG_RD(bp, CFC_REG_LL_INIT_DONE);
6262 BNX2X_ERR("CFC LL_INIT failed\n");
6266 val = REG_RD(bp, CFC_REG_AC_INIT_DONE);
6268 BNX2X_ERR("CFC AC_INIT failed\n");
6272 val = REG_RD(bp, CFC_REG_CAM_INIT_DONE);
6274 BNX2X_ERR("CFC CAM_INIT failed\n");
6278 REG_WR(bp, CFC_REG_DEBUG0, 0);
6280 /* read NIG statistic
6281 to see if this is our first up since powerup */
6282 #ifdef BNX2X_DMAE_RD
6283 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6284 val = *bnx2x_sp(bp, wb_data[0]);
6286 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
6287 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
6289 /* do internal memory self test */
6290 if ((val == 0) && bnx2x_int_mem_test(bp)) {
6291 BNX2X_ERR("internal mem selftest failed\n");
6295 /* clear PXP2 attentions */
6296 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR);
6298 enable_blocks_attention(bp);
6299 /* enable_blocks_parity(bp); */
6301 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6302 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6303 /* Fan failure is indicated by SPIO 5 */
6304 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6305 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6307 /* set to active low mode */
6308 val = REG_RD(bp, MISC_REG_SPIO_INT);
6309 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6310 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6311 REG_WR(bp, MISC_REG_SPIO_INT, val);
6313 /* enable interrupt to signal the IGU */
6314 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6315 val |= (1 << MISC_REGISTERS_SPIO_5);
6316 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6323 } /* end of common init */
6327 /* the physical address is shifted right 12 bits and has the valid bit
6328 (1) added at bit 53;
6329 then, since this is a wide register(TM),
6330 we split it into two 32-bit writes
6331 */
6332 #define RQ_ONCHIP_AT_PORT_SIZE 384
6333 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6334 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6335 #define PXP_ONE_ILT(x) ((x << 10) | x)
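6336 /* e.g. a mapping at 0x0012345678000 gives ADDR1 0x12345678 and ADDR2 0x100000 (valid bit only) */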
6337 DP(BNX2X_MSG_MCP, "starting per-function init port is %x\n", func);
6339 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0);
6341 /* Port PXP comes here */
6342 /* Port PXP2 comes here */
6347 i = func * RQ_ONCHIP_AT_PORT_SIZE;
6349 wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context));
6350 wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context));
6351 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6353 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8,
6354 ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
6355 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4,
6356 ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
6358 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i));
6364 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6365 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6366 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6367 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6372 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6373 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6374 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6375 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6380 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6381 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6382 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6383 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6386 /* Port TCM comes here */
6387 /* Port UCM comes here */
6388 /* Port CCM comes here */
6389 bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START,
6390 func ? XCM_PORT1_END : XCM_PORT0_END);
6396 for (i = 0; i < 32; i++) {
6397 REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i);
6399 REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2);
6401 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0);
6402 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0);
6405 REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1);
6407 /* Port QM comes here */
6410 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6411 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6413 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
6414 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
6416 /* Port DQ comes here */
6417 /* Port BRB1 comes here */
6418 bnx2x_init_block(bp, func ? PRS_PORT1_START : PRS_PORT0_START,
6419 func ? PRS_PORT1_END : PRS_PORT0_END);
6420 /* Port TSDM comes here */
6421 /* Port CSDM comes here */
6422 /* Port USDM comes here */
6423 /* Port XSDM comes here */
6424 bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START,
6425 func ? TSEM_PORT1_END : TSEM_PORT0_END);
6426 bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START,
6427 func ? USEM_PORT1_END : USEM_PORT0_END);
6428 bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START,
6429 func ? CSEM_PORT1_END : CSEM_PORT0_END);
6430 bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START,
6431 func ? XSEM_PORT1_END : XSEM_PORT0_END);
6432 /* Port UPB comes here */
6433 /* Port XSDM comes here */
6434 bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START,
6435 func ? PBF_PORT1_END : PBF_PORT0_END);
6437 /* configure PBF to work without PAUSE, MTU 9000 */
6438 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0);
6440 /* update threshold */
6441 REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16));
6442 /* update init credit */
6443 REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22);
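6444 /* the credit above covers one 9040-byte frame in 16-byte units plus a fixed margin (553 - 22) */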
6446 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1);
6448 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0);
6451 /* tell the searcher where the T2 table is */
6452 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6454 wb_write[0] = U64_LO(bp->t2_mapping);
6455 wb_write[1] = U64_HI(bp->t2_mapping);
6456 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6457 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6458 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6459 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6461 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6462 /* Port SRCH comes here */
6464 /* Port CDU comes here */
6465 /* Port CFC comes here */
6466 bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START,
6467 func ? HC_PORT1_END : HC_PORT0_END);
6468 bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START :
6469 MISC_AEU_PORT0_START,
6470 func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
6471 /* Port PXPCS comes here */
6472 /* Port EMAC0 comes here */
6473 /* Port EMAC1 comes here */
6474 /* Port DBU comes here */
6475 /* Port DBG comes here */
6476 bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START,
6477 func ? NIG_PORT1_END : NIG_PORT0_END);
6478 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1);
6479 /* Port MCP comes here */
6480 /* Port DMAE comes here */
6482 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6483 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6484 /* add SPIO 5 to group 0 */
6485 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6486 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6487 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6494 bnx2x_link_reset(bp);
6496 /* Reset PCIE errors for debug */
6497 REG_WR(bp, 0x2114, 0xffffffff);
6498 REG_WR(bp, 0x2120, 0xffffffff);
6499 REG_WR(bp, 0x2814, 0xffffffff);
6501 /* !!! move to init_values.h */
6502 REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6503 REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6504 REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6505 REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6507 REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1);
6508 REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1);
6509 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
6510 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0);
6512 bnx2x_gunzip_end(bp);
6517 bp->fw_drv_pulse_wr_seq =
6518 (SHMEM_RD(bp, func_mb[port].drv_pulse_mb) &
6519 DRV_PULSE_SEQ_MASK);
6520 bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param);
6521 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n",
6522 bp->fw_drv_pulse_wr_seq, bp->fw_mb);
6530 /* send the MCP a request, block until there is a reply */
6531 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6533 int port = bp->port;
6534 u32 seq = ++bp->fw_seq;
6537 SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq));
6538 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6540 /* let the FW do its magic ... */
6541 msleep(100); /* TBD */
6543 if (CHIP_REV_IS_SLOW(bp))
6546 rc = SHMEM_RD(bp, func_mb[port].fw_mb_header);
6547 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
6549 /* is this a reply to our command? */
6550 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6551 rc &= FW_MSG_CODE_MASK;
6555 BNX2X_ERR("FW failed to respond!\n");
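/* mailbox protocol recap: the driver posts (command | seq) in
* drv_mb_header and the FW is expected to echo the same sequence
* number in fw_mb_header, with the response code in the bits covered
* by FW_MSG_CODE_MASK; a missing or stale echo means no reply */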
6563 static void bnx2x_free_mem(struct bnx2x *bp)
6566 #define BNX2X_PCI_FREE(x, y, size) \
6569 pci_free_consistent(bp->pdev, size, x, y); \
6575 #define BNX2X_FREE(x) \
6586 for_each_queue(bp, i) {
6589 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6590 bnx2x_fp(bp, i, status_blk_mapping),
6591 sizeof(struct host_status_block) +
6592 sizeof(struct eth_tx_db_data));
6594 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
6595 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6596 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6597 bnx2x_fp(bp, i, tx_desc_mapping),
6598 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6600 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6601 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6602 bnx2x_fp(bp, i, rx_desc_mapping),
6603 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6605 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6606 bnx2x_fp(bp, i, rx_comp_mapping),
6607 sizeof(struct eth_fast_path_rx_cqe) *
6613 /* end of fastpath */
6615 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6616 (sizeof(struct host_def_status_block)));
6618 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6619 (sizeof(struct bnx2x_slowpath)));
6622 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6623 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6624 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6625 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6627 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);
6629 #undef BNX2X_PCI_FREE
6633 static int bnx2x_alloc_mem(struct bnx2x *bp)
6636 #define BNX2X_PCI_ALLOC(x, y, size) \
6638 x = pci_alloc_consistent(bp->pdev, size, y); \
6640 goto alloc_mem_err; \
6641 memset(x, 0, size); \
6644 #define BNX2X_ALLOC(x, size) \
6646 x = vmalloc(size); \
6648 goto alloc_mem_err; \
6649 memset(x, 0, size); \
6655 BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues);
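/* note on the two helpers above: BNX2X_PCI_ALLOC hands out
* DMA-coherent memory (pci_alloc_consistent) for blocks the chip
* accesses directly, while BNX2X_ALLOC uses plain vmalloc for
* host-only bookkeeping such as the sw_tx_bd/sw_rx_bd shadow rings */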
6657 for_each_queue(bp, i) {
6658 bnx2x_fp(bp, i, bp) = bp;
6661 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6662 &bnx2x_fp(bp, i, status_blk_mapping),
6663 sizeof(struct host_status_block) +
6664 sizeof(struct eth_tx_db_data));
6666 bnx2x_fp(bp, i, hw_tx_prods) =
6667 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6669 bnx2x_fp(bp, i, tx_prods_mapping) =
6670 bnx2x_fp(bp, i, status_blk_mapping) +
6671 sizeof(struct host_status_block);
6673 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
6674 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6675 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6676 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6677 &bnx2x_fp(bp, i, tx_desc_mapping),
6678 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6680 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6681 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6682 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6683 &bnx2x_fp(bp, i, rx_desc_mapping),
6684 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6686 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6687 &bnx2x_fp(bp, i, rx_comp_mapping),
6688 sizeof(struct eth_fast_path_rx_cqe) *
6692 /* end of fastpath */
6694 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6695 sizeof(struct host_def_status_block));
6697 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6698 sizeof(struct bnx2x_slowpath));
6701 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6704 for (i = 0; i < 64*1024; i += 64) {
6705 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6706 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6709 /* allocate the searcher T2 table;
6710 we allocate 1/4 of the allocation count for T2
6711 (T2 is not entered into the ILT) */
6712 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6715 for (i = 0; i < 16*1024; i += 64)
6716 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6718 /* now fixup the last line in the block to point to the next block */
6719 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
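/* the T2 table thus forms a circular free list: the last 8 bytes of
* each 64-byte element hold the physical address of the next element,
* and the final element points back to the base of the block */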
6721 /* Timer block array (MAX_CONN*8), physically uncached; for now 1024 conns */
6722 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6724 /* QM queues (128*MAX_CONN) */
6725 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6728 /* Slow path ring */
6729 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6737 #undef BNX2X_PCI_ALLOC
6741 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6745 for_each_queue(bp, i) {
6746 struct bnx2x_fastpath *fp = &bp->fp[i];
6748 u16 bd_cons = fp->tx_bd_cons;
6749 u16 sw_prod = fp->tx_pkt_prod;
6750 u16 sw_cons = fp->tx_pkt_cons;
6752 BUG_TRAP(fp->tx_buf_ring != NULL);
6754 while (sw_cons != sw_prod) {
6755 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6761 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6765 for_each_queue(bp, j) {
6766 struct bnx2x_fastpath *fp = &bp->fp[j];
6768 BUG_TRAP(fp->rx_buf_ring != NULL);
6770 for (i = 0; i < NUM_RX_BD; i++) {
6771 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6772 struct sk_buff *skb = rx_buf->skb;
6777 pci_unmap_single(bp->pdev,
6778 pci_unmap_addr(rx_buf, mapping),
6779 bp->rx_buf_use_size,
6780 PCI_DMA_FROMDEVICE);
6788 static void bnx2x_free_skbs(struct bnx2x *bp)
6790 bnx2x_free_tx_skbs(bp);
6791 bnx2x_free_rx_skbs(bp);
6794 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6798 free_irq(bp->msix_table[0].vector, bp->dev);
6799 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6800 bp->msix_table[0].vector);
6802 for_each_queue(bp, i) {
6803 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6804 "state(%x)\n", i, bp->msix_table[i + 1].vector,
6805 bnx2x_fp(bp, i, state));
6807 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) {
6809 free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
6810 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_CLOSED;
6813 DP(NETIF_MSG_IFDOWN, "irq not freed\n");
6819 static void bnx2x_free_irq(struct bnx2x *bp)
6822 if (bp->flags & USING_MSIX_FLAG) {
6824 bnx2x_free_msix_irqs(bp);
6825 pci_disable_msix(bp->pdev);
6827 bp->flags &= ~USING_MSIX_FLAG;
6830 free_irq(bp->pdev->irq, bp->dev);
6833 static int bnx2x_enable_msix(struct bnx2x *bp)
6838 bp->msix_table[0].entry = 0;
6839 for_each_queue(bp, i)
6840 bp->msix_table[i + 1].entry = i + 1;
6842 if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
6843 bp->num_queues + 1)) {
6844 BNX2X_ERR("failed to enable msix\n");
6849 bp->flags |= USING_MSIX_FLAG;
6856 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6861 DP(NETIF_MSG_IFUP, "about to request sp irq\n");
6863 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6864 bp->dev->name, bp->dev);
6867 BNX2X_ERR("request sp irq failed\n");
6871 for_each_queue(bp, i) {
6872 rc = request_irq(bp->msix_table[i + 1].vector,
6873 bnx2x_msix_fp_int, 0,
6874 bp->dev->name, &bp->fp[i]);
6877 BNX2X_ERR("request fp #%d irq failed\n", i);
6878 bnx2x_free_msix_irqs(bp);
6882 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6890 static int bnx2x_req_irq(struct bnx2x *bp)
6893 int rc = request_irq(bp->pdev->irq, bnx2x_interrupt,
6894 IRQF_SHARED, bp->dev->name, bp->dev);
6896 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6903 * Init service functions
6906 static void bnx2x_set_mac_addr(struct bnx2x *bp)
6908 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6911 /* unicasts 0-31:port0 32-63:port1
6912 * multicast 64-127:port0 128-191:port1 */
6914 config->hdr.length_6b = 2;
6915 config->hdr.offset = bp->port ? 31 : 0;
6916 config->hdr.reserved0 = 0;
6917 config->hdr.reserved1 = 0;
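/* length_6b appears to be the number of CAM entries in this command
* (two are filled in below: the unicast MAC and the broadcast entry),
* and offset selects the per-port CAM region described above */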
6920 config->config_table[0].cam_entry.msb_mac_addr =
6921 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6922 config->config_table[0].cam_entry.middle_mac_addr =
6923 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6924 config->config_table[0].cam_entry.lsb_mac_addr =
6925 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6926 config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port);
6927 config->config_table[0].target_table_entry.flags = 0;
6928 config->config_table[0].target_table_entry.client_id = 0;
6929 config->config_table[0].target_table_entry.vlan_id = 0;
6931 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6932 config->config_table[0].cam_entry.msb_mac_addr,
6933 config->config_table[0].cam_entry.middle_mac_addr,
6934 config->config_table[0].cam_entry.lsb_mac_addr);
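/* illustrative example: for MAC 00:11:22:33:44:55 on a little-endian
* host, *(u16 *)&dev_addr[0] reads 0x1100, so the swab16() above
* yields msb 0x0011, middle 0x2233 and lsb 0x4455 */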
6937 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6938 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6939 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6940 config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port);
6941 config->config_table[1].target_table_entry.flags =
6942 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6943 config->config_table[1].target_table_entry.client_id = 0;
6944 config->config_table[1].target_table_entry.vlan_id = 0;
6946 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6947 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6948 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6951 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6952 int *state_p, int poll)
6954 /* can take a while if any port is running */
6957 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6958 poll ? "polling" : "waiting", state, idx);
6965 bnx2x_rx_int(bp->fp, 10);
6966 /* if the index is different from 0,
6967 * the reply for some commands will
6968 * be on the non-default queue */
6971 bnx2x_rx_int(&bp->fp[idx], 10);
6974 mb(); /* state is changed by bnx2x_sp_event()*/
6976 if (*state_p == state)
6985 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6986 poll ? "polling" : "waiting", state, idx);
6991 static int bnx2x_setup_leading(struct bnx2x *bp)
6994 /* reset IGU state */
6995 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6998 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7000 return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7004 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7007 /* reset IGU state */
7008 bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7010 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
7011 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
7013 /* Wait for completion */
7014 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7015 &(bp->fp[index].state), 1);
7020 static int bnx2x_poll(struct napi_struct *napi, int budget);
7021 static void bnx2x_set_rx_mode(struct net_device *dev);
7023 static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
7028 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7030 /* Send LOAD_REQUEST command to MCP.
7031 Returns the type of LOAD command: if this is the
7032 first port to be initialized, common blocks should be
7033 initialized; otherwise they should not. */
7036 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7037 if (rc == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7038 return -EBUSY; /* other port in diagnostic mode */
7041 rc = FW_MSG_CODE_DRV_LOAD_COMMON;
7044 /* if we can't use MSI-X we only need one fp,
7045 * so try to enable MSI-X with the requested number of fp's
7046 * and fall back to INT#A with one fp */
7052 if ((use_multi > 1) && (use_multi <= 16))
7053 /* user requested number */
7054 bp->num_queues = use_multi;
7055 else if (use_multi == 1)
7056 bp->num_queues = num_online_cpus();
7060 if (bnx2x_enable_msix(bp)) {
7061 /* failed to enable msix */
7064 BNX2X_ERR("Multi requested but failed"
7065 " to enable MSI-X\n");
7070 DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
7072 if (bnx2x_alloc_mem(bp))
7076 if (bp->flags & USING_MSIX_FLAG) {
7077 if (bnx2x_req_msix_irqs(bp)) {
7078 pci_disable_msix(bp->pdev);
7083 if (bnx2x_req_irq(bp)) {
7084 BNX2X_ERR("IRQ request failed, aborting\n");
7090 for_each_queue(bp, i)
7091 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7096 if (bnx2x_function_init(bp, (rc == FW_MSG_CODE_DRV_LOAD_COMMON))) {
7097 BNX2X_ERR("HW init failed, aborting\n");
7102 atomic_set(&bp->intr_sem, 0);
7105 /* Setup NIC internals and enable interrupts */
7108 /* Send LOAD_DONE command to MCP */
7110 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7111 DP(NETIF_MSG_IFUP, "rc = 0x%x\n", rc);
7113 BNX2X_ERR("MCP response failure, unloading\n");
7118 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7120 /* Enable Rx interrupt handling before sending the ramrod
7121 as its completion arrives on an Rx FP queue */
7122 for_each_queue(bp, i)
7123 napi_enable(&bnx2x_fp(bp, i, napi));
7125 if (bnx2x_setup_leading(bp))
7128 for_each_nondefault_queue(bp, i)
7129 if (bnx2x_setup_multi(bp, i))
7132 bnx2x_set_mac_addr(bp);
7136 /* Start fast path */
7137 if (req_irq) { /* IRQ is only requested from bnx2x_open */
7138 netif_start_queue(bp->dev);
7139 if (bp->flags & USING_MSIX_FLAG)
7140 printk(KERN_INFO PFX "%s: using MSI-X\n",
7143 /* Otherwise only the Tx queue should be re-enabled */
7144 } else if (netif_running(bp->dev)) {
7145 netif_wake_queue(bp->dev);
7146 bnx2x_set_rx_mode(bp->dev);
7149 /* start the timer */
7150 mod_timer(&bp->timer, jiffies + bp->current_interval);
7155 for_each_queue(bp, i)
7156 napi_disable(&bnx2x_fp(bp, i, napi));
7159 bnx2x_disable_int_sync(bp);
7161 bnx2x_free_skbs(bp);
7167 /* TBD we really need to reset the chip
7168 if we want to recover from this */
7172 static void bnx2x_netif_stop(struct bnx2x *bp)
7176 bp->rx_mode = BNX2X_RX_MODE_NONE;
7177 bnx2x_set_storm_rx_mode(bp);
7179 bnx2x_disable_int_sync(bp);
7180 bnx2x_link_reset(bp);
7182 for_each_queue(bp, i)
7183 napi_disable(&bnx2x_fp(bp, i, napi));
7185 if (netif_running(bp->dev)) {
7186 netif_tx_disable(bp->dev);
7187 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7191 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7193 int port = bp->port;
7199 DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code);
7201 /* Do not rcv packets to BRB */
7202 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7203 /* Do not direct rcv packets that are not for MCP to the BRB */
7204 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7205 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7207 /* Configure IGU and AEU */
7208 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
7209 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7211 /* TODO: Close Doorbell port? */
7218 base = port * RQ_ONCHIP_AT_PORT_SIZE;
7219 for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) {
7221 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
7223 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT, 0);
7224 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + 4, 0);
7228 if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7230 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7232 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7237 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7242 /* halt the connection */
7243 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
7244 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
7247 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7248 &(bp->fp[index].state), 1);
7249 if (rc) /* timeout */
7252 /* delete cfc entry */
7253 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7255 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7256 &(bp->fp[index].state), 1);
7261 static void bnx2x_stop_leading(struct bnx2x *bp)
7263 u16 dsb_sp_prod_idx;
7264 /* if the other port is handling traffic,
7265 this can take a lot of time */
7270 /* Send HALT ramrod */
7271 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7272 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0);
7274 if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7275 &(bp->fp[0].state), 1))
7278 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7280 /* Send CFC_DELETE ramrod */
7281 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7283 /* Wait for the completion to arrive on the default status block;
7284 we are going to reset the chip anyway,
7285 so there is not much to do if this times out */
7287 while ((dsb_sp_prod_idx == *bp->dsb_sp_prod) && timeout) {
7292 DP(NETIF_MSG_IFDOWN, "timeout polling for completion "
7293 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7294 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7296 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7297 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7301 static int bnx2x_nic_unload(struct bnx2x *bp, int fre_irq)
7307 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7309 /* Calling flush_scheduled_work() may deadlock because
7310 * linkwatch_event() may be on the workqueue and it will try to get
7311 * the rtnl_lock which we are holding.
7314 while (bp->in_reset_task)
7317 /* Delete the timer: do it before disabling interrupts, as there
7318 may still be a STAT_QUERY ramrod pending after stopping the timer */
7319 del_timer_sync(&bp->timer);
7321 /* Wait until stat ramrod returns and all SP tasks complete */
7322 while (bp->stat_pending && (bp->spq_left != MAX_SPQ_PENDING))
7325 /* Stop fast path, disable MAC, disable interrupts, disable napi */
7326 bnx2x_netif_stop(bp);
7328 if (bp->flags & NO_WOL_FLAG)
7329 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7331 u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
7332 u8 *mac_addr = bp->dev->dev_addr;
7333 u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD |
7334 EMAC_MODE_ACPI_RCVD);
7336 EMAC_WR(EMAC_REG_EMAC_MODE, val);
7338 val = (mac_addr[0] << 8) | mac_addr[1];
7339 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
7341 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7342 (mac_addr[4] << 8) | mac_addr[5];
7343 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
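/* e.g. MAC 00:11:22:33:44:55 programs MAC_MATCH with 0x00000011 and
* MAC_MATCH + 4 with 0x22334455, the layout the EMAC presumably uses
* to match the magic packet destination address */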
7345 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7347 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7349 for_each_nondefault_queue(bp, i)
7350 if (bnx2x_stop_multi(bp, i))
7354 bnx2x_stop_leading(bp);
7358 rc = bnx2x_fw_command(bp, reset_code);
7360 rc = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7366 /* Reset the chip */
7367 bnx2x_reset_chip(bp, rc);
7369 /* Report UNLOAD_DONE to MCP */
7371 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7373 /* Free SKBs and driver internals */
7374 bnx2x_free_skbs(bp);
7377 bp->state = BNX2X_STATE_CLOSED;
7380 netif_carrier_off(bp->dev);
7385 /* end of nic load/unload */
7390 * Init service functions
7393 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
7395 int port = bp->port;
7400 switch (switch_cfg) {
7402 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7404 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
7405 switch (ext_phy_type) {
7406 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7407 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7410 bp->supported |= (SUPPORTED_10baseT_Half |
7411 SUPPORTED_10baseT_Full |
7412 SUPPORTED_100baseT_Half |
7413 SUPPORTED_100baseT_Full |
7414 SUPPORTED_1000baseT_Full |
7415 SUPPORTED_2500baseX_Full |
7416 SUPPORTED_TP | SUPPORTED_FIBRE |
7419 SUPPORTED_Asym_Pause);
7422 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7423 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7426 bp->phy_flags |= PHY_SGMII_FLAG;
7428 bp->supported |= (SUPPORTED_10baseT_Half |
7429 SUPPORTED_10baseT_Full |
7430 SUPPORTED_100baseT_Half |
7431 SUPPORTED_100baseT_Full |
7432 SUPPORTED_1000baseT_Full |
7433 SUPPORTED_TP | SUPPORTED_FIBRE |
7436 SUPPORTED_Asym_Pause);
7440 BNX2X_ERR("NVRAM config error. "
7441 "BAD SerDes ext_phy_config 0x%x\n",
7442 bp->ext_phy_config);
7446 bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7448 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
7451 case SWITCH_CFG_10G:
7452 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7454 bp->phy_flags |= PHY_XGXS_FLAG;
7456 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
7457 switch (ext_phy_type) {
7458 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7459 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7462 bp->supported |= (SUPPORTED_10baseT_Half |
7463 SUPPORTED_10baseT_Full |
7464 SUPPORTED_100baseT_Half |
7465 SUPPORTED_100baseT_Full |
7466 SUPPORTED_1000baseT_Full |
7467 SUPPORTED_2500baseX_Full |
7468 SUPPORTED_10000baseT_Full |
7469 SUPPORTED_TP | SUPPORTED_FIBRE |
7472 SUPPORTED_Asym_Pause);
7475 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7476 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7479 bp->supported |= (SUPPORTED_10000baseT_Full |
7482 SUPPORTED_Asym_Pause);
7485 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7486 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7489 bp->supported |= (SUPPORTED_10000baseT_Full |
7490 SUPPORTED_1000baseT_Full |
7494 SUPPORTED_Asym_Pause);
7497 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7498 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7501 bp->supported |= (SUPPORTED_10000baseT_Full |
7502 SUPPORTED_1000baseT_Full |
7506 SUPPORTED_Asym_Pause);
7509 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7510 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7513 bp->supported |= (SUPPORTED_10000baseT_Full |
7517 SUPPORTED_Asym_Pause);
7521 BNX2X_ERR("NVRAM config error. "
7522 "BAD XGXS ext_phy_config 0x%x\n",
7523 bp->ext_phy_config);
7527 bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7529 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
7531 bp->ser_lane = ((bp->lane_config &
7532 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
7533 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
7534 bp->rx_lane_swap = ((bp->lane_config &
7535 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
7536 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
7537 bp->tx_lane_swap = ((bp->lane_config &
7538 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
7539 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
7540 BNX2X_DEV_INFO("rx_lane_swap 0x%x tx_lane_swap 0x%x\n",
7541 bp->rx_lane_swap, bp->tx_lane_swap);
7545 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7550 /* mask what we support according to speed_cap_mask */
7551 if (!(bp->speed_cap_mask &
7552 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7553 bp->supported &= ~SUPPORTED_10baseT_Half;
7555 if (!(bp->speed_cap_mask &
7556 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7557 bp->supported &= ~SUPPORTED_10baseT_Full;
7559 if (!(bp->speed_cap_mask &
7560 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7561 bp->supported &= ~SUPPORTED_100baseT_Half;
7563 if (!(bp->speed_cap_mask &
7564 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7565 bp->supported &= ~SUPPORTED_100baseT_Full;
7567 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7568 bp->supported &= ~(SUPPORTED_1000baseT_Half |
7569 SUPPORTED_1000baseT_Full);
7571 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7572 bp->supported &= ~SUPPORTED_2500baseX_Full;
7574 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7575 bp->supported &= ~SUPPORTED_10000baseT_Full;
7577 BNX2X_DEV_INFO("supported 0x%x\n", bp->supported);
7580 static void bnx2x_link_settings_requested(struct bnx2x *bp)
7582 bp->req_autoneg = 0;
7583 bp->req_duplex = DUPLEX_FULL;
7585 switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7586 case PORT_FEATURE_LINK_SPEED_AUTO:
7587 if (bp->supported & SUPPORTED_Autoneg) {
7588 bp->req_autoneg |= AUTONEG_SPEED;
7589 bp->req_line_speed = 0;
7590 bp->advertising = bp->supported;
7592 if (XGXS_EXT_PHY_TYPE(bp) ==
7593 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) {
7594 /* force 10G, no AN */
7595 bp->req_line_speed = SPEED_10000;
7597 (ADVERTISED_10000baseT_Full |
7601 BNX2X_ERR("NVRAM config error. "
7602 "Invalid link_config 0x%x"
7603 " Autoneg not supported\n",
7609 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7610 if (bp->supported & SUPPORTED_10baseT_Full) {
7611 bp->req_line_speed = SPEED_10;
7612 bp->advertising = (ADVERTISED_10baseT_Full |
7615 BNX2X_ERR("NVRAM config error. "
7616 "Invalid link_config 0x%x"
7617 " speed_cap_mask 0x%x\n",
7618 bp->link_config, bp->speed_cap_mask);
7623 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7624 if (bp->supported & SUPPORTED_10baseT_Half) {
7625 bp->req_line_speed = SPEED_10;
7626 bp->req_duplex = DUPLEX_HALF;
7627 bp->advertising = (ADVERTISED_10baseT_Half |
7630 BNX2X_ERR("NVRAM config error. "
7631 "Invalid link_config 0x%x"
7632 " speed_cap_mask 0x%x\n",
7633 bp->link_config, bp->speed_cap_mask);
7638 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7639 if (bp->supported & SUPPORTED_100baseT_Full) {
7640 bp->req_line_speed = SPEED_100;
7641 bp->advertising = (ADVERTISED_100baseT_Full |
7644 BNX2X_ERR("NVRAM config error. "
7645 "Invalid link_config 0x%x"
7646 " speed_cap_mask 0x%x\n",
7647 bp->link_config, bp->speed_cap_mask);
7652 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7653 if (bp->supported & SUPPORTED_100baseT_Half) {
7654 bp->req_line_speed = SPEED_100;
7655 bp->req_duplex = DUPLEX_HALF;
7656 bp->advertising = (ADVERTISED_100baseT_Half |
7659 BNX2X_ERR("NVRAM config error. "
7660 "Invalid link_config 0x%x"
7661 " speed_cap_mask 0x%x\n",
7662 bp->link_config, bp->speed_cap_mask);
7667 case PORT_FEATURE_LINK_SPEED_1G:
7668 if (bp->supported & SUPPORTED_1000baseT_Full) {
7669 bp->req_line_speed = SPEED_1000;
7670 bp->advertising = (ADVERTISED_1000baseT_Full |
7673 BNX2X_ERR("NVRAM config error. "
7674 "Invalid link_config 0x%x"
7675 " speed_cap_mask 0x%x\n",
7676 bp->link_config, bp->speed_cap_mask);
7681 case PORT_FEATURE_LINK_SPEED_2_5G:
7682 if (bp->supported & SUPPORTED_2500baseX_Full) {
7683 bp->req_line_speed = SPEED_2500;
7684 bp->advertising = (ADVERTISED_2500baseX_Full |
7687 BNX2X_ERR("NVRAM config error. "
7688 "Invalid link_config 0x%x"
7689 " speed_cap_mask 0x%x\n",
7690 bp->link_config, bp->speed_cap_mask);
7695 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7696 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7697 case PORT_FEATURE_LINK_SPEED_10G_KR:
7698 if (bp->supported & SUPPORTED_10000baseT_Full) {
7699 bp->req_line_speed = SPEED_10000;
7700 bp->advertising = (ADVERTISED_10000baseT_Full |
7703 BNX2X_ERR("NVRAM config error. "
7704 "Invalid link_config 0x%x"
7705 " speed_cap_mask 0x%x\n",
7706 bp->link_config, bp->speed_cap_mask);
7712 BNX2X_ERR("NVRAM config error. "
7713 "BAD link speed link_config 0x%x\n",
7715 bp->req_autoneg |= AUTONEG_SPEED;
7716 bp->req_line_speed = 0;
7717 bp->advertising = bp->supported;
7720 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d\n",
7721 bp->req_line_speed, bp->req_duplex);
7723 bp->req_flow_ctrl = (bp->link_config &
7724 PORT_FEATURE_FLOW_CONTROL_MASK);
7725 if ((bp->req_flow_ctrl == FLOW_CTRL_AUTO) &&
7726 (bp->supported & SUPPORTED_Autoneg))
7727 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
7729 BNX2X_DEV_INFO("req_autoneg 0x%x req_flow_ctrl 0x%x"
7730 " advertising 0x%x\n",
7731 bp->req_autoneg, bp->req_flow_ctrl, bp->advertising);
7734 static void bnx2x_get_hwinfo(struct bnx2x *bp)
7736 u32 val, val2, val3, val4, id;
7737 int port = bp->port;
7740 bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7741 BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base);
7743 /* Get the chip revision id and number. */
7744 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7745 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7746 id = ((val & 0xffff) << 16);
7747 val = REG_RD(bp, MISC_REG_CHIP_REV);
7748 id |= ((val & 0xf) << 12);
7749 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7750 id |= ((val & 0xff) << 4);
7751 REG_RD(bp, MISC_REG_BOND_ID);
7754 BNX2X_DEV_INFO("chip ID is %x\n", id);
7756 if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) {
7757 BNX2X_DEV_INFO("MCP not active\n");
7762 val = SHMEM_RD(bp, validity_map[port]);
7763 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7764 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7765 BNX2X_ERR("BAD MCP validity signature\n");
7767 bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) &
7768 DRV_MSG_SEQ_NUMBER_MASK);
7770 bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7771 bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7773 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7775 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7776 bp->ext_phy_config =
7778 dev_info.port_hw_config[port].external_phy_config);
7779 bp->speed_cap_mask =
7781 dev_info.port_hw_config[port].speed_capability_mask);
7784 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7786 BNX2X_DEV_INFO("hw_config (%08x) board (%08x) serdes_config (%08x)\n"
7787 KERN_INFO " lane_config (%08x) ext_phy_config (%08x)\n"
7788 KERN_INFO " speed_cap_mask (%08x) link_config (%08x)"
7790 bp->hw_config, bp->board, bp->serdes_config,
7791 bp->lane_config, bp->ext_phy_config,
7792 bp->speed_cap_mask, bp->link_config, bp->fw_seq);
7794 switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK);
7795 bnx2x_link_settings_supported(bp, switch_cfg);
7797 bp->autoneg = (bp->hw_config & SHARED_HW_CFG_AN_ENABLE_MASK);
7798 /* for now disable cl73 */
7799 bp->autoneg &= ~SHARED_HW_CFG_AN_ENABLE_CL73;
7800 BNX2X_DEV_INFO("autoneg 0x%x\n", bp->autoneg);
7802 bnx2x_link_settings_requested(bp);
7804 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7805 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7806 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7807 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7808 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7809 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7810 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7811 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7813 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
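/* e.g. mac_upper 0x0011 and mac_lower 0x22334455 in shmem yield
* dev_addr 00:11:22:33:44:55 */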
7816 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7817 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7818 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7819 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7821 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7822 val, val2, val3, val4);
7826 bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8);
7827 BNX2X_DEV_INFO("bc_ver %X\n", val);
7828 if (val < BNX2X_BC_VER) {
7829 /* for now only warn;
7830 * later we might need to enforce this */
7831 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7832 " please upgrade BC\n", BNX2X_BC_VER, val);
7838 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7839 bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
7840 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7841 bp->flash_size, bp->flash_size);
7845 set_mac: /* only supposed to happen on emulation/FPGA */
7846 BNX2X_ERR("warning: random MAC workaround active\n");
7847 random_ether_addr(bp->dev->dev_addr);
7848 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7853 * ethtool service functions
7856 /* All ethtool functions called with rtnl_lock */
7858 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7860 struct bnx2x *bp = netdev_priv(dev);
7862 cmd->supported = bp->supported;
7863 cmd->advertising = bp->advertising;
7865 if (netif_carrier_ok(dev)) {
7866 cmd->speed = bp->line_speed;
7867 cmd->duplex = bp->duplex;
7869 cmd->speed = bp->req_line_speed;
7870 cmd->duplex = bp->req_duplex;
7873 if (bp->phy_flags & PHY_XGXS_FLAG) {
7874 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
7876 switch (ext_phy_type) {
7877 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7878 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7879 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7880 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7881 cmd->port = PORT_FIBRE;
7884 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7885 cmd->port = PORT_TP;
7889 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7890 bp->ext_phy_config);
7893 cmd->port = PORT_TP;
7895 cmd->phy_address = bp->phy_addr;
7896 cmd->transceiver = XCVR_INTERNAL;
7898 if (bp->req_autoneg & AUTONEG_SPEED)
7899 cmd->autoneg = AUTONEG_ENABLE;
7901 cmd->autoneg = AUTONEG_DISABLE;
7906 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7907 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7908 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7909 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7910 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7911 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7912 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7917 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7919 struct bnx2x *bp = netdev_priv(dev);
7922 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7923 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7924 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7925 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7926 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7927 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7928 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7930 switch (cmd->port) {
7932 if (!(bp->supported & SUPPORTED_TP)) {
7933 DP(NETIF_MSG_LINK, "TP not supported\n");
7937 if (bp->phy_flags & PHY_XGXS_FLAG) {
7938 bnx2x_link_reset(bp);
7939 bnx2x_link_settings_supported(bp, SWITCH_CFG_1G);
7940 bnx2x_phy_deassert(bp);
7945 if (!(bp->supported & SUPPORTED_FIBRE)) {
7946 DP(NETIF_MSG_LINK, "FIBRE not supported\n");
7950 if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
7951 bnx2x_link_reset(bp);
7952 bnx2x_link_settings_supported(bp, SWITCH_CFG_10G);
7953 bnx2x_phy_deassert(bp);
7958 DP(NETIF_MSG_LINK, "Unknown port type\n");
7962 if (cmd->autoneg == AUTONEG_ENABLE) {
7963 if (!(bp->supported & SUPPORTED_Autoneg)) {
7964 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7968 /* advertise the requested speed and duplex if supported */
7969 cmd->advertising &= bp->supported;
7971 bp->req_autoneg |= AUTONEG_SPEED;
7972 bp->req_line_speed = 0;
7973 bp->req_duplex = DUPLEX_FULL;
7974 bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising);
7976 } else { /* forced speed */
7977 /* advertise the requested speed and duplex if supported */
7978 switch (cmd->speed) {
7980 if (cmd->duplex == DUPLEX_FULL) {
7981 if (!(bp->supported &
7982 SUPPORTED_10baseT_Full)) {
7984 "10M full not supported\n");
7988 advertising = (ADVERTISED_10baseT_Full |
7991 if (!(bp->supported &
7992 SUPPORTED_10baseT_Half)) {
7994 "10M half not supported\n");
7998 advertising = (ADVERTISED_10baseT_Half |
8004 if (cmd->duplex == DUPLEX_FULL) {
8005 if (!(bp->supported &
8006 SUPPORTED_100baseT_Full)) {
8008 "100M full not supported\n");
8012 advertising = (ADVERTISED_100baseT_Full |
8015 if (!(bp->supported &
8016 SUPPORTED_100baseT_Half)) {
8018 "100M half not supported\n");
8022 advertising = (ADVERTISED_100baseT_Half |
8028 if (cmd->duplex != DUPLEX_FULL) {
8029 DP(NETIF_MSG_LINK, "1G half not supported\n");
8033 if (!(bp->supported & SUPPORTED_1000baseT_Full)) {
8034 DP(NETIF_MSG_LINK, "1G full not supported\n");
8038 advertising = (ADVERTISED_1000baseT_Full |
8043 if (cmd->duplex != DUPLEX_FULL) {
8045 "2.5G half not supported\n");
8049 if (!(bp->supported & SUPPORTED_2500baseX_Full)) {
8051 "2.5G full not supported\n");
8055 advertising = (ADVERTISED_2500baseX_Full |
8060 if (cmd->duplex != DUPLEX_FULL) {
8061 DP(NETIF_MSG_LINK, "10G half not supported\n");
8065 if (!(bp->supported & SUPPORTED_10000baseT_Full)) {
8066 DP(NETIF_MSG_LINK, "10G full not supported\n");
8070 advertising = (ADVERTISED_10000baseT_Full |
8075 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8079 bp->req_autoneg &= ~AUTONEG_SPEED;
8080 bp->req_line_speed = cmd->speed;
8081 bp->req_duplex = cmd->duplex;
8082 bp->advertising = advertising;
8085 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_line_speed %d\n"
8086 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8087 bp->req_autoneg, bp->req_line_speed, bp->req_duplex,
8090 bnx2x_stop_stats(bp);
8091 bnx2x_link_initialize(bp);
8096 static void bnx2x_get_drvinfo(struct net_device *dev,
8097 struct ethtool_drvinfo *info)
8099 struct bnx2x *bp = netdev_priv(dev);
8101 strcpy(info->driver, DRV_MODULE_NAME);
8102 strcpy(info->version, DRV_MODULE_VERSION);
8103 snprintf(info->fw_version, 32, "%d.%d.%d:%d (BC VER %x)",
8104 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
8105 BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_COMPILE_FLAGS,
8107 strcpy(info->bus_info, pci_name(bp->pdev));
8108 info->n_stats = BNX2X_NUM_STATS;
8109 info->testinfo_len = BNX2X_NUM_TESTS;
8110 info->eedump_len = bp->flash_size;
8111 info->regdump_len = 0;
8114 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8116 struct bnx2x *bp = netdev_priv(dev);
8118 if (bp->flags & NO_WOL_FLAG) {
8122 wol->supported = WAKE_MAGIC;
8124 wol->wolopts = WAKE_MAGIC;
8128 memset(&wol->sopass, 0, sizeof(wol->sopass));
8131 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8133 struct bnx2x *bp = netdev_priv(dev);
8135 if (wol->wolopts & ~WAKE_MAGIC)
8138 if (wol->wolopts & WAKE_MAGIC) {
8139 if (bp->flags & NO_WOL_FLAG)
8149 static u32 bnx2x_get_msglevel(struct net_device *dev)
8151 struct bnx2x *bp = netdev_priv(dev);
8153 return bp->msglevel;
8156 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8158 struct bnx2x *bp = netdev_priv(dev);
8160 if (capable(CAP_NET_ADMIN))
8161 bp->msglevel = level;
8164 static int bnx2x_nway_reset(struct net_device *dev)
8166 struct bnx2x *bp = netdev_priv(dev);
8168 if (bp->state != BNX2X_STATE_OPEN) {
8169 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
8173 bnx2x_stop_stats(bp);
8174 bnx2x_link_initialize(bp);
8179 static int bnx2x_get_eeprom_len(struct net_device *dev)
8181 struct bnx2x *bp = netdev_priv(dev);
8183 return bp->flash_size;
8186 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8188 int port = bp->port;
8192 /* adjust timeout for emulation/FPGA */
8193 count = NVRAM_TIMEOUT_COUNT;
8194 if (CHIP_REV_IS_SLOW(bp))
8197 /* request access to nvram interface */
8198 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8199 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8201 for (i = 0; i < count*10; i++) {
8202 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8203 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8209 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8210 DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n");
8217 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8219 int port = bp->port;
8223 /* adjust timeout for emulation/FPGA */
8224 count = NVRAM_TIMEOUT_COUNT;
8225 if (CHIP_REV_IS_SLOW(bp))
8228 /* relinquish nvram interface */
8229 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8230 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8232 for (i = 0; i < count*10; i++) {
8233 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8234 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8240 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8241 DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n");
8248 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8252 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8254 /* enable both bits, even on read */
8255 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8256 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8257 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8260 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8264 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8266 /* disable both bits, even after read */
8267 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8268 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8269 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8272 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8278 /* build the command word */
8279 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8281 /* need to clear DONE bit separately */
8282 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8284 /* address of the NVRAM to read from */
8285 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8286 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8288 /* issue a read command */
8289 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8291 /* adjust timeout for emulation/FPGA */
8292 count = NVRAM_TIMEOUT_COUNT;
8293 if (CHIP_REV_IS_SLOW(bp))
8296 /* wait for completion */
8299 for (i = 0; i < count; i++) {
8301 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8303 if (val & MCPR_NVM_COMMAND_DONE) {
8304 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8305 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8306 /* we read nvram data in cpu order,
8307 * but ethtool sees it as an array of bytes;
8308 * converting to big-endian does the work */
8309 val = cpu_to_be32(val);
8319 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8326 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8328 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8333 if (offset + buf_size > bp->flash_size) {
8334 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8335 " buf_size (0x%x) > flash_size (0x%x)\n",
8336 offset, buf_size, bp->flash_size);
8340 /* request access to nvram interface */
8341 rc = bnx2x_acquire_nvram_lock(bp);
8345 /* enable access to nvram interface */
8346 bnx2x_enable_nvram_access(bp);
8348 /* read the first word(s) */
8349 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8350 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8351 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8352 memcpy(ret_buf, &val, 4);
8354 /* advance to the next dword */
8355 offset += sizeof(u32);
8356 ret_buf += sizeof(u32);
8357 buf_size -= sizeof(u32);
8362 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8363 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8364 memcpy(ret_buf, &val, 4);
8367 /* disable access to nvram interface */
8368 bnx2x_disable_nvram_access(bp);
8369 bnx2x_release_nvram_lock(bp);
8374 static int bnx2x_get_eeprom(struct net_device *dev,
8375 struct ethtool_eeprom *eeprom, u8 *eebuf)
8377 struct bnx2x *bp = netdev_priv(dev);
8380 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8381 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8382 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8383 eeprom->len, eeprom->len);
8385 /* parameters already validated in ethtool_get_eeprom */
8387 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8392 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8397 /* build the command word */
8398 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8400 /* need to clear DONE bit separately */
8401 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8403 /* write the data */
8404 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8406 /* address of the NVRAM to write to */
8407 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8408 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8410 /* issue the write command */
8411 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8413 /* adjust timeout for emulation/FPGA */
8414 count = NVRAM_TIMEOUT_COUNT;
8415 if (CHIP_REV_IS_SLOW(bp))
8418 /* wait for completion */
8420 for (i = 0; i < count; i++) {
8422 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8423 if (val & MCPR_NVM_COMMAND_DONE) {
8432 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
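/* e.g. BYTE_OFFSET(5) = 8 * (5 & 0x03) = 8, i.e. byte 5 occupies
* bits 8..15 of the dword at aligned offset 4 */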
8434 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8442 if (offset + buf_size > bp->flash_size) {
8443 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8444 " buf_size (0x%x) > flash_size (0x%x)\n",
8445 offset, buf_size, bp->flash_size);
8449 /* request access to nvram interface */
8450 rc = bnx2x_acquire_nvram_lock(bp);
8454 /* enable access to nvram interface */
8455 bnx2x_enable_nvram_access(bp);
8457 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8458 align_offset = (offset & ~0x03);
8459 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8462 val &= ~(0xff << BYTE_OFFSET(offset));
8463 val |= (*data_buf << BYTE_OFFSET(offset));
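/* worked example: writing the byte at offset 0x102 reads the dword
* at align_offset 0x100, clears bits 16..23 (BYTE_OFFSET(0x102) = 16)
* and merges the new byte there before writing the dword back */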
8465 /* nvram data is returned as an array of bytes
8466 * convert it back to cpu order */
8467 val = be32_to_cpu(val);
8469 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8471 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8475 /* disable access to nvram interface */
8476 bnx2x_disable_nvram_access(bp);
8477 bnx2x_release_nvram_lock(bp);
8482 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8490 if (buf_size == 1) { /* ethtool */
8491 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8494 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8496 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8501 if (offset + buf_size > bp->flash_size) {
8502 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8503 " buf_size (0x%x) > flash_size (0x%x)\n",
8504 offset, buf_size, bp->flash_size);
8508 /* request access to nvram interface */
8509 rc = bnx2x_acquire_nvram_lock(bp);
8513 /* enable access to nvram interface */
8514 bnx2x_enable_nvram_access(bp);
8517 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8518 while ((written_so_far < buf_size) && (rc == 0)) {
8519 if (written_so_far == (buf_size - sizeof(u32)))
8520 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8521 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8522 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8523 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8524 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
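/* illustrative: assuming NVRAM_PAGE_SIZE is 256, the dword at offset
* 252 ends a page and gets MCPR_NVM_COMMAND_LAST, while the dword at
* offset 256 starts the next page and gets _FIRST again, so each page
* is programmed as its own FIRST..LAST burst */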
8526 memcpy(&val, data_buf, 4);
8527 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8529 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8531 /* advance to the next dword */
8532 offset += sizeof(u32);
8533 data_buf += sizeof(u32);
8534 written_so_far += sizeof(u32);
8538 /* disable access to nvram interface */
8539 bnx2x_disable_nvram_access(bp);
8540 bnx2x_release_nvram_lock(bp);
8545 static int bnx2x_set_eeprom(struct net_device *dev,
8546 struct ethtool_eeprom *eeprom, u8 *eebuf)
8548 struct bnx2x *bp = netdev_priv(dev);
8551 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8552 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8553 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8554 eeprom->len, eeprom->len);
8556 /* parameters already validated in ethtool_set_eeprom */
8558 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8563 static int bnx2x_get_coalesce(struct net_device *dev,
8564 struct ethtool_coalesce *coal)
8566 struct bnx2x *bp = netdev_priv(dev);
8568 memset(coal, 0, sizeof(struct ethtool_coalesce));
8570 coal->rx_coalesce_usecs = bp->rx_ticks;
8571 coal->tx_coalesce_usecs = bp->tx_ticks;
8572 coal->stats_block_coalesce_usecs = bp->stats_ticks;
8577 static int bnx2x_set_coalesce(struct net_device *dev,
8578 struct ethtool_coalesce *coal)
8580 struct bnx2x *bp = netdev_priv(dev);
8582 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8583 if (bp->rx_ticks > 3000)
8584 bp->rx_ticks = 3000;
8586 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8587 if (bp->tx_ticks > 0x3000)
8588 bp->tx_ticks = 0x3000;
8590 bp->stats_ticks = coal->stats_block_coalesce_usecs;
8591 if (bp->stats_ticks > 0xffff00)
8592 bp->stats_ticks = 0xffff00;
8593 bp->stats_ticks &= 0xffff00;
8595 if (netif_running(bp->dev))
8596 bnx2x_update_coalesce(bp);
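/* these fields map to the standard ethtool -C options, e.g.
* "ethtool -C ethX rx-usecs 300 tx-usecs 300 stats-block-usecs 1000000"
* (illustrative values; they are clamped by bnx2x_set_coalesce above) */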
8601 static void bnx2x_get_ringparam(struct net_device *dev,
8602 struct ethtool_ringparam *ering)
8604 struct bnx2x *bp = netdev_priv(dev);
8606 ering->rx_max_pending = MAX_RX_AVAIL;
8607 ering->rx_mini_max_pending = 0;
8608 ering->rx_jumbo_max_pending = 0;
8610 ering->rx_pending = bp->rx_ring_size;
8611 ering->rx_mini_pending = 0;
8612 ering->rx_jumbo_pending = 0;
8614 ering->tx_max_pending = MAX_TX_AVAIL;
8615 ering->tx_pending = bp->tx_ring_size;
8618 static int bnx2x_set_ringparam(struct net_device *dev,
8619 struct ethtool_ringparam *ering)
8621 struct bnx2x *bp = netdev_priv(dev);
8623 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8624 (ering->tx_pending > MAX_TX_AVAIL) ||
8625 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8628 bp->rx_ring_size = ering->rx_pending;
8629 bp->tx_ring_size = ering->tx_pending;
8631 if (netif_running(bp->dev)) {
8632 bnx2x_nic_unload(bp, 0);
8633 bnx2x_nic_load(bp, 0);
8639 static void bnx2x_get_pauseparam(struct net_device *dev,
8640 struct ethtool_pauseparam *epause)
8642 struct bnx2x *bp = netdev_priv(dev);
8645 ((bp->req_autoneg & AUTONEG_FLOW_CTRL) == AUTONEG_FLOW_CTRL);
8646 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) == FLOW_CTRL_RX);
8647 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) == FLOW_CTRL_TX);
8649 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8650 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8651 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8654 static int bnx2x_set_pauseparam(struct net_device *dev,
8655 struct ethtool_pauseparam *epause)
8657 struct bnx2x *bp = netdev_priv(dev);
8659 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8660 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8661 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8663 if (epause->autoneg) {
8664 if (!(bp->supported & SUPPORTED_Autoneg)) {
8665 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8669 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
8671 bp->req_autoneg &= ~AUTONEG_FLOW_CTRL;
8673 bp->req_flow_ctrl = FLOW_CTRL_AUTO;
8675 if (epause->rx_pause)
8676 bp->req_flow_ctrl |= FLOW_CTRL_RX;
8677 if (epause->tx_pause)
8678 bp->req_flow_ctrl |= FLOW_CTRL_TX;
8680 if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
8681 (bp->req_flow_ctrl == FLOW_CTRL_AUTO))
8682 bp->req_flow_ctrl = FLOW_CTRL_NONE;
8684 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_flow_ctrl 0x%x\n",
8685 bp->req_autoneg, bp->req_flow_ctrl);
8687 bnx2x_stop_stats(bp);
8688 bnx2x_link_initialize(bp);
8693 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8695 struct bnx2x *bp = netdev_priv(dev);
8700 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8702 struct bnx2x *bp = netdev_priv(dev);
8708 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8711 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8713 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8718 char string[ETH_GSTRING_LEN];
8719 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8720 { "MC Errors (online)" }
8723 static int bnx2x_self_test_count(struct net_device *dev)
8725 return BNX2X_NUM_TESTS;
8728 static void bnx2x_self_test(struct net_device *dev,
8729 struct ethtool_test *etest, u64 *buf)
8731 struct bnx2x *bp = netdev_priv(dev);
8734 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8736 if (bp->state != BNX2X_STATE_OPEN) {
8737 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
8741 stats_state = bp->stats_state;
8742 bnx2x_stop_stats(bp);
8744 if (bnx2x_mc_assert(bp) != 0) {
8746 etest->flags |= ETH_TEST_FL_FAILED;
8749 #ifdef BNX2X_EXTRA_DEBUG
8750 bnx2x_panic_dump(bp);
8752 bp->stats_state = stats_state;
8756 char string[ETH_GSTRING_LEN];
8757 } bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
8758 { "rx_bytes"}, /* 0 */
8759 { "rx_error_bytes"}, /* 1 */
8760 { "tx_bytes"}, /* 2 */
8761 { "tx_error_bytes"}, /* 3 */
8762 { "rx_ucast_packets"}, /* 4 */
8763 { "rx_mcast_packets"}, /* 5 */
8764 { "rx_bcast_packets"}, /* 6 */
8765 { "tx_ucast_packets"}, /* 7 */
8766 { "tx_mcast_packets"}, /* 8 */
8767 { "tx_bcast_packets"}, /* 9 */
8768 { "tx_mac_errors"}, /* 10 */
8769 { "tx_carrier_errors"}, /* 11 */
8770 { "rx_crc_errors"}, /* 12 */
8771 { "rx_align_errors"}, /* 13 */
8772 { "tx_single_collisions"}, /* 14 */
8773 { "tx_multi_collisions"}, /* 15 */
8774 { "tx_deferred"}, /* 16 */
8775 { "tx_excess_collisions"}, /* 17 */
8776 { "tx_late_collisions"}, /* 18 */
8777 { "tx_total_collisions"}, /* 19 */
8778 { "rx_fragments"}, /* 20 */
8779 { "rx_jabbers"}, /* 21 */
8780 { "rx_undersize_packets"}, /* 22 */
8781 { "rx_oversize_packets"}, /* 23 */
8782 { "rx_xon_frames"}, /* 24 */
8783 { "rx_xoff_frames"}, /* 25 */
8784 { "tx_xon_frames"}, /* 26 */
8785 { "tx_xoff_frames"}, /* 27 */
8786 { "rx_mac_ctrl_frames"}, /* 28 */
8787 { "rx_filtered_packets"}, /* 29 */
8788 { "rx_discards"}, /* 30 */
8791 #define STATS_OFFSET32(offset_name) \
8792 (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
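/* e.g. STATS_OFFSET32(total_bytes_received_hi) yields the index of
* that field when the stats block is viewed as an array of u32, which
* is how bnx2x_get_ethtool_stats() below walks hw_stats */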
8794 static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
8795 STATS_OFFSET32(total_bytes_received_hi), /* 0 */
8796 STATS_OFFSET32(stat_IfHCInBadOctets_hi), /* 1 */
8797 STATS_OFFSET32(total_bytes_transmitted_hi), /* 2 */
8798 STATS_OFFSET32(stat_IfHCOutBadOctets_hi), /* 3 */
8799 STATS_OFFSET32(total_unicast_packets_received_hi), /* 4 */
8800 STATS_OFFSET32(total_multicast_packets_received_hi), /* 5 */
8801 STATS_OFFSET32(total_broadcast_packets_received_hi), /* 6 */
8802 STATS_OFFSET32(total_unicast_packets_transmitted_hi), /* 7 */
8803 STATS_OFFSET32(total_multicast_packets_transmitted_hi), /* 8 */
8804 STATS_OFFSET32(total_broadcast_packets_transmitted_hi), /* 9 */
8805 STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
8806 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors), /* 11 */
8807 STATS_OFFSET32(crc_receive_errors), /* 12 */
8808 STATS_OFFSET32(alignment_errors), /* 13 */
8809 STATS_OFFSET32(single_collision_transmit_frames), /* 14 */
8810 STATS_OFFSET32(multiple_collision_transmit_frames), /* 15 */
8811 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions), /* 16 */
8812 STATS_OFFSET32(excessive_collision_frames), /* 17 */
8813 STATS_OFFSET32(late_collision_frames), /* 18 */
8814 STATS_OFFSET32(number_of_bugs_found_in_stats_spec), /* 19 */
8815 STATS_OFFSET32(runt_packets_received), /* 20 */
8816 STATS_OFFSET32(jabber_packets_received), /* 21 */
8817 STATS_OFFSET32(error_runt_packets_received), /* 22 */
8818 STATS_OFFSET32(error_jabber_packets_received), /* 23 */
8819 STATS_OFFSET32(pause_xon_frames_received), /* 24 */
8820 STATS_OFFSET32(pause_xoff_frames_received), /* 25 */
8821 STATS_OFFSET32(pause_xon_frames_transmitted), /* 26 */
8822 STATS_OFFSET32(pause_xoff_frames_transmitted), /* 27 */
8823 STATS_OFFSET32(control_frames_received), /* 28 */
8824 STATS_OFFSET32(mac_filter_discard), /* 29 */
8825 STATS_OFFSET32(no_buff_discard), /* 30 */
8828 static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
8829 8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
8830 4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
8831 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
8835 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8837 switch (stringset) {
8839 memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
8843 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
8848 static int bnx2x_get_stats_count(struct net_device *dev)
8850 return BNX2X_NUM_STATS;
8853 static void bnx2x_get_ethtool_stats(struct net_device *dev,
8854 struct ethtool_stats *stats, u64 *buf)
8856 struct bnx2x *bp = netdev_priv(dev);
8857 u32 *hw_stats = (u32 *)bnx2x_sp_check(bp, eth_stats);
8860 for (i = 0; i < BNX2X_NUM_STATS; i++) {
8861 if (bnx2x_stats_len_arr[i] == 0) {
8862 /* skip this counter */
8870 if (bnx2x_stats_len_arr[i] == 4) {
8871 /* 4-byte counter */
8872 buf[i] = (u64) *(hw_stats + bnx2x_stats_offset_arr[i]);
8875 /* 8-byte counter */
8876 buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
8877 *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
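/* 64-bit counters are kept as two consecutive u32 words with the
* high word first (note the _hi names in the offset array), so
* HILO_U64 presumably recombines them as ((u64)hi << 32) | lo */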
8881 static int bnx2x_phys_id(struct net_device *dev, u32 data)
8883 struct bnx2x *bp = netdev_priv(dev);
8889 for (i = 0; i < (data * 2); i++) {
8891 bnx2x_leds_set(bp, SPEED_1000);
8893 bnx2x_leds_unset(bp);
8895 msleep_interruptible(500);
8896 if (signal_pending(current))
8901 bnx2x_leds_set(bp, bp->line_speed);
8906 static struct ethtool_ops bnx2x_ethtool_ops = {
8907 .get_settings = bnx2x_get_settings,
8908 .set_settings = bnx2x_set_settings,
8909 .get_drvinfo = bnx2x_get_drvinfo,
8910 .get_wol = bnx2x_get_wol,
8911 .set_wol = bnx2x_set_wol,
8912 .get_msglevel = bnx2x_get_msglevel,
8913 .set_msglevel = bnx2x_set_msglevel,
8914 .nway_reset = bnx2x_nway_reset,
8915 .get_link = ethtool_op_get_link,
8916 .get_eeprom_len = bnx2x_get_eeprom_len,
8917 .get_eeprom = bnx2x_get_eeprom,
8918 .set_eeprom = bnx2x_set_eeprom,
8919 .get_coalesce = bnx2x_get_coalesce,
8920 .set_coalesce = bnx2x_set_coalesce,
8921 .get_ringparam = bnx2x_get_ringparam,
8922 .set_ringparam = bnx2x_set_ringparam,
8923 .get_pauseparam = bnx2x_get_pauseparam,
8924 .set_pauseparam = bnx2x_set_pauseparam,
8925 .get_rx_csum = bnx2x_get_rx_csum,
8926 .set_rx_csum = bnx2x_set_rx_csum,
8927 .get_tx_csum = ethtool_op_get_tx_csum,
8928 .set_tx_csum = ethtool_op_set_tx_csum,
8929 .get_sg = ethtool_op_get_sg,
8930 .set_sg = ethtool_op_set_sg,
8931 .get_tso = ethtool_op_get_tso,
8932 .set_tso = bnx2x_set_tso,
8933 .self_test_count = bnx2x_self_test_count,
8934 .self_test = bnx2x_self_test,
8935 .get_strings = bnx2x_get_strings,
8936 .phys_id = bnx2x_phys_id,
8937 .get_stats_count = bnx2x_get_stats_count,
8938 .get_ethtool_stats = bnx2x_get_ethtool_stats
8941 /* end of ethtool_ops */
8943 /****************************************************************************
8944 * General service functions
8945 ****************************************************************************/
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev,
				      bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
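
/* Example (PCI PM register encoding, for illustration): the two low bits
 * of PMCSR select the power state, so "pmcsr |= 3" above requests D3hot,
 * while the D0 path clears those bits and writes PCI_PM_CTRL_PME_STATUS
 * back, which clears a pending PME event by writing 1 to the status bit.
 */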
/****************************************************************************
* net_device service functions
****************************************************************************/
/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;

	DP(NETIF_MSG_IFUP, "called dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 (dev->mc_count > BNX2X_MAX_MULTICAST))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		int i, old, offset;
		struct dev_mc_list *mclist;
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0, mclist = dev->mc_list;
		     mclist && (i < dev->mc_count);
		     i++, mclist = mclist->next) {

			config->config_table[i].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
			config->config_table[i].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
			config->config_table[i].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
			config->config_table[i].cam_entry.flags =
					cpu_to_le16(bp->port);
			config->config_table[i].target_table_entry.flags = 0;
			config->config_table[i].target_table_entry.
								client_id = 0;
			config->config_table[i].target_table_entry.
								vlan_id = 0;

			DP(NETIF_MSG_IFUP,
			   "setting MCAST[%d] (%04x:%04x:%04x)\n",
			   i, config->config_table[i].cam_entry.msb_mac_addr,
			   config->config_table[i].cam_entry.middle_mac_addr,
			   config->config_table[i].cam_entry.lsb_mac_addr);
		}
		old = config->hdr.length_6b;
		if (old > i) {
			for (; i < old; i++) {
				if (CAM_IS_INVALID(config->config_table[i])) {
					i--; /* already invalidated */
					break;
				}
				/* invalidate */
				CAM_INVALIDATE(config->config_table[i]);
			}
		}

		if (CHIP_REV_IS_SLOW(bp))
			offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port);
		else
			offset = BNX2X_MAX_MULTICAST*(1 + bp->port);

		config->hdr.length_6b = i;
		config->hdr.offset = offset;
		config->hdr.reserved0 = 0;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
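
/* Worked example (hypothetical address, little-endian host): for the
 * multicast MAC 01:00:5e:00:00:01, the swab16() loads above yield
 * msb_mac_addr = 0x0100, middle_mac_addr = 0x5e00 and
 * lsb_mac_addr = 0x0001, i.e. each 16-bit CAM field carries two address
 * bytes in wire order.
 */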
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)
		bnx2x_tx_int(fp, budget);

	if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
		work_done = bnx2x_rx_int(fp, budget);

	rmb(); /* bnx2x_has_work() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !bnx2x_has_work(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		netif_rx_complete(bp->dev, napi);

		bnx2x_ack_sb(bp, fp->index, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->index, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return work_done;
}
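
/* NAPI contract, illustrated: with budget == 64 and 64 RX completions
 * consumed, work_done == budget, so the poll above returns without
 * calling netif_rx_complete() and the core schedules it again.  Only
 * when work_done < budget and the status block shows no pending work
 * does the final IGU_INT_ENABLE ack re-arm the interrupt.
 */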
/* Called with netif_tx_lock.
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index = 0;
	dma_addr_t mapping;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = smp_processor_id() % (bp->num_queues);

	fp = &bp->fp[fp_index];
	if (unlikely(bnx2x_tx_avail(fp) <
		     (skb_shinfo(skb)->nr_frags + 3))) {
		bp->slowpath->eth_stats.driver_xoff++;
		netif_stop_queue(dev);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}
	/*
	This is a bit ugly. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	I would like to thank DovH for this mess.
	*/
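
	/* Illustration (not in the original comment): a checksummed TSO
	 * packet with two page fragments occupies the chain
	 *	BD0: start BD (linear headers/data)
	 *	BD1: parsing BD (pbd)
	 *	BD2: data BD for frag 0
	 *	BD3: data BD for frag 1  <- marked as last
	 * which is why nbd below is nr_frags + 2 whenever a parsing BD
	 * is present.
	 */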
	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = fp->tx_bd_prod;
	bd_prod = TX_BD(bd_prod);

	/* get a tx_buff and first bd */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	tx_bd->general_data |= 1; /* header nbd */

	/* remember the first bd of the packet */
	tx_buf->first_bd = bd_prod;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		struct iphdr *iph = ip_hdr(skb);
		u8 len;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;

		/* turn on parsing and get a bd */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];
		len = ((u8 *)iph - (u8 *)skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data = (len |
				    ((skb->protocol == htons(ETH_P_8021Q)) <<
				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
		pbd->ip_hlen = ip_hdrlen(skb) / 2;
		pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
		if (iph->protocol == IPPROTO_TCP) {
			struct tcphdr *th = tcp_hdr(skb);

			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_TCP_CSUM;
			pbd->tcp_flags = htonl(tcp_flag_word(skb)) & 0xFFFF;
			pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
			pbd->tcp_pseudo_csum = swab16(th->check);

		} else if (iph->protocol == IPPROTO_UDP) {
			struct udphdr *uh = udp_hdr(skb);

			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_TCP_CSUM;
			pbd->total_hlen += cpu_to_le16(4);
			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = 5; /* 10 >> 1 */
			pbd->tcp_pseudo_csum = 0;
			/* HW bug: we need to subtract 10 bytes before the
			 * UDP header from the csum
			 */
			uh->check = (u16) ~csum_fold(csum_sub(uh->check,
				csum_partial(((u8 *)(uh)-10), 10, 0)));
		}
	}
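
	/* Sketch of the workaround arithmetic above (our reading of it):
	 * the HW starts summing 10 bytes before the UDP header, so the
	 * driver folds csum_sub(uh->check, csum_partial(uh - 10, 10, 0))
	 * into the checksum field to pre-cancel those extra bytes; the
	 * adjusted value plus the HW's oversized sum then folds to the
	 * correct on-wire checksum.
	 */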
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else {
		tx_bd->vlan = cpu_to_le16(pkt_prod);
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb->len, PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %u\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
	   tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);
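
	/* Example count (illustrative): a checksummed packet with two
	 * frags has pbd != NULL, so nbd = 2 + 2 = 4 here: start BD,
	 * parsing BD and one data BD per fragment.
	 */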
	if (skb_shinfo(skb)->gso_size &&
	    (skb->len > (bp->dev->mtu + ETH_HLEN))) {
		int hlen = 2 * le16_to_cpu(pbd->total_hlen);

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (tx_bd->nbytes > cpu_to_le16(hlen)) {
			/* we split the first bd into headers and data bds
			 * to ease the pain of our fellow microcode engineers
			 * we use one mapping for both bds
			 * So far this has only been observed to happen
			 * in Other Operating Systems(TM)
			 */

			/* first fix first bd */
			nbd++;
			tx_bd->nbd = cpu_to_le16(nbd);
			tx_bd->nbytes = cpu_to_le16(hlen);

			/* we only print this as an error
			 * because we don't think this will ever happen
			 */
			BNX2X_ERR("TSO split header size is %d (%x:%x)"
				  "  nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
				  tx_bd->addr_lo, tx_bd->nbd);

			/* now get a new data bd
			 * (after the pbd) and fill it */
			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
			tx_bd = &fp->tx_desc_ring[bd_prod];

			tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
			tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
			tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
			tx_bd->vlan = cpu_to_le16(pkt_prod);
			/* this marks the bd
			 * as one that has no individual mapping
			 * the FW ignores this flag in a bd not marked start
			 */
			tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
			DP(NETIF_MSG_TX_QUEUED,
			   "TSO split data size is %d (%x:%x)\n",
			   tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
		}

		if (!pbd) {
			/* supposed to be unreached
			 * (and therefore not handled properly...)
			 */
			BNX2X_ERR("LSO with no PBD\n");
		}

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->ip_id = swab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));
		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
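
	/* For reference (standard TSO convention, as we read it):
	 * csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0) with a zero
	 * length gives the pseudo-header checksum without the length
	 * field; the FW adds each segment's own length, hence the
	 * ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN flag set above.
	 */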
	{
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
			tx_bd = &fp->tx_desc_ring[bd_prod];

			mapping = pci_map_page(bp->pdev, frag->page,
					       frag->page_offset,
					       frag->size, PCI_DMA_TODEVICE);

			tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
			tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
			tx_bd->nbytes = cpu_to_le16(frag->size);
			tx_bd->vlan = cpu_to_le16(pkt_prod);
			tx_bd->bd_flags.as_bitfield = 0;
			DP(NETIF_MSG_TX_QUEUED, "frag %d  bd @%p"
			   "  addr (%x:%x)  nbytes %d  flags %x\n",
			   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
			   tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
		}
	}
	/* now at last mark the bd as the last bd */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	tx_buf->skb = skb;

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	/* now send a tx doorbell, counting the next bd
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, pbd->total_hlen);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u  bd %d\n", nbd, bd_prod);

	fp->hw_tx_prods->bds_prod += cpu_to_le16(nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod += cpu_to_le32(1);
	DOORBELL(bp, fp_index, 0);

	mmiowb();

	fp->tx_bd_prod = bd_prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_stop_queue(dev);
		/* Tx completions may have freed entries between the avail
		 * test and netif_stop_queue(), so re-check and wake the
		 * queue to avoid leaving it stalled
		 */
		bp->slowpath->eth_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_wake_queue(dev);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
static struct net_device_stats *bnx2x_get_stats(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return &bp->net_stats;
}
/* Called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, 1);
}
/* Called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	int rc;
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	rc = bnx2x_nic_unload(bp, 1);
	if (rc)
		BNX2X_ERR("bnx2x_nic_unload failed: %d\n", rc);

	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
/* Called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2x_set_mac_addr(bp);

	return 0;
}
/* Called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		if (bp->state == BNX2X_STATE_OPEN) {
			err = bnx2x_mdio22_read(bp, data->reg_num & 0x1f,
						&mii_regval);

			data->val_out = mii_regval;
		} else
			err = -EAGAIN;
		spin_unlock_bh(&bp->phy_lock);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		if (bp->state == BNX2X_STATE_OPEN) {
			err = bnx2x_mdio22_write(bp, data->reg_num & 0x1f,
						 data->val_in);
		} else
			err = -EAGAIN;
		spin_unlock_bh(&bp->phy_lock);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}
/* Called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, 0);
		bnx2x_nic_load(bp, 0);
	}

	return 0;
}
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;
	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;

	bnx2x_netif_stop(bp);

	bnx2x_nic_unload(bp, 0);
	bnx2x_nic_load(bp, 0);

	bp->in_reset_task = 0;
}
static int __devinit bnx2x_init_board(struct pci_dev *pdev,
				      struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->port = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources,"
		       " aborting\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);

	bp->in_reset_task = 0;

	INIT_WORK(&bp->reset_task, bnx2x_reset_task);
	INIT_WORK(&bp->sp_task, bnx2x_sp_task);

	dev->base_addr = pci_resource_start(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr,
				      pci_resource_len(pdev, 0));
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					pci_resource_len(pdev, 2));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	bnx2x_get_hwinfo(bp);

	if (CHIP_REV(bp) == CHIP_REV_FPGA) {
		printk(KERN_ERR PFX "FPGA detected. MCP disabled,"
		       " will only init first device\n");
		onefunc = 1;
		nomcp = 1;
	}

	if (nomcp) {
		printk(KERN_ERR PFX "MCP disabled, will only"
		       " init first device\n");
		onefunc = 1;
	}

	if (onefunc && bp->port) {
		printk(KERN_ERR PFX "Second device disabled, exiting\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_quick_cons_trip_int = 0xff;
	bp->tx_quick_cons_trip = 0xff;
	bp->tx_ticks_int = 50;
	bp->tx_ticks = 50;

	bp->rx_quick_cons_trip_int = 0xff;
	bp->rx_quick_cons_trip = 0xff;
	bp->rx_ticks_int = 25;
	bp->rx_ticks = 25;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = (poll ? poll : HZ);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
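
/* Example decode (standard PCIe link-control encoding): a speed field of
 * 1 means 2.5GT/s (Gen1) and 2 means 5GT/s (Gen2), which the probe printk
 * below renders as "2.5GHz" or "5GHz (Gen2)"; the width field is the lane
 * count (e.g. 8 for a x8 link).
 */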
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;
	int port = PCI_FUNC(pdev->devfn);
	DECLARE_MAC_BUF(mac);

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	netif_carrier_off(dev);

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	if (port && onefunc) {
		printk(KERN_ERR PFX "second function disabled. exiting\n");
		free_netdev(dev);
		return 0;
	}

	rc = bnx2x_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->hard_start_xmit = bnx2x_start_xmit;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->get_stats = bnx2x_get_stats;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->open = bnx2x_open;
	dev->stop = bnx2x_close;
	dev->set_multicast_list = bnx2x_set_rx_mode;
	dev->set_mac_address = bnx2x_change_mac_addr;
	dev->do_ioctl = bnx2x_ioctl;
	dev->change_mtu = bnx2x_change_mtu;
	dev->tx_timeout = bnx2x_tx_timeout;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2x_vlan_rx_register;
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2x;
#endif
	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		if (bp->doorbells)
			iounmap(bp->doorbells);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	bp->name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, bp->name,
	       ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
	       ((CHIP_ID(bp) & 0x0ff0) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));

	return 0;
}
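
/* Sample probe output (hypothetical values, for illustration only):
 * eth0: Broadcom NetXtreme II BCM57710 XGb (A0) PCI-E x8 2.5GHz found
 * at mem d8000000, IRQ 16, node addr 00:10:18:00:00:01
 */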
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	flush_scheduled_work();
	/*tasklet_kill(&bp->sp_task);*/
	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return 0;

	rc = bnx2x_nic_unload(bp, 0);
	if (rc)
		return rc;

	netif_device_detach(dev);
	pci_save_state(pdev);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	return 0;
}
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, 0);
	if (rc)
		return rc;

	return 0;
}
static struct pci_driver bnx2x_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2x_pci_tbl,
	.probe		= bnx2x_init_one,
	.remove		= __devexit_p(bnx2x_remove_one),
	.suspend	= bnx2x_suspend,
	.resume		= bnx2x_resume,
};
static int __init bnx2x_init(void)
{
	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);