1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2012 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
25 #include <net/ip6_checksum.h>
26 #include <linux/prefetch.h>
27 #include "bnx2x_cmn.h"
28 #include "bnx2x_init.h"
34 * bnx2x_move_fp - move content of the fastpath structure.
37 * @from: source FP index
38 * @to: destination FP index
40 * Makes sure the contents of the bp->fp[to].napi are kept
41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then memcpy'ing the entire
43 * source onto the target. Update txdata pointers and related
46 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
50 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
57 /* Copy the NAPI object as it has been already initialized */
58 from_fp->napi = to_fp->napi;
60 /* Move bnx2x_fastpath contents */
61 memcpy(to_fp, from_fp, sizeof(*to_fp));
64 /* move sp_objs contents as well, as their indices match fp ones */
65 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
67 /* move fp_stats contents as well, as their indices match fp ones */
68 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
70 /* Update txdata pointers in fp and move txdata content accordingly:
71 * Each fp consumes 'max_cos' txdata structures, so the index should be
72 * decremented by max_cos x delta.
75 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
76 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
78 if (from == FCOE_IDX(bp)) {
79 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
83 memcpy(&bp->bnx2x_txq[old_txdata_index],
84 &bp->bnx2x_txq[new_txdata_index],
85 sizeof(struct bnx2x_fp_txdata));
86 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
89 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
91 /* free skb in the packet ring at pos idx
92 * return idx of last bd freed
94 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
95 u16 idx, unsigned int *pkts_compl,
96 unsigned int *bytes_compl)
98 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
99 struct eth_tx_start_bd *tx_start_bd;
100 struct eth_tx_bd *tx_data_bd;
101 struct sk_buff *skb = tx_buf->skb;
102 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
105 /* prefetch skb end pointer to speed up dev_kfree_skb() */
108 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
109 txdata->txq_index, idx, tx_buf, skb);
112 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
113 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
114 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
117 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
118 #ifdef BNX2X_STOP_ON_ERROR
119 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
120 BNX2X_ERR("BAD nbd!\n");
124 new_cons = nbd + tx_buf->first_bd;
126 /* Get the next bd */
127 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
129 /* Skip a parse bd... */
131 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
133 /* ...and the TSO split header bd since they have no mapping */
134 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
136 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
142 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
143 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
144 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
146 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
153 (*bytes_compl) += skb->len;
156 dev_kfree_skb_any(skb);
157 tx_buf->first_bd = 0;
163 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
165 struct netdev_queue *txq;
166 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
167 unsigned int pkts_compl = 0, bytes_compl = 0;
169 #ifdef BNX2X_STOP_ON_ERROR
170 if (unlikely(bp->panic))
174 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
175 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
176 sw_cons = txdata->tx_pkt_cons;
178 while (sw_cons != hw_cons) {
181 pkt_cons = TX_BD(sw_cons);
183 DP(NETIF_MSG_TX_DONE,
184 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
185 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
187 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
188 &pkts_compl, &bytes_compl);
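/* Report the completed packets and bytes to BQL (byte queue limits) */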
193 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
195 txdata->tx_pkt_cons = sw_cons;
196 txdata->tx_bd_cons = bd_cons;
198 /* Need to make the tx_bd_cons update visible to start_xmit()
199 * before checking for netif_tx_queue_stopped(). Without the
200 * memory barrier, there is a small possibility that
201 * start_xmit() will miss it and cause the queue to be stopped
203 * On the other hand we need an rmb() here to ensure the proper
204 * ordering of bit testing in the following
205 * netif_tx_queue_stopped(txq) call.
209 if (unlikely(netif_tx_queue_stopped(txq))) {
210 /* Taking tx_lock() is needed to prevent re-enabling the queue
211 * while it's empty. This could have happened if rx_action() gets
212 * suspended in bnx2x_tx_int() after the condition before
213 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
215 * stops the queue->sees fresh tx_bd_cons->releases the queue->
216 * sends some packets consuming the whole queue again->
220 __netif_tx_lock(txq, smp_processor_id());
222 if ((netif_tx_queue_stopped(txq)) &&
223 (bp->state == BNX2X_STATE_OPEN) &&
224 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
225 netif_tx_wake_queue(txq);
227 __netif_tx_unlock(txq);
232 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
235 u16 last_max = fp->last_max_sge;
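/* The signed 16-bit difference (SUB_S16) keeps the comparison below correct
 * when the SGE index wraps around.
 */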
237 if (SUB_S16(idx, last_max) > 0)
238 fp->last_max_sge = idx;
241 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
243 struct eth_end_agg_rx_cqe *cqe)
245 struct bnx2x *bp = fp->bp;
246 u16 last_max, last_elem, first_elem;
253 /* First mark all used pages */
254 for (i = 0; i < sge_len; i++)
255 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
256 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
258 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
259 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
261 /* Here we assume that the last SGE index is the biggest */
262 prefetch((void *)(fp->sge_mask));
263 bnx2x_update_last_max_sge(fp,
264 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
266 last_max = RX_SGE(fp->last_max_sge);
267 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
268 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
270 /* If ring is not full */
271 if (last_elem + 1 != first_elem)
274 /* Now update the prod */
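/* A mask word of zero means all of its pages were consumed: refill it and
 * advance the producer by BIT_VEC64_ELEM_SZ entries; stop at the first word
 * that still has outstanding pages.
 */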
275 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
276 if (likely(fp->sge_mask[i]))
279 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
280 delta += BIT_VEC64_ELEM_SZ;
284 fp->rx_sge_prod += delta;
285 /* clear page-end entries */
286 bnx2x_clear_sge_mask_next_elems(fp);
289 DP(NETIF_MSG_RX_STATUS,
290 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
291 fp->last_max_sge, fp->rx_sge_prod);
294 /* Set Toeplitz hash value in the skb using the value from the
295 * CQE (calculated by HW).
297 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
298 const struct eth_fast_path_rx_cqe *cqe,
301 /* Set Toeplitz hash from CQE */
302 if ((bp->dev->features & NETIF_F_RXHASH) &&
303 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
304 enum eth_rss_hash_type htype;
306 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
307 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
308 (htype == TCP_IPV6_HASH_TYPE);
309 return le32_to_cpu(cqe->rss_hash_result);
315 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
317 struct eth_fast_path_rx_cqe *cqe)
319 struct bnx2x *bp = fp->bp;
320 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
321 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
322 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
324 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
325 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
327 /* print error if current state != stop */
328 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
329 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
331 /* Try to map an empty data buffer from the aggregation info */
332 mapping = dma_map_single(&bp->pdev->dev,
333 first_buf->data + NET_SKB_PAD,
334 fp->rx_buf_size, DMA_FROM_DEVICE);
336 * ...if it fails - move the skb from the consumer to the producer
337 * and set the current aggregation state as ERROR to drop it
338 * when TPA_STOP arrives.
341 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
342 /* Move the BD from the consumer to the producer */
343 bnx2x_reuse_rx_data(fp, cons, prod);
344 tpa_info->tpa_state = BNX2X_TPA_ERROR;
348 /* move empty data from pool to prod */
349 prod_rx_buf->data = first_buf->data;
350 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
351 /* point prod_bd to new data */
352 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
353 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
355 /* move partial skb from cons to pool (don't unmap yet) */
356 *first_buf = *cons_rx_buf;
358 /* mark bin state as START */
359 tpa_info->parsing_flags =
360 le16_to_cpu(cqe->pars_flags.flags);
361 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
362 tpa_info->tpa_state = BNX2X_TPA_START;
363 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
364 tpa_info->placement_offset = cqe->placement_offset;
365 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
366 if (fp->mode == TPA_MODE_GRO) {
367 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
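/* Integer division rounds full_page down to the largest multiple of
 * gro_size that fits into one SGE buffer.
 */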
368 tpa_info->full_page =
369 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
370 tpa_info->gro_size = gro_size;
373 #ifdef BNX2X_STOP_ON_ERROR
374 fp->tpa_queue_used |= (1 << queue);
375 #ifdef _ASM_GENERIC_INT_L64_H
376 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
378 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
384 /* Timestamp option length allowed for TPA aggregation:
386 * nop nop kind length echo val
388 #define TPA_TSTAMP_OPT_LEN 12
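/* nop(1) + nop(1) + kind(1) + length(1) + echo(4) + val(4) = 12 bytes */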
390 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
393 * @parsing_flags: parsing flags from the START CQE
394 * @len_on_bd: total length of the first packet for the
397 * Approximate value of the MSS for this aggregation, calculated using
398 * its first packet.
400 static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
404 * TPA aggregation won't have either IP options or TCP options
405 * other than timestamp or IPv6 extension headers.
407 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
409 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
410 PRS_FLAG_OVERETH_IPV6)
411 hdrs_len += sizeof(struct ipv6hdr);
413 hdrs_len += sizeof(struct iphdr);
416 /* Check if there was a TCP timestamp; if there was, it will
417 * always be 12 bytes long: nop nop kind length echo val.
419 * Otherwise FW would close the aggregation.
421 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
422 hdrs_len += TPA_TSTAMP_OPT_LEN;
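/* Illustrative example: an IPv4 aggregation with TCP timestamps gives
 * hdrs_len = 14 (ETH) + 20 (IP) + 20 (TCP) + 12 (timestamp) = 66, so a
 * len_on_bd of 1514 yields an MSS of 1448.
 */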
424 return len_on_bd - hdrs_len;
427 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
428 struct bnx2x_fastpath *fp, u16 index)
430 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
431 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
432 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
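/* PAGES_PER_SGE_SHIFT is the allocation order, so each SGE entry is backed
 * by PAGES_PER_SGE contiguous pages (SGE_PAGE_SIZE * PAGES_PER_SGE bytes,
 * matching the dma_map_page() length below).
 */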
435 if (unlikely(page == NULL)) {
436 BNX2X_ERR("Can't alloc sge\n");
440 mapping = dma_map_page(&bp->pdev->dev, page, 0,
441 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
442 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
443 __free_pages(page, PAGES_PER_SGE_SHIFT);
444 BNX2X_ERR("Can't map sge\n");
449 dma_unmap_addr_set(sw_buf, mapping, mapping);
451 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
452 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
457 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
458 struct bnx2x_agg_info *tpa_info,
461 struct eth_end_agg_rx_cqe *cqe,
464 struct sw_rx_page *rx_pg, old_rx_pg;
465 u32 i, frag_len, frag_size;
466 int err, j, frag_id = 0;
467 u16 len_on_bd = tpa_info->len_on_bd;
468 u16 full_page = 0, gro_size = 0;
470 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
472 if (fp->mode == TPA_MODE_GRO) {
473 gro_size = tpa_info->gro_size;
474 full_page = tpa_info->full_page;
477 /* This is needed in order to enable forwarding support */
479 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
480 tpa_info->parsing_flags, len_on_bd);
483 if (fp->mode == TPA_MODE_GRO)
484 skb_shinfo(skb)->gso_type =
485 (GET_FLAG(tpa_info->parsing_flags,
486 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
487 PRS_FLAG_OVERETH_IPV6) ?
488 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
492 #ifdef BNX2X_STOP_ON_ERROR
493 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
494 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
496 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
502 /* Run through the SGL and compose the fragmented skb */
503 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
504 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
506 /* FW gives the indices of the SGE as if the ring is an array
507 (meaning that "next" element will consume 2 indices) */
508 if (fp->mode == TPA_MODE_GRO)
509 frag_len = min_t(u32, frag_size, (u32)full_page);
511 frag_len = min_t(u32, frag_size,
512 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
514 rx_pg = &fp->rx_page_ring[sge_idx];
517 /* If we fail to allocate a substitute page, we simply stop
518 where we are and drop the whole packet */
519 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
521 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
525 /* Unmap the page as we are going to pass it to the stack */
526 dma_unmap_page(&bp->pdev->dev,
527 dma_unmap_addr(&old_rx_pg, mapping),
528 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
529 /* Add one frag and update the appropriate fields in the skb */
530 if (fp->mode == TPA_MODE_LRO)
531 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
535 for (rem = frag_len; rem > 0; rem -= gro_size) {
536 int len = rem > gro_size ? gro_size : rem;
537 skb_fill_page_desc(skb, frag_id++,
538 old_rx_pg.page, offset, len);
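/* Each additional fragment that points into the same page needs its own
 * page reference; the first fragment takes over the reference owned by the
 * rx ring.
 */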
540 get_page(old_rx_pg.page);
545 skb->data_len += frag_len;
546 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
547 skb->len += frag_len;
549 frag_size -= frag_len;
555 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
556 struct bnx2x_agg_info *tpa_info,
558 struct eth_end_agg_rx_cqe *cqe,
561 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
562 u8 pad = tpa_info->placement_offset;
563 u16 len = tpa_info->len_on_bd;
564 struct sk_buff *skb = NULL;
565 u8 *new_data, *data = rx_buf->data;
566 u8 old_tpa_state = tpa_info->tpa_state;
568 tpa_info->tpa_state = BNX2X_TPA_STOP;
570 /* If there was an error during the handling of the TPA_START -
571 * drop this aggregation.
573 if (old_tpa_state == BNX2X_TPA_ERROR)
576 /* Try to allocate the new data */
577 new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
579 /* Unmap skb in the pool anyway, as we are going to change
580 pool entry status to BNX2X_TPA_STOP even if new skb allocation
582 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
583 fp->rx_buf_size, DMA_FROM_DEVICE);
584 if (likely(new_data))
585 skb = build_skb(data, 0);
588 #ifdef BNX2X_STOP_ON_ERROR
589 if (pad + len > fp->rx_buf_size) {
590 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
591 pad, len, fp->rx_buf_size);
597 skb_reserve(skb, pad + NET_SKB_PAD);
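/* The buffer was DMA-mapped at data + NET_SKB_PAD (see bnx2x_tpa_start), so
 * both the software pad and the HW placement offset precede the packet.
 */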
599 skb->rxhash = tpa_info->rxhash;
600 skb->l4_rxhash = tpa_info->l4_rxhash;
602 skb->protocol = eth_type_trans(skb, bp->dev);
603 skb->ip_summed = CHECKSUM_UNNECESSARY;
605 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
606 skb, cqe, cqe_idx)) {
607 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
608 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
609 napi_gro_receive(&fp->napi, skb);
611 DP(NETIF_MSG_RX_STATUS,
612 "Failed to allocate new pages - dropping packet!\n");
613 dev_kfree_skb_any(skb);
617 /* put new data in bin */
618 rx_buf->data = new_data;
624 /* drop the packet and keep the buffer in the bin */
625 DP(NETIF_MSG_RX_STATUS,
626 "Failed to allocate or map a new skb - dropping packet!\n");
627 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
630 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
631 struct bnx2x_fastpath *fp, u16 index)
634 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
635 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
638 data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
639 if (unlikely(data == NULL))
642 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
645 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
647 BNX2X_ERR("Can't map rx data\n");
652 dma_unmap_addr_set(rx_buf, mapping, mapping);
654 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
655 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
661 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
662 struct bnx2x_fastpath *fp,
663 struct bnx2x_eth_q_stats *qstats)
665 /* Do nothing if no IP/L4 csum validation was done */
667 if (cqe->fast_path_cqe.status_flags &
668 (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
669 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
672 /* If both IP/L4 validation were done, check if an error was found. */
674 if (cqe->fast_path_cqe.type_error_flags &
675 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
676 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
677 qstats->hw_csum_err++;
679 skb->ip_summed = CHECKSUM_UNNECESSARY;
682 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
684 struct bnx2x *bp = fp->bp;
685 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
686 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
689 #ifdef BNX2X_STOP_ON_ERROR
690 if (unlikely(bp->panic))
694 /* The CQ "next element" is the same size as a regular element,
695 which is why it's ok here */
696 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
697 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
700 bd_cons = fp->rx_bd_cons;
701 bd_prod = fp->rx_bd_prod;
702 bd_prod_fw = bd_prod;
703 sw_comp_cons = fp->rx_comp_cons;
704 sw_comp_prod = fp->rx_comp_prod;
706 /* Memory barrier necessary as speculative reads of the rx
707 * buffer can be ahead of the index in the status block
711 DP(NETIF_MSG_RX_STATUS,
712 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
713 fp->index, hw_comp_cons, sw_comp_cons);
715 while (sw_comp_cons != hw_comp_cons) {
716 struct sw_rx_bd *rx_buf = NULL;
718 union eth_rx_cqe *cqe;
719 struct eth_fast_path_rx_cqe *cqe_fp;
721 enum eth_rx_cqe_type cqe_fp_type;
726 #ifdef BNX2X_STOP_ON_ERROR
727 if (unlikely(bp->panic))
731 comp_ring_cons = RCQ_BD(sw_comp_cons);
732 bd_prod = RX_BD(bd_prod);
733 bd_cons = RX_BD(bd_cons);
735 cqe = &fp->rx_comp_ring[comp_ring_cons];
736 cqe_fp = &cqe->fast_path_cqe;
737 cqe_fp_flags = cqe_fp->type_error_flags;
738 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
740 DP(NETIF_MSG_RX_STATUS,
741 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
742 CQE_TYPE(cqe_fp_flags),
743 cqe_fp_flags, cqe_fp->status_flags,
744 le32_to_cpu(cqe_fp->rss_hash_result),
745 le16_to_cpu(cqe_fp->vlan_tag),
746 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
748 /* is this a slowpath msg? */
749 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
750 bnx2x_sp_event(fp, cqe);
754 rx_buf = &fp->rx_buf_ring[bd_cons];
757 if (!CQE_TYPE_FAST(cqe_fp_type)) {
758 struct bnx2x_agg_info *tpa_info;
759 u16 frag_size, pages;
760 #ifdef BNX2X_STOP_ON_ERROR
762 if (fp->disable_tpa &&
763 (CQE_TYPE_START(cqe_fp_type) ||
764 CQE_TYPE_STOP(cqe_fp_type)))
765 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
766 CQE_TYPE(cqe_fp_type));
769 if (CQE_TYPE_START(cqe_fp_type)) {
770 u16 queue = cqe_fp->queue_index;
771 DP(NETIF_MSG_RX_STATUS,
772 "calling tpa_start on queue %d\n",
775 bnx2x_tpa_start(fp, queue,
782 queue = cqe->end_agg_cqe.queue_index;
783 tpa_info = &fp->tpa_info[queue];
784 DP(NETIF_MSG_RX_STATUS,
785 "calling tpa_stop on queue %d\n",
788 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
791 if (fp->mode == TPA_MODE_GRO)
792 pages = (frag_size + tpa_info->full_page - 1) /
795 pages = SGE_PAGE_ALIGN(frag_size) >>
798 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
799 &cqe->end_agg_cqe, comp_ring_cons);
800 #ifdef BNX2X_STOP_ON_ERROR
805 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
809 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
810 pad = cqe_fp->placement_offset;
811 dma_sync_single_for_cpu(&bp->pdev->dev,
812 dma_unmap_addr(rx_buf, mapping),
813 pad + RX_COPY_THRESH,
816 prefetch(data + pad); /* speed up eth_type_trans() */
817 /* is this an error packet? */
818 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
819 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
820 "ERROR flags %x rx packet %u\n",
821 cqe_fp_flags, sw_comp_cons);
822 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
826 /* Since we don't have a jumbo ring
827 * copy small packets if mtu > 1500
829 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
830 (len <= RX_COPY_THRESH)) {
831 skb = netdev_alloc_skb_ip_align(bp->dev, len);
833 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
834 "ERROR packet dropped because of alloc failure\n");
835 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
838 memcpy(skb->data, data + pad, len);
839 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
841 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
842 dma_unmap_single(&bp->pdev->dev,
843 dma_unmap_addr(rx_buf, mapping),
846 skb = build_skb(data, 0);
847 if (unlikely(!skb)) {
849 bnx2x_fp_qstats(bp, fp)->
850 rx_skb_alloc_failed++;
853 skb_reserve(skb, pad);
855 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
856 "ERROR packet dropped because of alloc failure\n");
857 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
859 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
865 skb->protocol = eth_type_trans(skb, bp->dev);
867 /* Set Toeplitz hash for a non-LRO skb */
868 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
869 skb->l4_rxhash = l4_rxhash;
871 skb_checksum_none_assert(skb);
873 if (bp->dev->features & NETIF_F_RXCSUM)
874 bnx2x_csum_validate(skb, cqe, fp,
875 bnx2x_fp_qstats(bp, fp));
877 skb_record_rx_queue(skb, fp->rx_queue);
879 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
881 __vlan_hwaccel_put_tag(skb,
882 le16_to_cpu(cqe_fp->vlan_tag));
883 napi_gro_receive(&fp->napi, skb);
889 bd_cons = NEXT_RX_IDX(bd_cons);
890 bd_prod = NEXT_RX_IDX(bd_prod);
891 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
894 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
895 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
897 if (rx_pkt == budget)
901 fp->rx_bd_cons = bd_cons;
902 fp->rx_bd_prod = bd_prod_fw;
903 fp->rx_comp_cons = sw_comp_cons;
904 fp->rx_comp_prod = sw_comp_prod;
906 /* Update producers */
907 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
910 fp->rx_pkt += rx_pkt;
916 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
918 struct bnx2x_fastpath *fp = fp_cookie;
919 struct bnx2x *bp = fp->bp;
923 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
924 fp->index, fp->fw_sb_id, fp->igu_sb_id);
925 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
927 #ifdef BNX2X_STOP_ON_ERROR
928 if (unlikely(bp->panic))
932 /* Handle Rx and Tx according to MSI-X vector */
933 prefetch(fp->rx_cons_sb);
935 for_each_cos_in_tx_queue(fp, cos)
936 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
938 prefetch(&fp->sb_running_index[SM_RX_ID]);
939 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
944 /* HW Lock for shared dual port PHYs */
945 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
947 mutex_lock(&bp->port.phy_mutex);
949 if (bp->port.need_hw_lock)
950 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
953 void bnx2x_release_phy_lock(struct bnx2x *bp)
955 if (bp->port.need_hw_lock)
956 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
958 mutex_unlock(&bp->port.phy_mutex);
961 /* calculates MF speed according to current linespeed and MF configuration */
962 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
964 u16 line_speed = bp->link_vars.line_speed;
966 u16 maxCfg = bnx2x_extract_max_cfg(bp,
967 bp->mf_config[BP_VN(bp)]);
969 /* Calculate the current MAX line speed limit for the MF
973 line_speed = (line_speed * maxCfg) / 100;
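/* Here maxCfg is treated as a percentage of the line speed; in the branch
 * below it is a rate expressed in 100 Mbps units.
 */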
975 u16 vn_max_rate = maxCfg * 100;
977 if (vn_max_rate < line_speed)
978 line_speed = vn_max_rate;
986 * bnx2x_fill_report_data - fill link report data to report
989 * @data: link state to update
991 * It uses non-atomic bit operations because it is called under the mutex.
993 static void bnx2x_fill_report_data(struct bnx2x *bp,
994 struct bnx2x_link_report_data *data)
996 u16 line_speed = bnx2x_get_mf_speed(bp);
998 memset(data, 0, sizeof(*data));
1000 /* Fill the report data: effective line speed */
1001 data->line_speed = line_speed;
1004 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1005 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1006 &data->link_report_flags);
1009 if (bp->link_vars.duplex == DUPLEX_FULL)
1010 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1012 /* Rx Flow Control is ON */
1013 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1014 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1016 /* Tx Flow Control is ON */
1017 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1018 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1022 * bnx2x_link_report - report link status to OS.
1024 * @bp: driver handle
1026 * Calls the __bnx2x_link_report() under the same locking scheme
1027 * as a link/PHY state managing code to ensure a consistent link
1031 void bnx2x_link_report(struct bnx2x *bp)
1033 bnx2x_acquire_phy_lock(bp);
1034 __bnx2x_link_report(bp);
1035 bnx2x_release_phy_lock(bp);
1039 * __bnx2x_link_report - report link status to OS.
1041 * @bp: driver handle
1043 * Non-atomic implementation.
1044 * Should be called under the phy_lock.
1046 void __bnx2x_link_report(struct bnx2x *bp)
1048 struct bnx2x_link_report_data cur_data;
1051 if (!CHIP_IS_E1(bp))
1052 bnx2x_read_mf_cfg(bp);
1054 /* Read the current link report info */
1055 bnx2x_fill_report_data(bp, &cur_data);
1057 /* Don't report link down or exactly the same link status twice */
1058 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1059 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1060 &bp->last_reported_link.link_report_flags) &&
1061 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1062 &cur_data.link_report_flags)))
1067 /* We are going to report new link parameters now -
1068 * remember the current data for the next time.
1070 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1072 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1073 &cur_data.link_report_flags)) {
1074 netif_carrier_off(bp->dev);
1075 netdev_err(bp->dev, "NIC Link is Down\n");
1081 netif_carrier_on(bp->dev);
1083 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1084 &cur_data.link_report_flags))
1089 /* Handle the FC at the end so that only these flags could
1090 * possibly be set. This way we may easily check if there is no FC
1093 if (cur_data.link_report_flags) {
1094 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1095 &cur_data.link_report_flags)) {
1096 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1097 &cur_data.link_report_flags))
1098 flow = "ON - receive & transmit";
1100 flow = "ON - receive";
1102 flow = "ON - transmit";
1107 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1108 cur_data.line_speed, duplex, flow);
1112 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1116 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1117 struct eth_rx_sge *sge;
1119 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1121 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1122 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1125 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1126 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1130 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1131 struct bnx2x_fastpath *fp, int last)
1135 for (i = 0; i < last; i++) {
1136 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1137 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1138 u8 *data = first_buf->data;
1141 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1144 if (tpa_info->tpa_state == BNX2X_TPA_START)
1145 dma_unmap_single(&bp->pdev->dev,
1146 dma_unmap_addr(first_buf, mapping),
1147 fp->rx_buf_size, DMA_FROM_DEVICE);
1149 first_buf->data = NULL;
1153 void bnx2x_init_rx_rings(struct bnx2x *bp)
1155 int func = BP_FUNC(bp);
1159 /* Allocate TPA resources */
1160 for_each_rx_queue(bp, j) {
1161 struct bnx2x_fastpath *fp = &bp->fp[j];
1164 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1166 if (!fp->disable_tpa) {
1167 /* Fill the per-aggregation pool */
1168 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1169 struct bnx2x_agg_info *tpa_info =
1171 struct sw_rx_bd *first_buf =
1172 &tpa_info->first_buf;
1174 first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
1176 if (!first_buf->data) {
1177 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1179 bnx2x_free_tpa_pool(bp, fp, i);
1180 fp->disable_tpa = 1;
1183 dma_unmap_addr_set(first_buf, mapping, 0);
1184 tpa_info->tpa_state = BNX2X_TPA_STOP;
1187 /* "next page" elements initialization */
1188 bnx2x_set_next_page_sgl(fp);
1190 /* set SGEs bit mask */
1191 bnx2x_init_sge_ring_bit_mask(fp);
1193 /* Allocate SGEs and initialize the ring elements */
1194 for (i = 0, ring_prod = 0;
1195 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1197 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1198 BNX2X_ERR("was only able to allocate %d rx sges\n",
1200 BNX2X_ERR("disabling TPA for queue[%d]\n",
1202 /* Cleanup already allocated elements */
1203 bnx2x_free_rx_sge_range(bp, fp,
1205 bnx2x_free_tpa_pool(bp, fp,
1207 fp->disable_tpa = 1;
1211 ring_prod = NEXT_SGE_IDX(ring_prod);
1214 fp->rx_sge_prod = ring_prod;
1218 for_each_rx_queue(bp, j) {
1219 struct bnx2x_fastpath *fp = &bp->fp[j];
1223 /* Activate BD ring */
1225 * this will generate an interrupt (to the TSTORM)
1226 * must only be done after chip is initialized
1228 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1234 if (CHIP_IS_E1(bp)) {
1235 REG_WR(bp, BAR_USTRORM_INTMEM +
1236 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1237 U64_LO(fp->rx_comp_mapping));
1238 REG_WR(bp, BAR_USTRORM_INTMEM +
1239 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1240 U64_HI(fp->rx_comp_mapping));
1245 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1250 for_each_tx_queue(bp, i) {
1251 struct bnx2x_fastpath *fp = &bp->fp[i];
1252 for_each_cos_in_tx_queue(fp, cos) {
1253 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1254 unsigned pkts_compl = 0, bytes_compl = 0;
1256 u16 sw_prod = txdata->tx_pkt_prod;
1257 u16 sw_cons = txdata->tx_pkt_cons;
1259 while (sw_cons != sw_prod) {
1260 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1261 &pkts_compl, &bytes_compl);
1264 netdev_tx_reset_queue(
1265 netdev_get_tx_queue(bp->dev,
1266 txdata->txq_index));
1271 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1273 struct bnx2x *bp = fp->bp;
1276 /* ring wasn't allocated */
1277 if (fp->rx_buf_ring == NULL)
1280 for (i = 0; i < NUM_RX_BD; i++) {
1281 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1282 u8 *data = rx_buf->data;
1286 dma_unmap_single(&bp->pdev->dev,
1287 dma_unmap_addr(rx_buf, mapping),
1288 fp->rx_buf_size, DMA_FROM_DEVICE);
1290 rx_buf->data = NULL;
1295 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1299 for_each_rx_queue(bp, j) {
1300 struct bnx2x_fastpath *fp = &bp->fp[j];
1302 bnx2x_free_rx_bds(fp);
1304 if (!fp->disable_tpa)
1305 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1309 void bnx2x_free_skbs(struct bnx2x *bp)
1311 bnx2x_free_tx_skbs(bp);
1312 bnx2x_free_rx_skbs(bp);
1315 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1317 /* load old values */
1318 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1320 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1321 /* leave all but MAX value */
1322 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1324 /* set new MAX value */
1325 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1326 & FUNC_MF_CFG_MAX_BW_MASK;
1328 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1333 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1335 * @bp: driver handle
1336 * @nvecs: number of vectors to be released
1338 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1342 if (nvecs == offset)
1344 free_irq(bp->msix_table[offset].vector, bp->dev);
1345 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1346 bp->msix_table[offset].vector);
1349 if (nvecs == offset)
1354 for_each_eth_queue(bp, i) {
1355 if (nvecs == offset)
1357 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1358 i, bp->msix_table[offset].vector);
1360 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1364 void bnx2x_free_irq(struct bnx2x *bp)
1366 if (bp->flags & USING_MSIX_FLAG &&
1367 !(bp->flags & USING_SINGLE_MSIX_FLAG))
1368 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1371 free_irq(bp->dev->irq, bp->dev);
1374 int bnx2x_enable_msix(struct bnx2x *bp)
1376 int msix_vec = 0, i, rc, req_cnt;
1378 bp->msix_table[msix_vec].entry = msix_vec;
1379 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1380 bp->msix_table[0].entry);
1384 bp->msix_table[msix_vec].entry = msix_vec;
1385 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1386 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1389 /* We need separate vectors for ETH queues only (not FCoE) */
1390 for_each_eth_queue(bp, i) {
1391 bp->msix_table[msix_vec].entry = msix_vec;
1392 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1393 msix_vec, msix_vec, i);
1397 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
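/* pci_enable_msix() returns 0 on success, a positive count of the vectors
 * actually available when the request is too large, or a negative errno;
 * the code below falls back accordingly (fewer vectors, then a single one).
 */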
1399 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1402 * reconfigure number of tx/rx queues according to available
1405 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1406 /* how many fewer vectors will we have? */
1407 int diff = req_cnt - rc;
1409 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1411 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1414 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1418 * decrease number of queues by number of unallocated entries
1420 bp->num_queues -= diff;
1422 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1424 } else if (rc > 0) {
1425 /* Get by with single vector */
1426 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1428 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1433 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1434 bp->flags |= USING_SINGLE_MSIX_FLAG;
1436 } else if (rc < 0) {
1437 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1441 bp->flags |= USING_MSIX_FLAG;
1446 /* fall back to INTx if not enough memory */
1448 bp->flags |= DISABLE_MSI_FLAG;
1453 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1455 int i, rc, offset = 0;
1457 rc = request_irq(bp->msix_table[offset++].vector,
1458 bnx2x_msix_sp_int, 0,
1459 bp->dev->name, bp->dev);
1461 BNX2X_ERR("request sp irq failed\n");
1468 for_each_eth_queue(bp, i) {
1469 struct bnx2x_fastpath *fp = &bp->fp[i];
1470 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1473 rc = request_irq(bp->msix_table[offset].vector,
1474 bnx2x_msix_fp_int, 0, fp->name, fp);
1476 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1477 bp->msix_table[offset].vector, rc);
1478 bnx2x_free_msix_irqs(bp, offset);
1485 i = BNX2X_NUM_ETH_QUEUES(bp);
1486 offset = 1 + CNIC_PRESENT;
1487 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1488 bp->msix_table[0].vector,
1489 0, bp->msix_table[offset].vector,
1490 i - 1, bp->msix_table[offset + i - 1].vector);
1495 int bnx2x_enable_msi(struct bnx2x *bp)
1499 rc = pci_enable_msi(bp->pdev);
1501 BNX2X_DEV_INFO("MSI is not attainable\n");
1504 bp->flags |= USING_MSI_FLAG;
1509 static int bnx2x_req_irq(struct bnx2x *bp)
1511 unsigned long flags;
1514 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1517 flags = IRQF_SHARED;
1519 if (bp->flags & USING_MSIX_FLAG)
1520 irq = bp->msix_table[0].vector;
1522 irq = bp->pdev->irq;
1524 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1527 static int bnx2x_setup_irqs(struct bnx2x *bp)
1530 if (bp->flags & USING_MSIX_FLAG &&
1531 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1532 rc = bnx2x_req_msix_irqs(bp);
1537 rc = bnx2x_req_irq(bp);
1539 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1542 if (bp->flags & USING_MSI_FLAG) {
1543 bp->dev->irq = bp->pdev->irq;
1544 netdev_info(bp->dev, "using MSI IRQ %d\n",
1547 if (bp->flags & USING_MSIX_FLAG) {
1548 bp->dev->irq = bp->msix_table[0].vector;
1549 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1557 static void bnx2x_napi_enable(struct bnx2x *bp)
1561 for_each_rx_queue(bp, i)
1562 napi_enable(&bnx2x_fp(bp, i, napi));
1565 static void bnx2x_napi_disable(struct bnx2x *bp)
1569 for_each_rx_queue(bp, i)
1570 napi_disable(&bnx2x_fp(bp, i, napi));
1573 void bnx2x_netif_start(struct bnx2x *bp)
1575 if (netif_running(bp->dev)) {
1576 bnx2x_napi_enable(bp);
1577 bnx2x_int_enable(bp);
1578 if (bp->state == BNX2X_STATE_OPEN)
1579 netif_tx_wake_all_queues(bp->dev);
1583 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1585 bnx2x_int_disable_sync(bp, disable_hw);
1586 bnx2x_napi_disable(bp);
1589 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1591 struct bnx2x *bp = netdev_priv(dev);
1595 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1596 u16 ether_type = ntohs(hdr->h_proto);
1598 /* Skip VLAN tag if present */
1599 if (ether_type == ETH_P_8021Q) {
1600 struct vlan_ethhdr *vhdr =
1601 (struct vlan_ethhdr *)skb->data;
1603 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1606 /* If ethertype is FCoE or FIP - use FCoE ring */
1607 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1608 return bnx2x_fcoe_tx(bp, txq_index);
1611 /* select a non-FCoE queue */
1612 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1616 void bnx2x_set_num_queues(struct bnx2x *bp)
1619 bp->num_queues = bnx2x_calc_num_queues(bp);
1622 /* override in STORAGE SD modes */
1623 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1626 /* Add special queues */
1627 bp->num_queues += NON_ETH_CONTEXT_USE;
1629 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1633 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1635 * @bp: Driver handle
1637 * We currently support at most 16 Tx queues for each CoS, thus we will
1638 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1641 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1642 * index after all ETH L2 indices.
1644 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1645 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1646 * 16..31,...) with indices that are not coupled with any real Tx queue.
1648 * The proper configuration of skb->queue_mapping is handled by
1649 * bnx2x_select_queue() and __skb_tx_hash().
1651 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1652 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1654 static int bnx2x_set_real_num_queues(struct bnx2x *bp)
1658 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1659 rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
1661 /* account for fcoe queue */
1669 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1671 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1674 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1676 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1680 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1686 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1690 for_each_queue(bp, i) {
1691 struct bnx2x_fastpath *fp = &bp->fp[i];
1694 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1697 * Although there are no IP frames expected to arrive to
1698 * this ring we still want to add an
1699 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1702 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1705 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1706 IP_HEADER_ALIGNMENT_PADDING +
1709 BNX2X_FW_RX_ALIGN_END;
1710 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
1714 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1717 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1719 /* Prepare the initial contents of the indirection table if RSS is
1722 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1723 bp->rss_conf_obj.ind_table[i] =
1725 ethtool_rxfh_indir_default(i, num_eth_queues);
1728 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1729 * per-port, so if explicit configuration is needed, do it only
1732 * For 57712 and newer on the other hand it's a per-function
1735 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1738 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1741 struct bnx2x_config_rss_params params = {NULL};
1744 /* Although RSS is meaningless when there is a single HW queue we
1745 * still need it enabled in order to have HW Rx hash generated.
1747 * if (!is_eth_multi(bp))
1748 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1751 params.rss_obj = rss_obj;
1753 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
1755 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
1757 /* RSS configuration */
1758 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
1759 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
1760 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
1761 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
1762 if (rss_obj->udp_rss_v4)
1763 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
1764 if (rss_obj->udp_rss_v6)
1765 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
1768 params.rss_result_mask = MULTI_MASK;
1770 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
1774 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1775 params.rss_key[i] = random32();
1777 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
1780 return bnx2x_config_rss(bp, ¶ms);
1783 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1785 struct bnx2x_func_state_params func_params = {NULL};
1787 /* Prepare parameters for function state transitions */
1788 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1790 func_params.f_obj = &bp->func_obj;
1791 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1793 func_params.params.hw_init.load_phase = load_code;
1795 return bnx2x_func_state_change(bp, &func_params);
1799 * Cleans the objects that have internal lists without sending
1800 * ramrods. Should be run when interrupts are disabled.
1802 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1805 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1806 struct bnx2x_mcast_ramrod_params rparam = {NULL};
1807 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
1809 /***************** Cleanup MACs' object first *************************/
1811 /* Wait for completion of the requested operations */
1812 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1813 /* Perform a dry cleanup */
1814 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1816 /* Clean ETH primary MAC */
1817 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1818 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
1821 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1823 /* Cleanup UC list */
1825 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1826 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1829 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1831 /***************** Now clean mcast object *****************************/
1832 rparam.mcast_obj = &bp->mcast_obj;
1833 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1835 /* Add a DEL command... */
1836 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1838 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1841 /* ...and wait until all pending commands are cleared */
1842 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1845 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1850 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1854 #ifndef BNX2X_STOP_ON_ERROR
1855 #define LOAD_ERROR_EXIT(bp, label) \
1857 (bp)->state = BNX2X_STATE_ERROR; \
1861 #define LOAD_ERROR_EXIT(bp, label) \
1863 (bp)->state = BNX2X_STATE_ERROR; \
1869 bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1871 /* build FW version dword */
1872 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1873 (BCM_5710_FW_MINOR_VERSION << 8) +
1874 (BCM_5710_FW_REVISION_VERSION << 16) +
1875 (BCM_5710_FW_ENGINEERING_VERSION << 24);
1877 /* read loaded FW from chip */
1878 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1880 DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1882 if (loaded_fw != my_fw) {
1884 BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1893 * bnx2x_bz_fp - zero content of the fastpath structure.
1895 * @bp: driver handle
1896 * @index: fastpath index to be zeroed
1898 * Makes sure the contents of the bp->fp[index].napi are kept
1901 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1903 struct bnx2x_fastpath *fp = &bp->fp[index];
1904 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
1907 struct napi_struct orig_napi = fp->napi;
1908 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
1909 /* bzero bnx2x_fastpath contents */
1910 if (bp->stats_init) {
1911 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1912 memset(fp, 0, sizeof(*fp));
1914 /* Keep Queue statistics */
1915 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
1916 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
1918 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
1920 if (tmp_eth_q_stats)
1921 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
1922 sizeof(struct bnx2x_eth_q_stats));
1924 tmp_eth_q_stats_old =
1925 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
1927 if (tmp_eth_q_stats_old)
1928 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
1929 sizeof(struct bnx2x_eth_q_stats_old));
1931 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1932 memset(fp, 0, sizeof(*fp));
1934 if (tmp_eth_q_stats) {
1935 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
1936 sizeof(struct bnx2x_eth_q_stats));
1937 kfree(tmp_eth_q_stats);
1940 if (tmp_eth_q_stats_old) {
1941 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
1942 sizeof(struct bnx2x_eth_q_stats_old));
1943 kfree(tmp_eth_q_stats_old);
1948 /* Restore the NAPI object as it has been already initialized */
1949 fp->napi = orig_napi;
1950 fp->tpa_info = orig_tpa_info;
1954 fp->max_cos = bp->max_cos;
1956 /* Special queues support only one CoS */
1959 /* Init txdata pointers */
1962 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
1965 for_each_cos_in_tx_queue(fp, cos)
1966 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
1967 BNX2X_NUM_ETH_QUEUES(bp) + index];
1970 * set the tpa flag for each queue. The tpa flag determines the queue
1971 * minimal size so it must be set prior to queue memory allocation
1973 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
1974 (bp->flags & GRO_ENABLE_FLAG &&
1975 bnx2x_mtu_allows_gro(bp->dev->mtu)));
1976 if (bp->flags & TPA_ENABLE_FLAG)
1977 fp->mode = TPA_MODE_LRO;
1978 else if (bp->flags & GRO_ENABLE_FLAG)
1979 fp->mode = TPA_MODE_GRO;
1982 /* We don't want TPA on an FCoE L2 ring */
1984 fp->disable_tpa = 1;
1989 /* must be called with rtnl_lock */
1990 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1992 int port = BP_PORT(bp);
1996 #ifdef BNX2X_STOP_ON_ERROR
1997 if (unlikely(bp->panic)) {
1998 BNX2X_ERR("Can't load NIC when there is panic\n");
2003 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2005 /* Set the initial link reported state to link down */
2006 bnx2x_acquire_phy_lock(bp);
2007 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2008 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2009 &bp->last_reported_link.link_report_flags);
2010 bnx2x_release_phy_lock(bp);
2012 /* must be called before memory allocation and HW init */
2013 bnx2x_ilt_set_info(bp);
2016 * Zero fastpath structures preserving invariants like napi, which are
2017 * allocated only once, fp index, max_cos, bp pointer.
2018 * Also set fp->disable_tpa and txdata_ptr.
2020 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2021 for_each_queue(bp, i)
2023 memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
2024 sizeof(struct bnx2x_fp_txdata));
2027 /* Set the receive queues buffer size */
2028 bnx2x_set_rx_buf_size(bp);
2030 if (bnx2x_alloc_mem(bp))
2033 /* Since bnx2x_alloc_mem() may possibly update
2034 * bp->num_queues, bnx2x_set_real_num_queues() should always
2037 rc = bnx2x_set_real_num_queues(bp);
2039 BNX2X_ERR("Unable to set real_num_queues\n");
2040 LOAD_ERROR_EXIT(bp, load_error0);
2043 /* configure multi cos mappings in kernel.
2044 * this configuration may be overridden by a multi class queue discipline
2045 * or by a dcbx negotiation result.
2047 bnx2x_setup_tc(bp->dev, bp->max_cos);
2049 /* Add all NAPI objects */
2050 bnx2x_add_all_napi(bp);
2051 bnx2x_napi_enable(bp);
2053 /* set pf load just before approaching the MCP */
2054 bnx2x_set_pf_load(bp);
2056 /* Send LOAD_REQUEST command to MCP
2057 * Returns the type of LOAD command:
2058 * if it is the first port to be initialized
2059 * common blocks should be initialized, otherwise - not
2061 if (!BP_NOMCP(bp)) {
2064 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2065 DRV_MSG_SEQ_NUMBER_MASK);
2066 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2068 /* Get current FW pulse sequence */
2069 bp->fw_drv_pulse_wr_seq =
2070 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2071 DRV_PULSE_SEQ_MASK);
2072 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2074 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
2076 BNX2X_ERR("MCP response failure, aborting\n");
2078 LOAD_ERROR_EXIT(bp, load_error1);
2080 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2081 BNX2X_ERR("Driver load refused\n");
2082 rc = -EBUSY; /* other port in diagnostic mode */
2083 LOAD_ERROR_EXIT(bp, load_error1);
2085 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2086 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2087 /* abort nic load if version mismatch */
2088 if (!bnx2x_test_firmware_version(bp, true)) {
2090 LOAD_ERROR_EXIT(bp, load_error2);
2095 int path = BP_PATH(bp);
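/* Without an MCP the driver tracks load state itself: the first function
 * loaded on a path performs COMMON init, the first on a port performs PORT
 * init, and every other function performs FUNCTION init only.
 */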
2097 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2098 path, load_count[path][0], load_count[path][1],
2099 load_count[path][2]);
2100 load_count[path][0]++;
2101 load_count[path][1 + port]++;
2102 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2103 path, load_count[path][0], load_count[path][1],
2104 load_count[path][2]);
2105 if (load_count[path][0] == 1)
2106 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
2107 else if (load_count[path][1 + port] == 1)
2108 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2110 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2113 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2114 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2115 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2118 * We need the barrier to ensure the ordering between the
2119 * writing to bp->port.pmf here and reading it from the
2120 * bnx2x_periodic_task().
2126 DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
2128 /* Init Function state controlling object */
2129 bnx2x__init_func_obj(bp);
2132 rc = bnx2x_init_hw(bp, load_code);
2134 BNX2X_ERR("HW init failed, aborting\n");
2135 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2136 LOAD_ERROR_EXIT(bp, load_error2);
2139 /* Connect to IRQs */
2140 rc = bnx2x_setup_irqs(bp);
2142 BNX2X_ERR("IRQs setup failed\n");
2143 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2144 LOAD_ERROR_EXIT(bp, load_error2);
2147 /* Setup NIC internals and enable interrupts */
2148 bnx2x_nic_init(bp, load_code);
2150 /* Init per-function objects */
2151 bnx2x_init_bp_objs(bp);
2153 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2154 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2155 (bp->common.shmem2_base)) {
2156 if (SHMEM2_HAS(bp, dcc_support))
2157 SHMEM2_WR(bp, dcc_support,
2158 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2159 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2160 if (SHMEM2_HAS(bp, afex_driver_support))
2161 SHMEM2_WR(bp, afex_driver_support,
2162 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2165 /* Set AFEX default VLAN tag to an invalid value */
2166 bp->afex_def_vlan_tag = -1;
2168 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2169 rc = bnx2x_func_start(bp);
2171 BNX2X_ERR("Function start failed!\n");
2172 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2173 LOAD_ERROR_EXIT(bp, load_error3);
2176 /* Send LOAD_DONE command to MCP */
2177 if (!BP_NOMCP(bp)) {
2178 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2180 BNX2X_ERR("MCP response failure, aborting\n");
2182 LOAD_ERROR_EXIT(bp, load_error3);
2186 rc = bnx2x_setup_leading(bp);
2188 BNX2X_ERR("Setup leading failed!\n");
2189 LOAD_ERROR_EXIT(bp, load_error3);
2193 /* Enable Timer scan */
2194 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2197 for_each_nondefault_queue(bp, i) {
2198 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2200 BNX2X_ERR("Queue setup failed\n");
2201 LOAD_ERROR_EXIT(bp, load_error4);
2205 rc = bnx2x_init_rss_pf(bp);
2207 BNX2X_ERR("PF RSS init failed\n");
2208 LOAD_ERROR_EXIT(bp, load_error4);
2211 /* Now that Clients are configured we are ready to work */
2212 bp->state = BNX2X_STATE_OPEN;
2214 /* Configure a ucast MAC */
2215 rc = bnx2x_set_eth_mac(bp, true);
2217 BNX2X_ERR("Setting Ethernet MAC failed\n");
2218 LOAD_ERROR_EXIT(bp, load_error4);
2221 if (bp->pending_max) {
2222 bnx2x_update_max_mf_config(bp, bp->pending_max);
2223 bp->pending_max = 0;
2227 bnx2x_initial_phy_init(bp, load_mode);
2229 /* Start fast path */
2231 /* Initialize Rx filter. */
2232 netif_addr_lock_bh(bp->dev);
2233 bnx2x_set_rx_mode(bp->dev);
2234 netif_addr_unlock_bh(bp->dev);
2237 switch (load_mode) {
2239 /* Tx queues should only be re-enabled */
2240 netif_tx_wake_all_queues(bp->dev);
2244 netif_tx_start_all_queues(bp->dev);
2245 smp_mb__after_clear_bit();
2249 case LOAD_LOOPBACK_EXT:
2250 bp->state = BNX2X_STATE_DIAG;
2258 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
2260 bnx2x__link_status_update(bp);
2262 /* start the timer */
2263 mod_timer(&bp->timer, jiffies + bp->current_interval);
2266 /* re-read iscsi info */
2267 bnx2x_get_iscsi_info(bp);
2268 bnx2x_setup_cnic_irq_info(bp);
2269 bnx2x_setup_cnic_info(bp);
2270 if (bp->state == BNX2X_STATE_OPEN)
2271 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2274 /* mark driver as loaded in shmem2 */
2275 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2277 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2278 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2279 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2280 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2283 /* Wait for all pending SP commands to complete */
2284 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2285 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2286 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2290 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2291 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2292 bnx2x_dcbx_init(bp, false);
2296 #ifndef BNX2X_STOP_ON_ERROR
2299 /* Disable Timer scan */
2300 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2303 bnx2x_int_disable_sync(bp, 1);
2305 /* Clean queueable objects */
2306 bnx2x_squeeze_objects(bp);
2308 /* Free SKBs, SGEs, TPA pool and driver internals */
2309 bnx2x_free_skbs(bp);
2310 for_each_rx_queue(bp, i)
2311 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2316 if (!BP_NOMCP(bp)) {
2317 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2318 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2323 bnx2x_napi_disable(bp);
2324 /* clear pf_load status, as it was already set */
2325 bnx2x_clear_pf_load(bp);
2330 #endif /* ! BNX2X_STOP_ON_ERROR */
2333 /* must be called with rtnl_lock */
2334 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2337 bool global = false;
2339 /* mark driver as unloaded in shmem2 */
2340 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2342 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2343 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2344 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2347 if ((bp->state == BNX2X_STATE_CLOSED) ||
2348 (bp->state == BNX2X_STATE_ERROR)) {
2349 /* We can get here if the driver has been unloaded
2350 * during parity error recovery and is either waiting for a
2351 * leader to complete or for other functions to unload and
2352 * then ifdown has been issued. In this case we want to
2353 * unload and let other functions complete the recovery process.
2356 bp->recovery_state = BNX2X_RECOVERY_DONE;
2358 bnx2x_release_leader_lock(bp);
2361 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2362 BNX2X_ERR("Can't unload in closed or error state\n");
2367 * It's important to set the bp->state to the value different from
2368 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2369 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2371 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2375 bnx2x_tx_disable(bp);
2376 netdev_reset_tc(bp->dev);
2379 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2382 bp->rx_mode = BNX2X_RX_MODE_NONE;
2384 del_timer_sync(&bp->timer);
2386 /* Set ALWAYS_ALIVE bit in shmem */
2387 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2389 bnx2x_drv_pulse(bp);
2391 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2392 bnx2x_save_statistics(bp);
2394 /* Cleanup the chip if needed */
2395 if (unload_mode != UNLOAD_RECOVERY)
2396 bnx2x_chip_cleanup(bp, unload_mode);
2398 /* Send the UNLOAD_REQUEST to the MCP */
2399 bnx2x_send_unload_req(bp, unload_mode);
2402 * Prevent transactions to host from the functions on the
2403 * engine that doesn't reset global blocks in case of global
2404 * attention once global blocks are reset and gates are opened
2405 * (the engine whose leader will perform the recovery last).
2408 if (!CHIP_IS_E1x(bp))
2409 bnx2x_pf_disable(bp);
2411 /* Disable HW interrupts, NAPI */
2412 bnx2x_netif_stop(bp, 1);
2413 /* Delete all NAPI objects */
2414 bnx2x_del_all_napi(bp);
2419 /* Report UNLOAD_DONE to MCP */
2420 bnx2x_send_unload_done(bp);
2424 * At this stage no more interrupts will arrive so we may safely clean
2425 * the queueable objects here in case they failed to get cleaned so far.
2427 bnx2x_squeeze_objects(bp);
2429 /* There should be no more pending SP commands at this stage */
2434 /* Free SKBs, SGEs, TPA pool and driver internals */
2435 bnx2x_free_skbs(bp);
2436 for_each_rx_queue(bp, i)
2437 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2441 bp->state = BNX2X_STATE_CLOSED;
2443 /* Check if there are pending parity attentions. If there are - set
2444 * RECOVERY_IN_PROGRESS.
2446 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2447 bnx2x_set_reset_in_progress(bp);
2449 /* Set RESET_IS_GLOBAL if needed */
2451 bnx2x_set_reset_global(bp);
2455 /* The last driver must disable a "close the gate" if there is no
2456 * parity attention or "process kill" pending.
2458 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2459 bnx2x_disable_close_the_gate(bp);
2464 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2468 /* If there is no power capability, silently succeed */
2470 BNX2X_DEV_INFO("No power capability. Breaking.\n");
2474 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2478 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2479 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2480 PCI_PM_CTRL_PME_STATUS));
2482 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2483 /* delay required during transition out of D3hot */
2488 /* If there are other clients above, don't
2489 shut down the power */
2490 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2492 /* Don't shut down the power for emulation and FPGA */
2493 if (CHIP_REV_IS_SLOW(bp))
2496 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2500 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2502 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2505 /* No more memory access after this point until
2506 * device is brought back to D0.
2511 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
2518 * net_device service functions
2520 int bnx2x_poll(struct napi_struct *napi, int budget)
2524 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2526 struct bnx2x *bp = fp->bp;
2529 #ifdef BNX2X_STOP_ON_ERROR
2530 if (unlikely(bp->panic)) {
2531 napi_complete(napi);
2536 for_each_cos_in_tx_queue(fp, cos)
2537 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2538 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
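/* Note: Tx completions are not bounded by the NAPI budget; all COS
 * rings are drained here, while only the Rx work below is charged
 * against 'budget'.
 */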
2541 if (bnx2x_has_rx_work(fp)) {
2542 work_done += bnx2x_rx_int(fp, budget - work_done);
2544 /* must not complete if we consumed full budget */
2545 if (work_done >= budget)
2549 /* Fall out from the NAPI loop if needed */
2550 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2552 /* No need to update SB for FCoE L2 ring as long as
2553 * it's connected to the default SB and the SB
2554 * has been updated when NAPI was scheduled.
2556 if (IS_FCOE_FP(fp)) {
2557 napi_complete(napi);
2562 bnx2x_update_fpsb_idx(fp);
2563 /* bnx2x_has_rx_work() reads the status block,
2564 * thus we need to ensure that status block indices
2565 * have been actually read (bnx2x_update_fpsb_idx)
2566 * prior to this check (bnx2x_has_rx_work) so that
2567 * we won't write the "newer" value of the status block
2568 * to IGU (if there was a DMA right after
2569 * bnx2x_has_rx_work and if there is no rmb, the memory
2570 * reading (bnx2x_update_fpsb_idx) may be postponed
2571 * to right before bnx2x_ack_sb). In this case there
2572 * will never be another interrupt until there is
2573 * another update of the status block, while there
2574 * is still unhandled work.
2578 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2579 napi_complete(napi);
2580 /* Re-enable interrupts */
2581 DP(NETIF_MSG_RX_STATUS,
2582 "Update index to %d\n", fp->fp_hc_idx);
2583 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2584 le16_to_cpu(fp->fp_hc_idx),
2594 /* we split the first BD into headers and data BDs
2595 * to ease the pain of our fellow microcode engineers;
2596 * we use one mapping for both BDs
2598 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2599 struct bnx2x_fp_txdata *txdata,
2600 struct sw_tx_bd *tx_buf,
2601 struct eth_tx_start_bd **tx_bd, u16 hlen,
2602 u16 bd_prod, int nbd)
2604 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2605 struct eth_tx_bd *d_tx_bd;
2607 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2609 /* first fix first BD */
2610 h_tx_bd->nbd = cpu_to_le16(nbd);
2611 h_tx_bd->nbytes = cpu_to_le16(hlen);
2613 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2614 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
2616 /* now get a new data BD
2617 * (after the pbd) and fill it */
2618 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2619 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2621 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2622 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2624 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2625 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2626 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2628 /* this marks the BD as one that has no individual mapping */
2629 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2631 DP(NETIF_MSG_TX_QUEUED,
2632 "TSO split data size is %d (%x:%x)\n",
2633 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2636 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
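/* Helper used by the Tx checksum workaround below: adjust a partial
 * checksum so that it covers data starting exactly at t_header.
 * For a positive 'fix' the contribution of the 'fix' bytes preceding
 * t_header is subtracted; for a negative 'fix' the bytes starting at
 * t_header are added. The result is folded, inverted and byte-swapped
 * for placement in the parse BD.
 */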
2641 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2644 csum = (u16) ~csum_fold(csum_sub(csum,
2645 csum_partial(t_header - fix, fix, 0)));
2648 csum = (u16) ~csum_fold(csum_add(csum,
2649 csum_partial(t_header, -fix, 0)));
2651 return swab16(csum);
2654 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2658 if (skb->ip_summed != CHECKSUM_PARTIAL)
2662 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2664 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2665 rc |= XMIT_CSUM_TCP;
2669 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2670 rc |= XMIT_CSUM_TCP;
2674 if (skb_is_gso_v6(skb))
2675 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2676 else if (skb_is_gso(skb))
2677 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2682 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2683 /* check if packet requires linearization (packet is too fragmented)
2684 no need to check fragmentation if page size > 8K (there will be no
2685 violation of FW restrictions) */
2686 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2691 int first_bd_sz = 0;
2693 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2694 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2696 if (xmit_type & XMIT_GSO) {
2697 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2698 /* Check if LSO packet needs to be copied:
2699 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2700 int wnd_size = MAX_FETCH_BD - 3;
2701 /* Number of windows to check */
2702 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
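/* The checks below slide a window of wnd_size BDs across the frag
 * list; if any window carries fewer than lso_mss bytes the packet is
 * flagged for linearization (to_copy), since a single MSS could then
 * need more BDs than the FW window (MAX_FETCH_BD) allows.
 */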
2707 /* Headers length */
2708 hlen = (int)(skb_transport_header(skb) - skb->data) +
2711 /* Amount of data (w/o headers) on linear part of SKB*/
2712 first_bd_sz = skb_headlen(skb) - hlen;
2714 wnd_sum = first_bd_sz;
2716 /* Calculate the first sum - it's special */
2717 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2719 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2721 /* If there was data on linear skb data - check it */
2722 if (first_bd_sz > 0) {
2723 if (unlikely(wnd_sum < lso_mss)) {
2728 wnd_sum -= first_bd_sz;
2731 /* Others are easier: run through the frag list and
2732 check all windows */
2733 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2735 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2737 if (unlikely(wnd_sum < lso_mss)) {
2742 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2745 /* in the non-LSO case a too fragmented packet should always be linearized */
2752 if (unlikely(to_copy))
2753 DP(NETIF_MSG_TX_QUEUED,
2754 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
2755 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2756 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2762 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2765 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2766 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2767 ETH_TX_PARSE_BD_E2_LSO_MSS;
2768 if ((xmit_type & XMIT_GSO_V6) &&
2769 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2770 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2774 * bnx2x_set_pbd_gso - update PBD in GSO case.
2778 * @xmit_type: xmit flags
2780 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2781 struct eth_tx_parse_bd_e1x *pbd,
2784 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2785 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2786 pbd->tcp_flags = pbd_tcp_flags(skb);
2788 if (xmit_type & XMIT_GSO_V4) {
2789 pbd->ip_id = swab16(ip_hdr(skb)->id);
2790 pbd->tcp_pseudo_csum =
2791 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2793 0, IPPROTO_TCP, 0));
2796 pbd->tcp_pseudo_csum =
2797 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2798 &ipv6_hdr(skb)->daddr,
2799 0, IPPROTO_TCP, 0));
2801 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2805 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2807 * @bp: driver handle
2809 * @parsing_data: data to be updated
2810 * @xmit_type: xmit flags
2814 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2815 u32 *parsing_data, u32 xmit_type)
2818 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2819 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2820 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
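/* The _W/_DW field suffixes indicate the units: the transport header
 * offset above is programmed in 16-bit words (hence the >> 1) and the
 * TCP header length below in 32-bit dwords (hence the / 4); the value
 * returned by this function is the full header length in bytes.
 */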
2822 if (xmit_type & XMIT_CSUM_TCP) {
2823 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2824 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2825 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2827 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2829 /* We support checksum offload for TCP and UDP only.
2830 * No need to pass the UDP header length - it's a constant.
2832 return skb_transport_header(skb) +
2833 sizeof(struct udphdr) - skb->data;
2836 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2837 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2839 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2841 if (xmit_type & XMIT_CSUM_V4)
2842 tx_start_bd->bd_flags.as_bitfield |=
2843 ETH_TX_BD_FLAGS_IP_CSUM;
2845 tx_start_bd->bd_flags.as_bitfield |=
2846 ETH_TX_BD_FLAGS_IPV6;
2848 if (!(xmit_type & XMIT_CSUM_TCP))
2849 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2853 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2855 * @bp: driver handle
2857 * @pbd: parse BD to be updated
2858 * @xmit_type: xmit flags
2860 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2861 struct eth_tx_parse_bd_e1x *pbd,
2864 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2866 /* for now NS flag is not used in Linux */
2868 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2869 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2871 pbd->ip_hlen_w = (skb_transport_header(skb) -
2872 skb_network_header(skb)) >> 1;
2874 hlen += pbd->ip_hlen_w;
2876 /* We support checksum offload for TCP and UDP only */
2877 if (xmit_type & XMIT_CSUM_TCP)
2878 hlen += tcp_hdrlen(skb) / 2;
2880 hlen += sizeof(struct udphdr) / 2;
2882 pbd->total_hlen_w = cpu_to_le16(hlen);
2885 if (xmit_type & XMIT_CSUM_TCP) {
2886 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2889 s8 fix = SKB_CS_OFF(skb); /* signed! */
2891 DP(NETIF_MSG_TX_QUEUED,
2892 "hlen %d fix %d csum before fix %x\n",
2893 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2895 /* HW bug: fixup the CSUM */
2896 pbd->tcp_pseudo_csum =
2897 bnx2x_csum_fix(skb_transport_header(skb),
2900 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2901 pbd->tcp_pseudo_csum);
2907 /* called with netif_tx_lock
2908 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2909 * netif_wake_queue()
2911 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2913 struct bnx2x *bp = netdev_priv(dev);
2915 struct netdev_queue *txq;
2916 struct bnx2x_fp_txdata *txdata;
2917 struct sw_tx_bd *tx_buf;
2918 struct eth_tx_start_bd *tx_start_bd, *first_bd;
2919 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2920 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2921 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2922 u32 pbd_e2_parsing_data = 0;
2923 u16 pkt_prod, bd_prod;
2926 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2929 __le16 pkt_size = 0;
2931 u8 mac_type = UNICAST_ADDRESS;
2933 #ifdef BNX2X_STOP_ON_ERROR
2934 if (unlikely(bp->panic))
2935 return NETDEV_TX_BUSY;
2938 txq_index = skb_get_queue_mapping(skb);
2939 txq = netdev_get_tx_queue(dev, txq_index);
2941 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2943 txdata = &bp->bnx2x_txq[txq_index];
2945 /* enable this debug print to view the transmission queue being used
2946 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
2947 txq_index, fp_index, txdata_index); */
2949 /* enable this debug print to view the transmission details
2950 DP(NETIF_MSG_TX_QUEUED,
2951 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
2952 txdata->cid, fp_index, txdata_index, txdata, fp); */
2954 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2955 skb_shinfo(skb)->nr_frags +
2957 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2958 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
2959 netif_tx_stop_queue(txq);
2960 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2961 return NETDEV_TX_BUSY;
2964 DP(NETIF_MSG_TX_QUEUED,
2965 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
2966 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2967 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2969 eth = (struct ethhdr *)skb->data;
2971 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2972 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2973 if (is_broadcast_ether_addr(eth->h_dest))
2974 mac_type = BROADCAST_ADDRESS;
2976 mac_type = MULTICAST_ADDRESS;
2979 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2980 /* First, check if we need to linearize the skb (due to FW
2981 restrictions). No need to check fragmentation if page size > 8K
2982 (there will be no violation of FW restrictions) */
2983 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2984 /* Statistics of linearization */
2986 if (skb_linearize(skb) != 0) {
2987 DP(NETIF_MSG_TX_QUEUED,
2988 "SKB linearization failed - silently dropping this SKB\n");
2989 dev_kfree_skb_any(skb);
2990 return NETDEV_TX_OK;
2994 /* Map skb linear data for DMA */
2995 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2996 skb_headlen(skb), DMA_TO_DEVICE);
2997 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2998 DP(NETIF_MSG_TX_QUEUED,
2999 "SKB mapping failed - silently dropping this SKB\n");
3000 dev_kfree_skb_any(skb);
3001 return NETDEV_TX_OK;
3004 Please read carefully. First we use one BD which we mark as start,
3005 then we have a parsing info BD (used for TSO or xsum),
3006 and only then we have the rest of the TSO BDs.
3007 (don't forget to mark the last one as last,
3008 and to unmap only AFTER you write to the BD ...)
3009 And above all, all pbd sizes are in words - NOT DWORDS!
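/* (Roughly, the chain built below is: start BD -> parse BD (E1x or E2) ->
 * for TSO, an extra data BD created by bnx2x_tx_split for the linear data
 * beyond the headers -> one data BD per skb frag; first_bd->nbd is updated
 * with the actual BD count once all frags are mapped.)
 */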
3012 /* get current pkt produced now - advance it just before sending packet
3013 * since mapping of pages may fail and cause packet to be dropped
3015 pkt_prod = txdata->tx_pkt_prod;
3016 bd_prod = TX_BD(txdata->tx_bd_prod);
3018 /* get a tx_buf and first BD
3019 * tx_start_bd may be changed during SPLIT,
3020 * but first_bd will always stay first
3022 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3023 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3024 first_bd = tx_start_bd;
3026 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3027 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
3031 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
3033 /* remember the first BD of the packet */
3034 tx_buf->first_bd = txdata->tx_bd_prod;
3038 DP(NETIF_MSG_TX_QUEUED,
3039 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3040 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3042 if (vlan_tx_tag_present(skb)) {
3043 tx_start_bd->vlan_or_ethertype =
3044 cpu_to_le16(vlan_tx_tag_get(skb));
3045 tx_start_bd->bd_flags.as_bitfield |=
3046 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3048 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3050 /* turn on parsing and get a BD */
3051 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3053 if (xmit_type & XMIT_CSUM)
3054 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3056 if (!CHIP_IS_E1x(bp)) {
3057 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3058 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3059 /* Set PBD in checksum offload case */
3060 if (xmit_type & XMIT_CSUM)
3061 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3062 &pbd_e2_parsing_data,
3066 * fill in the MAC addresses in the PBD - for local
3069 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3070 &pbd_e2->src_mac_addr_mid,
3071 &pbd_e2->src_mac_addr_lo,
3073 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3074 &pbd_e2->dst_mac_addr_mid,
3075 &pbd_e2->dst_mac_addr_lo,
3079 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3080 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3081 /* Set PBD in checksum offload case */
3082 if (xmit_type & XMIT_CSUM)
3083 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3087 /* Setup the data pointer of the first BD of the packet */
3088 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3089 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3090 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3091 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3092 pkt_size = tx_start_bd->nbytes;
3094 DP(NETIF_MSG_TX_QUEUED,
3095 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
3096 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3097 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
3098 tx_start_bd->bd_flags.as_bitfield,
3099 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3101 if (xmit_type & XMIT_GSO) {
3103 DP(NETIF_MSG_TX_QUEUED,
3104 "TSO packet len %d hlen %d total len %d tso size %d\n",
3105 skb->len, hlen, skb_headlen(skb),
3106 skb_shinfo(skb)->gso_size);
3108 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3110 if (unlikely(skb_headlen(skb) > hlen))
3111 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3114 if (!CHIP_IS_E1x(bp))
3115 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3118 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3121 /* Set the PBD's parsing_data field if not zero
3122 * (for the chips newer than 57711).
3124 if (pbd_e2_parsing_data)
3125 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3127 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3129 /* Handle fragmented skb */
3130 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3131 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3133 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3134 skb_frag_size(frag), DMA_TO_DEVICE);
3135 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3136 unsigned int pkts_compl = 0, bytes_compl = 0;
3138 DP(NETIF_MSG_TX_QUEUED,
3139 "Unable to map page - dropping packet...\n");
3141 /* we need to unmap all buffers already mapped
3143 * first_bd->nbd needs to be properly updated
3144 * before the call to bnx2x_free_tx_pkt
3146 first_bd->nbd = cpu_to_le16(nbd);
3147 bnx2x_free_tx_pkt(bp, txdata,
3148 TX_BD(txdata->tx_pkt_prod),
3149 &pkts_compl, &bytes_compl);
3150 return NETDEV_TX_OK;
3153 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3154 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3155 if (total_pkt_bd == NULL)
3156 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3158 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3159 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3160 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3161 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3164 DP(NETIF_MSG_TX_QUEUED,
3165 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3166 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3167 le16_to_cpu(tx_data_bd->nbytes));
3170 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3172 /* update with actual num BDs */
3173 first_bd->nbd = cpu_to_le16(nbd);
3175 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3177 /* now send a tx doorbell, counting the next BD
3178 * if the packet contains or ends with it
3180 if (TX_BD_POFF(bd_prod) < nbd)
3183 /* total_pkt_bytes should be set on the first data BD if
3184 * it's not an LSO packet and there is more than one
3185 * data BD. In this case pkt_size is limited by an MTU value.
3186 * However we prefer to set it for an LSO packet (while we don't
3187 * have to) in order to save some CPU cycles in a non-LSO
3188 * case, when we care much more about them.
3190 if (total_pkt_bd != NULL)
3191 total_pkt_bd->total_pkt_bytes = pkt_size;
3194 DP(NETIF_MSG_TX_QUEUED,
3195 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3196 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3197 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3198 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3199 le16_to_cpu(pbd_e1x->total_hlen_w));
3201 DP(NETIF_MSG_TX_QUEUED,
3202 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3203 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3204 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3205 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3206 pbd_e2->parsing_data);
3207 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3209 netdev_tx_sent_queue(txq, skb->len);
3211 skb_tx_timestamp(skb);
3213 txdata->tx_pkt_prod++;
3215 * Make sure that the BD data is updated before updating the producer
3216 * since FW might read the BD right after the producer is updated.
3217 * This is only applicable for weak-ordered memory model archs such
3218 * as IA-64. The following barrier is also mandatory since FW
3219 * assumes packets must have BDs.
3223 txdata->tx_db.data.prod += nbd;
3226 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3230 txdata->tx_bd_prod += nbd;
3232 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3233 netif_tx_stop_queue(txq);
3235 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3236 * ordering of set_bit() in netif_tx_stop_queue() and read of
3240 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3241 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
3242 netif_tx_wake_queue(txq);
3246 return NETDEV_TX_OK;
3250 * bnx2x_setup_tc - routine to configure net_device for multi tc
3252 * @dev: net device to configure
3253 * @num_tc: number of traffic classes to enable
3255 * callback connected to the ndo_setup_tc function pointer
3257 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3259 int cos, prio, count, offset;
3260 struct bnx2x *bp = netdev_priv(dev);
3262 /* setup tc must be called under rtnl lock */
3265 /* no traffic classes requested. aborting */
3267 netdev_reset_tc(dev);
3271 /* requested to support too many traffic classes */
3272 if (num_tc > bp->max_cos) {
3273 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3274 num_tc, bp->max_cos);
3278 /* declare amount of supported traffic classes */
3279 if (netdev_set_num_tc(dev, num_tc)) {
3280 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3284 /* configure priority to traffic class mapping */
3285 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3286 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3287 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3288 "mapping priority %d to tc %d\n",
3289 prio, bp->prio_to_cos[prio]);
3293 /* Use this configuration to differentiate tc0 from other COSes
3294 This can be used for ets or pfc, and save the effort of setting
3295 up a multi class queue disc or negotiating DCBX with a switch
3296 netdev_set_prio_tc_map(dev, 0, 0);
3297 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3298 for (prio = 1; prio < 16; prio++) {
3299 netdev_set_prio_tc_map(dev, prio, 1);
3300 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3303 /* configure traffic class to transmission queue mapping */
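/* Each traffic class is backed by a full set of ETH Tx queues,
 * e.g. (hypothetically) with 8 ETH queues and max_cos = 3:
 * tc0 -> queues 0..7, tc1 -> queues 8..15, tc2 -> queues 16..23.
 */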
3304 for (cos = 0; cos < bp->max_cos; cos++) {
3305 count = BNX2X_NUM_ETH_QUEUES(bp);
3306 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
3307 netdev_set_tc_queue(dev, cos, count, offset);
3308 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3309 "mapping tc %d to offset %d count %d\n",
3310 cos, offset, count);
3316 /* called with rtnl_lock */
3317 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3319 struct sockaddr *addr = p;
3320 struct bnx2x *bp = netdev_priv(dev);
3323 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3324 BNX2X_ERR("Requested MAC address is not valid\n");
3329 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3330 !is_zero_ether_addr(addr->sa_data)) {
3331 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3336 if (netif_running(dev)) {
3337 rc = bnx2x_set_eth_mac(bp, false);
3342 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
3343 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3345 if (netif_running(dev))
3346 rc = bnx2x_set_eth_mac(bp, true);
3351 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3353 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3354 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3359 if (IS_FCOE_IDX(fp_index)) {
3360 memset(sb, 0, sizeof(union host_hc_status_block));
3361 fp->status_blk_mapping = 0;
3366 if (!CHIP_IS_E1x(bp))
3367 BNX2X_PCI_FREE(sb->e2_sb,
3368 bnx2x_fp(bp, fp_index,
3369 status_blk_mapping),
3370 sizeof(struct host_hc_status_block_e2));
3372 BNX2X_PCI_FREE(sb->e1x_sb,
3373 bnx2x_fp(bp, fp_index,
3374 status_blk_mapping),
3375 sizeof(struct host_hc_status_block_e1x));
3380 if (!skip_rx_queue(bp, fp_index)) {
3381 bnx2x_free_rx_bds(fp);
3383 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3384 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3385 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3386 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3387 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3389 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3390 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3391 sizeof(struct eth_fast_path_rx_cqe) *
3395 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3396 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3397 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3398 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3402 if (!skip_tx_queue(bp, fp_index)) {
3403 /* fastpath tx rings: tx_buf tx_desc */
3404 for_each_cos_in_tx_queue(fp, cos) {
3405 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3407 DP(NETIF_MSG_IFDOWN,
3408 "freeing tx memory of fp %d cos %d cid %d\n",
3409 fp_index, cos, txdata->cid);
3411 BNX2X_FREE(txdata->tx_buf_ring);
3412 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3413 txdata->tx_desc_mapping,
3414 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3417 /* end of fastpath */
3420 void bnx2x_free_fp_mem(struct bnx2x *bp)
3423 for_each_queue(bp, i)
3424 bnx2x_free_fp_mem_at(bp, i);
3427 static void set_sb_shortcuts(struct bnx2x *bp, int index)
3429 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3430 if (!CHIP_IS_E1x(bp)) {
3431 bnx2x_fp(bp, index, sb_index_values) =
3432 (__le16 *)status_blk.e2_sb->sb.index_values;
3433 bnx2x_fp(bp, index, sb_running_index) =
3434 (__le16 *)status_blk.e2_sb->sb.running_index;
3436 bnx2x_fp(bp, index, sb_index_values) =
3437 (__le16 *)status_blk.e1x_sb->sb.index_values;
3438 bnx2x_fp(bp, index, sb_running_index) =
3439 (__le16 *)status_blk.e1x_sb->sb.running_index;
3443 /* Returns the number of actually allocated BDs */
3444 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3447 struct bnx2x *bp = fp->bp;
3448 u16 ring_prod, cqe_ring_prod;
3449 int i, failure_cnt = 0;
3451 fp->rx_comp_cons = 0;
3452 cqe_ring_prod = ring_prod = 0;
3454 /* This routine is called only during init so
3455 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3457 for (i = 0; i < rx_ring_size; i++) {
3458 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3462 ring_prod = NEXT_RX_IDX(ring_prod);
3463 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3464 WARN_ON(ring_prod <= (i - failure_cnt));
3468 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3469 i - failure_cnt, fp->index);
3471 fp->rx_bd_prod = ring_prod;
3472 /* Limit the CQE producer by the CQE ring size */
3473 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3475 fp->rx_pkt = fp->rx_calls = 0;
3477 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3479 return i - failure_cnt;
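/* Chain the RCQ pages: the last CQE slot of each page is turned into a
 * "next page" descriptor holding the physical address of the following
 * page, with the final page wrapping back to the first.
 */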
3482 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3486 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3487 struct eth_rx_cqe_next_page *nextpg;
3489 nextpg = (struct eth_rx_cqe_next_page *)
3490 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3492 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3493 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3495 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3496 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3500 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3502 union host_hc_status_block *sb;
3503 struct bnx2x_fastpath *fp = &bp->fp[index];
3506 int rx_ring_size = 0;
3509 if (!bp->rx_ring_size &&
3510 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3511 rx_ring_size = MIN_RX_SIZE_NONTPA;
3512 bp->rx_ring_size = rx_ring_size;
3515 if (!bp->rx_ring_size) {
3516 u32 cfg = SHMEM_RD(bp,
3517 dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
3519 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3521 /* Decrease ring size for 1G functions */
3522 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3523 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3526 /* allocate at least number of buffers required by FW */
3527 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3528 MIN_RX_SIZE_TPA, rx_ring_size);
3530 bp->rx_ring_size = rx_ring_size;
3531 } else /* if rx_ring_size specified - use it */
3532 rx_ring_size = bp->rx_ring_size;
3535 sb = &bnx2x_fp(bp, index, status_blk);
3537 if (!IS_FCOE_IDX(index)) {
3540 if (!CHIP_IS_E1x(bp))
3541 BNX2X_PCI_ALLOC(sb->e2_sb,
3542 &bnx2x_fp(bp, index, status_blk_mapping),
3543 sizeof(struct host_hc_status_block_e2));
3545 BNX2X_PCI_ALLOC(sb->e1x_sb,
3546 &bnx2x_fp(bp, index, status_blk_mapping),
3547 sizeof(struct host_hc_status_block_e1x));
3552 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3553 * set shortcuts for it.
3555 if (!IS_FCOE_IDX(index))
3556 set_sb_shortcuts(bp, index);
3559 if (!skip_tx_queue(bp, index)) {
3560 /* fastpath tx rings: tx_buf tx_desc */
3561 for_each_cos_in_tx_queue(fp, cos) {
3562 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3565 "allocating tx memory of fp %d cos %d\n",
3568 BNX2X_ALLOC(txdata->tx_buf_ring,
3569 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3570 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3571 &txdata->tx_desc_mapping,
3572 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3577 if (!skip_rx_queue(bp, index)) {
3578 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3579 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3580 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3581 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3582 &bnx2x_fp(bp, index, rx_desc_mapping),
3583 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3585 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3586 &bnx2x_fp(bp, index, rx_comp_mapping),
3587 sizeof(struct eth_fast_path_rx_cqe) *
3591 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3592 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3593 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3594 &bnx2x_fp(bp, index, rx_sge_mapping),
3595 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3597 bnx2x_set_next_page_rx_bd(fp);
3600 bnx2x_set_next_page_rx_cq(fp);
3603 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3604 if (ring_size < rx_ring_size)
3610 /* handles low memory cases */
3612 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3614 /* FW will drop all packets if the queue is not big enough,
3615 * so in these cases we disable the queue.
3616 * Min size is different for OOO, TPA and non-TPA queues
3618 if (ring_size < (fp->disable_tpa ?
3619 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3620 /* release memory allocated for this queue */
3621 bnx2x_free_fp_mem_at(bp, index);
3627 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3632 * 1. Allocate FP for leading - fatal if error
3633 * 2. {CNIC} Allocate FCoE FP - fatal if error
3634 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3635 * 4. Allocate RSS - fix number of queues if error
3639 if (bnx2x_alloc_fp_mem_at(bp, 0))
3645 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
3646 /* we will fail load process instead of mark
3653 for_each_nondefault_eth_queue(bp, i)
3654 if (bnx2x_alloc_fp_mem_at(bp, i))
3657 /* handle memory failures */
3658 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3659 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3664 * move non eth FPs next to last eth FP
3665 * must be done in that order
3666 * FCOE_IDX < FWD_IDX < OOO_IDX
3669 /* move the FCoE fp even if NO_FCOE_FLAG is on */
3670 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
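/* e.g. (hypothetically) if 2 RSS queues failed to allocate, delta == 2
 * and the FCoE fastpath slides down two slots so the CNIC L2 queue
 * stays contiguous right after the last successfully allocated ETH
 * queue.
 */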
3672 bp->num_queues -= delta;
3673 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3674 bp->num_queues + delta, bp->num_queues);
3680 void bnx2x_free_mem_bp(struct bnx2x *bp)
3682 kfree(bp->fp->tpa_info);
3685 kfree(bp->fp_stats);
3686 kfree(bp->bnx2x_txq);
3687 kfree(bp->msix_table);
3691 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3693 struct bnx2x_fastpath *fp;
3694 struct msix_entry *tbl;
3695 struct bnx2x_ilt *ilt;
3696 int msix_table_size = 0;
3701 * The biggest MSI-X table we might need is the maximum number of fast
3702 * path IGU SBs plus the default SB (for PF).
3704 msix_table_size = bp->igu_sb_cnt + 1;
3706 /* fp array: RSS plus CNIC related L2 queues */
3707 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
3708 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3710 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
3713 for (i = 0; i < fp_array_size; i++) {
3715 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
3716 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
3717 if (!(fp[i].tpa_info))
3723 /* allocate sp objs */
3724 bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
3729 /* allocate fp_stats */
3730 bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
3735 /* Allocate memory for the transmission queues array */
3736 bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
3738 bp->bnx2x_txq_size++;
3740 bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
3741 sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
3746 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3749 bp->msix_table = tbl;
3752 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3759 bnx2x_free_mem_bp(bp);
3764 int bnx2x_reload_if_running(struct net_device *dev)
3766 struct bnx2x *bp = netdev_priv(dev);
3768 if (unlikely(!netif_running(dev)))
3771 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3772 return bnx2x_nic_load(bp, LOAD_NORMAL);
3775 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3777 u32 sel_phy_idx = 0;
3778 if (bp->link_params.num_phys <= 1)
3781 if (bp->link_vars.link_up) {
3782 sel_phy_idx = EXT_PHY1;
3783 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3784 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3785 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3786 sel_phy_idx = EXT_PHY2;
3789 switch (bnx2x_phy_selection(&bp->link_params)) {
3790 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3791 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3792 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3793 sel_phy_idx = EXT_PHY1;
3795 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3796 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3797 sel_phy_idx = EXT_PHY2;
3805 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3807 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3809 * The selected active PHY is always after swapping (in case PHY
3810 * swapping is enabled). So when swapping is enabled, we need to reverse
3814 if (bp->link_params.multi_phy_config &
3815 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3816 if (sel_phy_idx == EXT_PHY1)
3817 sel_phy_idx = EXT_PHY2;
3818 else if (sel_phy_idx == EXT_PHY2)
3819 sel_phy_idx = EXT_PHY1;
3821 return LINK_CONFIG_IDX(sel_phy_idx);
3824 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3825 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3827 struct bnx2x *bp = netdev_priv(dev);
3828 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3831 case NETDEV_FCOE_WWNN:
3832 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3833 cp->fcoe_wwn_node_name_lo);
3835 case NETDEV_FCOE_WWPN:
3836 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3837 cp->fcoe_wwn_port_name_lo);
3840 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
3848 /* called with rtnl_lock */
3849 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3851 struct bnx2x *bp = netdev_priv(dev);
3853 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3854 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
3858 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3859 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
3860 BNX2X_ERR("Can't support requested MTU size\n");
3864 /* This does not race with packet allocation
3865 * because the actual alloc size is
3866 * only updated as part of load
3870 return bnx2x_reload_if_running(dev);
3873 netdev_features_t bnx2x_fix_features(struct net_device *dev,
3874 netdev_features_t features)
3876 struct bnx2x *bp = netdev_priv(dev);
3878 /* TPA requires Rx CSUM offloading */
3879 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
3880 features &= ~NETIF_F_LRO;
3881 features &= ~NETIF_F_GRO;
3887 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
3889 struct bnx2x *bp = netdev_priv(dev);
3890 u32 flags = bp->flags;
3891 bool bnx2x_reload = false;
3893 if (features & NETIF_F_LRO)
3894 flags |= TPA_ENABLE_FLAG;
3896 flags &= ~TPA_ENABLE_FLAG;
3898 if (features & NETIF_F_GRO)
3899 flags |= GRO_ENABLE_FLAG;
3901 flags &= ~GRO_ENABLE_FLAG;
3903 if (features & NETIF_F_LOOPBACK) {
3904 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3905 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3906 bnx2x_reload = true;
3909 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3910 bp->link_params.loopback_mode = LOOPBACK_NONE;
3911 bnx2x_reload = true;
3915 if (flags ^ bp->flags) {
3917 bnx2x_reload = true;
3921 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3922 return bnx2x_reload_if_running(dev);
3923 /* else: bnx2x_nic_load() will be called at end of recovery */
3929 void bnx2x_tx_timeout(struct net_device *dev)
3931 struct bnx2x *bp = netdev_priv(dev);
3933 #ifdef BNX2X_STOP_ON_ERROR
3938 smp_mb__before_clear_bit();
3939 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3940 smp_mb__after_clear_bit();
3942 /* This allows the netif to be shutdown gracefully before resetting */
3943 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3946 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3948 struct net_device *dev = pci_get_drvdata(pdev);
3952 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3955 bp = netdev_priv(dev);
3959 pci_save_state(pdev);
3961 if (!netif_running(dev)) {
3966 netif_device_detach(dev);
3968 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3970 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3977 int bnx2x_resume(struct pci_dev *pdev)
3979 struct net_device *dev = pci_get_drvdata(pdev);
3984 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3987 bp = netdev_priv(dev);
3989 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3990 BNX2X_ERR("Handling parity error recovery. Try again later\n");
3996 pci_restore_state(pdev);
3998 if (!netif_running(dev)) {
4003 bnx2x_set_power_state(bp, PCI_D0);
4004 netif_device_attach(dev);
4006 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4014 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4017 /* ustorm cxt validation */
4018 cxt->ustorm_ag_context.cdu_usage =
4019 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4020 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4021 /* xcontext validation */
4022 cxt->xstorm_ag_context.cdu_reserved =
4023 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4024 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4027 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4028 u8 fw_sb_id, u8 sb_index,
4032 u32 addr = BAR_CSTRORM_INTMEM +
4033 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4034 REG_WR8(bp, addr, ticks);
4036 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4037 port, fw_sb_id, sb_index, ticks);
4040 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4041 u16 fw_sb_id, u8 sb_index,
4044 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4045 u32 addr = BAR_CSTRORM_INTMEM +
4046 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4047 u16 flags = REG_RD16(bp, addr);
4049 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4050 flags |= enable_flag;
4051 REG_WR16(bp, addr, flags);
4053 "port %x fw_sb_id %d sb_index %d disable %d\n",
4054 port, fw_sb_id, sb_index, disable);
4057 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4058 u8 sb_index, u8 disable, u16 usec)
4060 int port = BP_PORT(bp);
4061 u8 ticks = usec / BNX2X_BTR;
4063 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
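/* a zero 'usec' value implicitly disables coalescing for this index
 * even if 'disable' was not explicitly requested
 */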
4065 disable = disable ? 1 : (usec ? 0 : 1);
4066 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);