1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2012 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
25 #include <net/ip6_checksum.h>
26 #include <linux/prefetch.h>
27 #include "bnx2x_cmn.h"
28 #include "bnx2x_init.h"
34 * bnx2x_move_fp - move content of the fastpath structure.
37 * @from: source FP index
38 * @to: destination FP index
40 * Makes sure the contents of bp->fp[to].napi are kept
41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then mem copying the entire
43 * source onto the target. Update txdata pointers and related
46 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
50 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
57 /* Copy the NAPI object as it has been already initialized */
58 from_fp->napi = to_fp->napi;
60 /* Move bnx2x_fastpath contents */
61 memcpy(to_fp, from_fp, sizeof(*to_fp));
64 /* move sp_objs contents as well, as their indices match fp ones */
65 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
67 /* move fp_stats contents as well, as their indices match fp ones */
68 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
70 /* Update txdata pointers in fp and move txdata content accordingly:
71 * Each fp consumes 'max_cos' txdata structures, so the index should be
72 * decremented by max_cos x delta.
75 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
76 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
78 if (from == FCOE_IDX(bp)) {
79 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
83 memcpy(&bp->bnx2x_txq[old_txdata_index],
84 &bp->bnx2x_txq[new_txdata_index],
85 sizeof(struct bnx2x_fp_txdata));
86 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
89 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
91 /* free skb in the packet ring at pos idx
92 * return idx of last bd freed
94 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
95 u16 idx, unsigned int *pkts_compl,
96 unsigned int *bytes_compl)
98 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
99 struct eth_tx_start_bd *tx_start_bd;
100 struct eth_tx_bd *tx_data_bd;
101 struct sk_buff *skb = tx_buf->skb;
102 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
105 /* prefetch skb end pointer to speed up dev_kfree_skb() */
108 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
109 txdata->txq_index, idx, tx_buf, skb);
112 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
113 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
114 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
117 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
118 #ifdef BNX2X_STOP_ON_ERROR
119 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
120 BNX2X_ERR("BAD nbd!\n");
124 new_cons = nbd + tx_buf->first_bd;
126 /* Get the next bd */
127 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
129 /* Skip a parse bd... */
131 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
133 /* ...and the TSO split header bd since they have no mapping */
134 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
136 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
142 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
143 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
144 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
146 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
153 (*bytes_compl) += skb->len;
156 dev_kfree_skb_any(skb);
157 tx_buf->first_bd = 0;
163 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
165 struct netdev_queue *txq;
166 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
167 unsigned int pkts_compl = 0, bytes_compl = 0;
169 #ifdef BNX2X_STOP_ON_ERROR
170 if (unlikely(bp->panic))
174 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
175 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
176 sw_cons = txdata->tx_pkt_cons;
178 while (sw_cons != hw_cons) {
181 pkt_cons = TX_BD(sw_cons);
183 DP(NETIF_MSG_TX_DONE,
184 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
185 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
187 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
188 &pkts_compl, &bytes_compl);
193 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
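/* Note: netdev_tx_completed_queue() above feeds the completed packet and
 * byte counts into the kernel's byte queue limits (BQL) accounting; it
 * pairs with the netdev_tx_sent_queue() call made on the transmit path.
 */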
195 txdata->tx_pkt_cons = sw_cons;
196 txdata->tx_bd_cons = bd_cons;
198 /* Need to make the tx_bd_cons update visible to start_xmit()
199 * before checking for netif_tx_queue_stopped(). Without the
200 * memory barrier, there is a small possibility that
201 * start_xmit() will miss it and cause the queue to be stopped forever.
203 * On the other hand we need an rmb() here to ensure the proper
204 * ordering of bit testing in the following
205 * netif_tx_queue_stopped(txq) call.
209 if (unlikely(netif_tx_queue_stopped(txq))) {
210 /* Taking tx_lock() is needed to prevent re-enabling the queue
211 * while it's empty. This could have happened if rx_action() gets
212 * suspended in bnx2x_tx_int() after the condition before
213 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
215 * stops the queue->sees fresh tx_bd_cons->releases the queue->
216 * sends some packets consuming the whole queue again->
220 __netif_tx_lock(txq, smp_processor_id());
222 if ((netif_tx_queue_stopped(txq)) &&
223 (bp->state == BNX2X_STATE_OPEN) &&
224 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
225 netif_tx_wake_queue(txq);
227 __netif_tx_unlock(txq);
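/* The wake-up condition above only re-enables the queue when at least
 * MAX_DESC_PER_TX_PKT BDs are free, i.e. when a maximally fragmented
 * packet is guaranteed to fit, so the queue is not immediately stopped
 * again by the very next transmit.
 */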
232 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
235 u16 last_max = fp->last_max_sge;
237 if (SUB_S16(idx, last_max) > 0)
238 fp->last_max_sge = idx;
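/* Illustrative note: SUB_S16() compares the indices as signed 16-bit
 * values (assuming the usual (s16)((s16)(a) - (s16)(b)) helper), so the
 * "newer" test keeps working across producer wrap-around, e.g.
 * SUB_S16(0x0002, 0xfffe) = 4 > 0 and 0x0002 replaces 0xfffe.
 */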
241 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
243 struct eth_end_agg_rx_cqe *cqe)
245 struct bnx2x *bp = fp->bp;
246 u16 last_max, last_elem, first_elem;
253 /* First mark all used pages */
254 for (i = 0; i < sge_len; i++)
255 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
256 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
258 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
259 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
261 /* Here we assume that the last SGE index is the biggest */
262 prefetch((void *)(fp->sge_mask));
263 bnx2x_update_last_max_sge(fp,
264 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
266 last_max = RX_SGE(fp->last_max_sge);
267 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
268 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
270 /* If ring is not full */
271 if (last_elem + 1 != first_elem)
274 /* Now update the prod */
275 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
276 if (likely(fp->sge_mask[i]))
279 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
280 delta += BIT_VEC64_ELEM_SZ;
284 fp->rx_sge_prod += delta;
285 /* clear page-end entries */
286 bnx2x_clear_sge_mask_next_elems(fp);
289 DP(NETIF_MSG_RX_STATUS,
290 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
291 fp->last_max_sge, fp->rx_sge_prod);
294 /* Set Toeplitz hash value in the skb using the value from the
295 * CQE (calculated by HW).
297 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
298 const struct eth_fast_path_rx_cqe *cqe,
301 /* Set Toeplitz hash from CQE */
302 if ((bp->dev->features & NETIF_F_RXHASH) &&
303 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
304 enum eth_rss_hash_type htype;
306 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
307 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
308 (htype == TCP_IPV6_HASH_TYPE);
309 return le32_to_cpu(cqe->rss_hash_result);
315 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
317 struct eth_fast_path_rx_cqe *cqe)
319 struct bnx2x *bp = fp->bp;
320 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
321 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
322 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
324 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
325 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
327 /* print error if current state != stop */
328 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
329 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
331 /* Try to map an empty data buffer from the aggregation info */
332 mapping = dma_map_single(&bp->pdev->dev,
333 first_buf->data + NET_SKB_PAD,
334 fp->rx_buf_size, DMA_FROM_DEVICE);
336 * ...if it fails - move the skb from the consumer to the producer
337 * and set the current aggregation state as ERROR to drop it
338 * when TPA_STOP arrives.
341 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
342 /* Move the BD from the consumer to the producer */
343 bnx2x_reuse_rx_data(fp, cons, prod);
344 tpa_info->tpa_state = BNX2X_TPA_ERROR;
348 /* move empty data from pool to prod */
349 prod_rx_buf->data = first_buf->data;
350 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
351 /* point prod_bd to new data */
352 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
353 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
355 /* move partial skb from cons to pool (don't unmap yet) */
356 *first_buf = *cons_rx_buf;
358 /* mark bin state as START */
359 tpa_info->parsing_flags =
360 le16_to_cpu(cqe->pars_flags.flags);
361 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
362 tpa_info->tpa_state = BNX2X_TPA_START;
363 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
364 tpa_info->placement_offset = cqe->placement_offset;
365 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
366 if (fp->mode == TPA_MODE_GRO) {
367 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
368 tpa_info->full_page =
369 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
370 tpa_info->gro_size = gro_size;
373 #ifdef BNX2X_STOP_ON_ERROR
374 fp->tpa_queue_used |= (1 << queue);
375 #ifdef _ASM_GENERIC_INT_L64_H
376 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
378 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
384 /* Timestamp option length allowed for TPA aggregation:
386 * nop nop kind length echo val
388 #define TPA_TSTAMP_OPT_LEN 12
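/* The 12 bytes above break down as nop(1) + nop(1) + kind(1) + length(1) +
 * echo(4) + val(4): a standard 10-byte TCP timestamp option padded to a
 * 4-byte boundary with two NOPs.
 */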
390 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
393 * @parsing_flags: parsing flags from the START CQE
394 * @len_on_bd: total length of the first packet for the aggregation
397 * Approximate value of the MSS for this aggregation calculated using
398 * the first packet of it.
400 static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
404 * TPA aggregation won't have either IP options or TCP options
405 * other than timestamp or IPv6 extension headers.
407 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
409 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
410 PRS_FLAG_OVERETH_IPV6)
411 hdrs_len += sizeof(struct ipv6hdr);
413 hdrs_len += sizeof(struct iphdr);
416 /* Check if there was a TCP timestamp; if there was, it will
417 * always be 12 bytes long: nop nop kind length echo val.
419 * Otherwise FW would close the aggregation.
421 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
422 hdrs_len += TPA_TSTAMP_OPT_LEN;
424 return len_on_bd - hdrs_len;
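/* Worked example (illustrative values only): for an IPv4 aggregation whose
 * first packet carries a timestamp option and has len_on_bd = 1514,
 * hdrs_len = 14 (ETH_HLEN) + 20 (iphdr) + 20 (tcphdr) +
 * 12 (TPA_TSTAMP_OPT_LEN) = 66, so the approximated MSS is
 * 1514 - 66 = 1448.
 */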
427 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
428 struct bnx2x_fastpath *fp, u16 index)
430 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
431 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
432 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
435 if (unlikely(page == NULL)) {
436 BNX2X_ERR("Can't alloc sge\n");
440 mapping = dma_map_page(&bp->pdev->dev, page, 0,
441 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
442 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
443 __free_pages(page, PAGES_PER_SGE_SHIFT);
444 BNX2X_ERR("Can't map sge\n");
449 dma_unmap_addr_set(sw_buf, mapping, mapping);
451 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
452 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
457 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
458 struct bnx2x_agg_info *tpa_info,
461 struct eth_end_agg_rx_cqe *cqe,
464 struct sw_rx_page *rx_pg, old_rx_pg;
465 u32 i, frag_len, frag_size;
466 int err, j, frag_id = 0;
467 u16 len_on_bd = tpa_info->len_on_bd;
468 u16 full_page = 0, gro_size = 0;
470 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
472 if (fp->mode == TPA_MODE_GRO) {
473 gro_size = tpa_info->gro_size;
474 full_page = tpa_info->full_page;
477 /* This is needed in order to enable forwarding support */
479 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
480 tpa_info->parsing_flags, len_on_bd);
483 if (fp->mode == TPA_MODE_GRO)
484 skb_shinfo(skb)->gso_type =
485 (GET_FLAG(tpa_info->parsing_flags,
486 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
487 PRS_FLAG_OVERETH_IPV6) ?
488 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
492 #ifdef BNX2X_STOP_ON_ERROR
493 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
494 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
496 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
502 /* Run through the SGL and compose the fragmented skb */
503 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
504 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
506 /* FW gives the indices of the SGE as if the ring is an array
507 (meaning that "next" element will consume 2 indices) */
508 if (fp->mode == TPA_MODE_GRO)
509 frag_len = min_t(u32, frag_size, (u32)full_page);
511 frag_len = min_t(u32, frag_size,
512 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
514 rx_pg = &fp->rx_page_ring[sge_idx];
517 /* If we fail to allocate a substitute page, we simply stop
518 where we are and drop the whole packet */
519 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
521 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
525 /* Unmap the page as we are going to pass it to the stack */
526 dma_unmap_page(&bp->pdev->dev,
527 dma_unmap_addr(&old_rx_pg, mapping),
528 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
529 /* Add one frag and update the appropriate fields in the skb */
530 if (fp->mode == TPA_MODE_LRO)
531 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
535 for (rem = frag_len; rem > 0; rem -= gro_size) {
536 int len = rem > gro_size ? gro_size : rem;
537 skb_fill_page_desc(skb, frag_id++,
538 old_rx_pg.page, offset, len);
540 get_page(old_rx_pg.page);
545 skb->data_len += frag_len;
546 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
547 skb->len += frag_len;
549 frag_size -= frag_len;
555 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
556 struct bnx2x_agg_info *tpa_info,
558 struct eth_end_agg_rx_cqe *cqe,
561 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
562 u8 pad = tpa_info->placement_offset;
563 u16 len = tpa_info->len_on_bd;
564 struct sk_buff *skb = NULL;
565 u8 *new_data, *data = rx_buf->data;
566 u8 old_tpa_state = tpa_info->tpa_state;
568 tpa_info->tpa_state = BNX2X_TPA_STOP;
570 /* If there was an error during the handling of the TPA_START -
571 * drop this aggregation.
573 if (old_tpa_state == BNX2X_TPA_ERROR)
576 /* Try to allocate the new data */
577 new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
579 /* Unmap skb in the pool anyway, as we are going to change
580 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails.
582 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
583 fp->rx_buf_size, DMA_FROM_DEVICE);
584 if (likely(new_data))
585 skb = build_skb(data, 0);
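/* build_skb() wraps the existing, already unmapped 'data' buffer in an
 * skb without copying it; only the skb metadata is allocated here.
 */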
588 #ifdef BNX2X_STOP_ON_ERROR
589 if (pad + len > fp->rx_buf_size) {
590 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
591 pad, len, fp->rx_buf_size);
597 skb_reserve(skb, pad + NET_SKB_PAD);
599 skb->rxhash = tpa_info->rxhash;
600 skb->l4_rxhash = tpa_info->l4_rxhash;
602 skb->protocol = eth_type_trans(skb, bp->dev);
603 skb->ip_summed = CHECKSUM_UNNECESSARY;
605 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
606 skb, cqe, cqe_idx)) {
607 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
608 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
609 napi_gro_receive(&fp->napi, skb);
611 DP(NETIF_MSG_RX_STATUS,
612 "Failed to allocate new pages - dropping packet!\n");
613 dev_kfree_skb_any(skb);
617 /* put new data in bin */
618 rx_buf->data = new_data;
624 /* drop the packet and keep the buffer in the bin */
625 DP(NETIF_MSG_RX_STATUS,
626 "Failed to allocate or map a new skb - dropping packet!\n");
627 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
630 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
631 struct bnx2x_fastpath *fp, u16 index)
634 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
635 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
638 data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
639 if (unlikely(data == NULL))
642 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
645 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
647 BNX2X_ERR("Can't map rx data\n");
652 dma_unmap_addr_set(rx_buf, mapping, mapping);
654 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
655 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
661 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
662 struct bnx2x_fastpath *fp,
663 struct bnx2x_eth_q_stats *qstats)
665 /* Do nothing if no L4 csum validation was done.
666 * We do not check whether IP csum was validated. For IPv4 we assume
667 * that if the card got as far as validating the L4 csum, it also
668 * validated the IP csum. IPv6 has no IP csum.
670 if (cqe->fast_path_cqe.status_flags &
671 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
674 /* If L4 validation was done, check if an error was found. */
676 if (cqe->fast_path_cqe.type_error_flags &
677 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
678 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
679 qstats->hw_csum_err++;
681 skb->ip_summed = CHECKSUM_UNNECESSARY;
684 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
686 struct bnx2x *bp = fp->bp;
687 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
688 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
691 #ifdef BNX2X_STOP_ON_ERROR
692 if (unlikely(bp->panic))
696 /* CQ "next element" is of the size of the regular element,
697 that's why it's ok here */
698 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
699 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
702 bd_cons = fp->rx_bd_cons;
703 bd_prod = fp->rx_bd_prod;
704 bd_prod_fw = bd_prod;
705 sw_comp_cons = fp->rx_comp_cons;
706 sw_comp_prod = fp->rx_comp_prod;
708 /* Memory barrier necessary as speculative reads of the rx
709 * buffer can be ahead of the index in the status block
713 DP(NETIF_MSG_RX_STATUS,
714 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
715 fp->index, hw_comp_cons, sw_comp_cons);
717 while (sw_comp_cons != hw_comp_cons) {
718 struct sw_rx_bd *rx_buf = NULL;
720 union eth_rx_cqe *cqe;
721 struct eth_fast_path_rx_cqe *cqe_fp;
723 enum eth_rx_cqe_type cqe_fp_type;
728 #ifdef BNX2X_STOP_ON_ERROR
729 if (unlikely(bp->panic))
733 comp_ring_cons = RCQ_BD(sw_comp_cons);
734 bd_prod = RX_BD(bd_prod);
735 bd_cons = RX_BD(bd_cons);
737 cqe = &fp->rx_comp_ring[comp_ring_cons];
738 cqe_fp = &cqe->fast_path_cqe;
739 cqe_fp_flags = cqe_fp->type_error_flags;
740 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
742 DP(NETIF_MSG_RX_STATUS,
743 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
744 CQE_TYPE(cqe_fp_flags),
745 cqe_fp_flags, cqe_fp->status_flags,
746 le32_to_cpu(cqe_fp->rss_hash_result),
747 le16_to_cpu(cqe_fp->vlan_tag),
748 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
750 /* is this a slowpath msg? */
751 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
752 bnx2x_sp_event(fp, cqe);
756 rx_buf = &fp->rx_buf_ring[bd_cons];
759 if (!CQE_TYPE_FAST(cqe_fp_type)) {
760 struct bnx2x_agg_info *tpa_info;
761 u16 frag_size, pages;
762 #ifdef BNX2X_STOP_ON_ERROR
764 if (fp->disable_tpa &&
765 (CQE_TYPE_START(cqe_fp_type) ||
766 CQE_TYPE_STOP(cqe_fp_type)))
767 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
768 CQE_TYPE(cqe_fp_type));
771 if (CQE_TYPE_START(cqe_fp_type)) {
772 u16 queue = cqe_fp->queue_index;
773 DP(NETIF_MSG_RX_STATUS,
774 "calling tpa_start on queue %d\n",
777 bnx2x_tpa_start(fp, queue,
784 queue = cqe->end_agg_cqe.queue_index;
785 tpa_info = &fp->tpa_info[queue];
786 DP(NETIF_MSG_RX_STATUS,
787 "calling tpa_stop on queue %d\n",
790 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
793 if (fp->mode == TPA_MODE_GRO)
794 pages = (frag_size + tpa_info->full_page - 1) /
797 pages = SGE_PAGE_ALIGN(frag_size) >>
800 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
801 &cqe->end_agg_cqe, comp_ring_cons);
802 #ifdef BNX2X_STOP_ON_ERROR
807 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
811 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
812 pad = cqe_fp->placement_offset;
813 dma_sync_single_for_cpu(&bp->pdev->dev,
814 dma_unmap_addr(rx_buf, mapping),
815 pad + RX_COPY_THRESH,
818 prefetch(data + pad); /* speedup eth_type_trans() */
819 /* is this an error packet? */
820 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
821 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
822 "ERROR flags %x rx packet %u\n",
823 cqe_fp_flags, sw_comp_cons);
824 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
828 /* Since we don't have a jumbo ring
829 * copy small packets if mtu > 1500
831 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
832 (len <= RX_COPY_THRESH)) {
833 skb = netdev_alloc_skb_ip_align(bp->dev, len);
835 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
836 "ERROR packet dropped because of alloc failure\n");
837 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
840 memcpy(skb->data, data + pad, len);
841 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
843 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
844 dma_unmap_single(&bp->pdev->dev,
845 dma_unmap_addr(rx_buf, mapping),
848 skb = build_skb(data, 0);
849 if (unlikely(!skb)) {
851 bnx2x_fp_qstats(bp, fp)->
852 rx_skb_alloc_failed++;
855 skb_reserve(skb, pad);
857 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
858 "ERROR packet dropped because of alloc failure\n");
859 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
861 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
867 skb->protocol = eth_type_trans(skb, bp->dev);
869 /* Set Toeplitz hash for a non-LRO skb */
870 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
871 skb->l4_rxhash = l4_rxhash;
873 skb_checksum_none_assert(skb);
875 if (bp->dev->features & NETIF_F_RXCSUM)
876 bnx2x_csum_validate(skb, cqe, fp,
877 bnx2x_fp_qstats(bp, fp));
879 skb_record_rx_queue(skb, fp->rx_queue);
881 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
883 __vlan_hwaccel_put_tag(skb,
884 le16_to_cpu(cqe_fp->vlan_tag));
885 napi_gro_receive(&fp->napi, skb);
891 bd_cons = NEXT_RX_IDX(bd_cons);
892 bd_prod = NEXT_RX_IDX(bd_prod);
893 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
896 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
897 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
899 if (rx_pkt == budget)
903 fp->rx_bd_cons = bd_cons;
904 fp->rx_bd_prod = bd_prod_fw;
905 fp->rx_comp_cons = sw_comp_cons;
906 fp->rx_comp_prod = sw_comp_prod;
908 /* Update producers */
909 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
912 fp->rx_pkt += rx_pkt;
918 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
920 struct bnx2x_fastpath *fp = fp_cookie;
921 struct bnx2x *bp = fp->bp;
925 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
926 fp->index, fp->fw_sb_id, fp->igu_sb_id);
927 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
929 #ifdef BNX2X_STOP_ON_ERROR
930 if (unlikely(bp->panic))
934 /* Handle Rx and Tx according to MSI-X vector */
935 prefetch(fp->rx_cons_sb);
937 for_each_cos_in_tx_queue(fp, cos)
938 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
940 prefetch(&fp->sb_running_index[SM_RX_ID]);
941 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
946 /* HW Lock for shared dual port PHYs */
947 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
949 mutex_lock(&bp->port.phy_mutex);
951 if (bp->port.need_hw_lock)
952 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
955 void bnx2x_release_phy_lock(struct bnx2x *bp)
957 if (bp->port.need_hw_lock)
958 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
960 mutex_unlock(&bp->port.phy_mutex);
963 /* calculates MF speed according to current linespeed and MF configuration */
964 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
966 u16 line_speed = bp->link_vars.line_speed;
968 u16 maxCfg = bnx2x_extract_max_cfg(bp,
969 bp->mf_config[BP_VN(bp)]);
971 /* Calculate the current MAX line speed limit for the MF
975 line_speed = (line_speed * maxCfg) / 100;
977 u16 vn_max_rate = maxCfg * 100;
979 if (vn_max_rate < line_speed)
980 line_speed = vn_max_rate;
988 * bnx2x_fill_report_data - fill link report data to report
991 * @data: link state to update
993 * It uses non-atomic bit operations because it is called under the mutex.
995 static void bnx2x_fill_report_data(struct bnx2x *bp,
996 struct bnx2x_link_report_data *data)
998 u16 line_speed = bnx2x_get_mf_speed(bp);
1000 memset(data, 0, sizeof(*data));
1002 /* Fill the report data: effective line speed */
1003 data->line_speed = line_speed;
1006 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1007 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1008 &data->link_report_flags);
1011 if (bp->link_vars.duplex == DUPLEX_FULL)
1012 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1014 /* Rx Flow Control is ON */
1015 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1016 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1018 /* Tx Flow Control is ON */
1019 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1020 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1024 * bnx2x_link_report - report link status to OS.
1026 * @bp: driver handle
1028 * Calls the __bnx2x_link_report() under the same locking scheme
1029 * as the link/PHY state managing code to ensure consistent link reporting.
1033 void bnx2x_link_report(struct bnx2x *bp)
1035 bnx2x_acquire_phy_lock(bp);
1036 __bnx2x_link_report(bp);
1037 bnx2x_release_phy_lock(bp);
1041 * __bnx2x_link_report - report link status to OS.
1043 * @bp: driver handle
1045 * Non-atomic implementation.
1046 * Should be called under the phy_lock.
1048 void __bnx2x_link_report(struct bnx2x *bp)
1050 struct bnx2x_link_report_data cur_data;
1053 if (!CHIP_IS_E1(bp))
1054 bnx2x_read_mf_cfg(bp);
1056 /* Read the current link report info */
1057 bnx2x_fill_report_data(bp, &cur_data);
1059 /* Don't report link down or exactly the same link status twice */
1060 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1061 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1062 &bp->last_reported_link.link_report_flags) &&
1063 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1064 &cur_data.link_report_flags)))
1069 /* We are going to report new link parameters now -
1070 * remember the current data for the next time.
1072 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1074 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1075 &cur_data.link_report_flags)) {
1076 netif_carrier_off(bp->dev);
1077 netdev_err(bp->dev, "NIC Link is Down\n");
1083 netif_carrier_on(bp->dev);
1085 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1086 &cur_data.link_report_flags))
1091 /* Handle the FC at the end so that only these flags could
1092 * possibly be set. This way we may easily check if there is no FC enabled.
1095 if (cur_data.link_report_flags) {
1096 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1097 &cur_data.link_report_flags)) {
1098 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1099 &cur_data.link_report_flags))
1100 flow = "ON - receive & transmit";
1102 flow = "ON - receive";
1104 flow = "ON - transmit";
1109 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1110 cur_data.line_speed, duplex, flow);
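/* Example of the resulting log line (illustrative values):
 * "NIC Link is Up, 10000 Mbps full duplex, Flow control: ON - receive & transmit"
 */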
1114 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1118 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1119 struct eth_rx_sge *sge;
1121 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1123 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1124 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1127 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1128 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1132 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1133 struct bnx2x_fastpath *fp, int last)
1137 for (i = 0; i < last; i++) {
1138 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1139 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1140 u8 *data = first_buf->data;
1143 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1146 if (tpa_info->tpa_state == BNX2X_TPA_START)
1147 dma_unmap_single(&bp->pdev->dev,
1148 dma_unmap_addr(first_buf, mapping),
1149 fp->rx_buf_size, DMA_FROM_DEVICE);
1151 first_buf->data = NULL;
1155 void bnx2x_init_rx_rings(struct bnx2x *bp)
1157 int func = BP_FUNC(bp);
1161 /* Allocate TPA resources */
1162 for_each_rx_queue(bp, j) {
1163 struct bnx2x_fastpath *fp = &bp->fp[j];
1166 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1168 if (!fp->disable_tpa) {
1169 /* Fill the per-aggregation pool */
1170 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1171 struct bnx2x_agg_info *tpa_info =
1173 struct sw_rx_bd *first_buf =
1174 &tpa_info->first_buf;
1176 first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
1178 if (!first_buf->data) {
1179 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1181 bnx2x_free_tpa_pool(bp, fp, i);
1182 fp->disable_tpa = 1;
1185 dma_unmap_addr_set(first_buf, mapping, 0);
1186 tpa_info->tpa_state = BNX2X_TPA_STOP;
1189 /* "next page" elements initialization */
1190 bnx2x_set_next_page_sgl(fp);
1192 /* set SGEs bit mask */
1193 bnx2x_init_sge_ring_bit_mask(fp);
1195 /* Allocate SGEs and initialize the ring elements */
1196 for (i = 0, ring_prod = 0;
1197 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1199 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1200 BNX2X_ERR("was only able to allocate %d rx sges\n",
1202 BNX2X_ERR("disabling TPA for queue[%d]\n",
1204 /* Cleanup already allocated elements */
1205 bnx2x_free_rx_sge_range(bp, fp,
1207 bnx2x_free_tpa_pool(bp, fp,
1209 fp->disable_tpa = 1;
1213 ring_prod = NEXT_SGE_IDX(ring_prod);
1216 fp->rx_sge_prod = ring_prod;
1220 for_each_rx_queue(bp, j) {
1221 struct bnx2x_fastpath *fp = &bp->fp[j];
1225 /* Activate BD ring */
1227 * this will generate an interrupt (to the TSTORM)
1228 * must only be done after chip is initialized
1230 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1236 if (CHIP_IS_E1(bp)) {
1237 REG_WR(bp, BAR_USTRORM_INTMEM +
1238 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1239 U64_LO(fp->rx_comp_mapping));
1240 REG_WR(bp, BAR_USTRORM_INTMEM +
1241 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1242 U64_HI(fp->rx_comp_mapping));
1247 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1252 for_each_tx_queue(bp, i) {
1253 struct bnx2x_fastpath *fp = &bp->fp[i];
1254 for_each_cos_in_tx_queue(fp, cos) {
1255 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1256 unsigned pkts_compl = 0, bytes_compl = 0;
1258 u16 sw_prod = txdata->tx_pkt_prod;
1259 u16 sw_cons = txdata->tx_pkt_cons;
1261 while (sw_cons != sw_prod) {
1262 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1263 &pkts_compl, &bytes_compl);
1266 netdev_tx_reset_queue(
1267 netdev_get_tx_queue(bp->dev,
1268 txdata->txq_index));
1273 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1275 struct bnx2x *bp = fp->bp;
1278 /* ring wasn't allocated */
1279 if (fp->rx_buf_ring == NULL)
1282 for (i = 0; i < NUM_RX_BD; i++) {
1283 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1284 u8 *data = rx_buf->data;
1288 dma_unmap_single(&bp->pdev->dev,
1289 dma_unmap_addr(rx_buf, mapping),
1290 fp->rx_buf_size, DMA_FROM_DEVICE);
1292 rx_buf->data = NULL;
1297 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1301 for_each_rx_queue(bp, j) {
1302 struct bnx2x_fastpath *fp = &bp->fp[j];
1304 bnx2x_free_rx_bds(fp);
1306 if (!fp->disable_tpa)
1307 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1311 void bnx2x_free_skbs(struct bnx2x *bp)
1313 bnx2x_free_tx_skbs(bp);
1314 bnx2x_free_rx_skbs(bp);
1317 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1319 /* load old values */
1320 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1322 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1323 /* leave all but MAX value */
1324 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1326 /* set new MAX value */
1327 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1328 & FUNC_MF_CFG_MAX_BW_MASK;
1330 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1335 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1337 * @bp: driver handle
1338 * @nvecs: number of vectors to be released
1340 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1344 if (nvecs == offset)
1346 free_irq(bp->msix_table[offset].vector, bp->dev);
1347 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1348 bp->msix_table[offset].vector);
1351 if (nvecs == offset)
1356 for_each_eth_queue(bp, i) {
1357 if (nvecs == offset)
1359 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1360 i, bp->msix_table[offset].vector);
1362 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1366 void bnx2x_free_irq(struct bnx2x *bp)
1368 if (bp->flags & USING_MSIX_FLAG &&
1369 !(bp->flags & USING_SINGLE_MSIX_FLAG))
1370 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1373 free_irq(bp->dev->irq, bp->dev);
1376 int bnx2x_enable_msix(struct bnx2x *bp)
1378 int msix_vec = 0, i, rc, req_cnt;
1380 bp->msix_table[msix_vec].entry = msix_vec;
1381 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1382 bp->msix_table[0].entry);
1386 bp->msix_table[msix_vec].entry = msix_vec;
1387 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1388 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1391 /* We need separate vectors for ETH queues only (not FCoE) */
1392 for_each_eth_queue(bp, i) {
1393 bp->msix_table[msix_vec].entry = msix_vec;
1394 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1395 msix_vec, msix_vec, i);
1399 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
1401 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1404 * reconfigure number of tx/rx queues according to available MSI-X vectors
1407 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1408 /* how many fewer vectors will we have? */
1409 int diff = req_cnt - rc;
1411 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1413 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1416 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1420 * decrease number of queues by number of unallocated entries
1422 bp->num_queues -= diff;
1424 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1426 } else if (rc > 0) {
1427 /* Get by with single vector */
1428 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1430 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1435 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1436 bp->flags |= USING_SINGLE_MSIX_FLAG;
1438 } else if (rc < 0) {
1439 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1443 bp->flags |= USING_MSIX_FLAG;
1448 /* fall back to INTx if not enough memory */
1450 bp->flags |= DISABLE_MSI_FLAG;
1455 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1457 int i, rc, offset = 0;
1459 rc = request_irq(bp->msix_table[offset++].vector,
1460 bnx2x_msix_sp_int, 0,
1461 bp->dev->name, bp->dev);
1463 BNX2X_ERR("request sp irq failed\n");
1470 for_each_eth_queue(bp, i) {
1471 struct bnx2x_fastpath *fp = &bp->fp[i];
1472 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1475 rc = request_irq(bp->msix_table[offset].vector,
1476 bnx2x_msix_fp_int, 0, fp->name, fp);
1478 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1479 bp->msix_table[offset].vector, rc);
1480 bnx2x_free_msix_irqs(bp, offset);
1487 i = BNX2X_NUM_ETH_QUEUES(bp);
1488 offset = 1 + CNIC_PRESENT;
1489 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1490 bp->msix_table[0].vector,
1491 0, bp->msix_table[offset].vector,
1492 i - 1, bp->msix_table[offset + i - 1].vector);
1497 int bnx2x_enable_msi(struct bnx2x *bp)
1501 rc = pci_enable_msi(bp->pdev);
1503 BNX2X_DEV_INFO("MSI is not attainable\n");
1506 bp->flags |= USING_MSI_FLAG;
1511 static int bnx2x_req_irq(struct bnx2x *bp)
1513 unsigned long flags;
1516 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1519 flags = IRQF_SHARED;
1521 if (bp->flags & USING_MSIX_FLAG)
1522 irq = bp->msix_table[0].vector;
1524 irq = bp->pdev->irq;
1526 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1529 static int bnx2x_setup_irqs(struct bnx2x *bp)
1532 if (bp->flags & USING_MSIX_FLAG &&
1533 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1534 rc = bnx2x_req_msix_irqs(bp);
1539 rc = bnx2x_req_irq(bp);
1541 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1544 if (bp->flags & USING_MSI_FLAG) {
1545 bp->dev->irq = bp->pdev->irq;
1546 netdev_info(bp->dev, "using MSI IRQ %d\n",
1549 if (bp->flags & USING_MSIX_FLAG) {
1550 bp->dev->irq = bp->msix_table[0].vector;
1551 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1559 static void bnx2x_napi_enable(struct bnx2x *bp)
1563 for_each_rx_queue(bp, i)
1564 napi_enable(&bnx2x_fp(bp, i, napi));
1567 static void bnx2x_napi_disable(struct bnx2x *bp)
1571 for_each_rx_queue(bp, i)
1572 napi_disable(&bnx2x_fp(bp, i, napi));
1575 void bnx2x_netif_start(struct bnx2x *bp)
1577 if (netif_running(bp->dev)) {
1578 bnx2x_napi_enable(bp);
1579 bnx2x_int_enable(bp);
1580 if (bp->state == BNX2X_STATE_OPEN)
1581 netif_tx_wake_all_queues(bp->dev);
1585 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1587 bnx2x_int_disable_sync(bp, disable_hw);
1588 bnx2x_napi_disable(bp);
1591 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1593 struct bnx2x *bp = netdev_priv(dev);
1597 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1598 u16 ether_type = ntohs(hdr->h_proto);
1600 /* Skip VLAN tag if present */
1601 if (ether_type == ETH_P_8021Q) {
1602 struct vlan_ethhdr *vhdr =
1603 (struct vlan_ethhdr *)skb->data;
1605 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1608 /* If ethertype is FCoE or FIP - use FCoE ring */
1609 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1610 return bnx2x_fcoe_tx(bp, txq_index);
1613 /* select a non-FCoE queue */
1614 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
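/* Only FCoE/FIP frames are steered to the dedicated FCoE ring above;
 * everything else is hashed across the ETH L2 queues only, so the FCoE
 * queue never receives regular traffic.
 */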
1618 void bnx2x_set_num_queues(struct bnx2x *bp)
1621 bp->num_queues = bnx2x_calc_num_queues(bp);
1624 /* override in STORAGE SD modes */
1625 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1628 /* Add special queues */
1629 bp->num_queues += NON_ETH_CONTEXT_USE;
1631 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1635 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1637 * @bp: Driver handle
1639 * We currently support at most 16 Tx queues for each CoS, thus we will
1640 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1643 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1644 * index after all ETH L2 indices.
1646 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1647 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1648 * 16..31,...) with indices that are not coupled with any real Tx queue.
1650 * The proper configuration of skb->queue_mapping is handled by
1651 * bnx2x_select_queue() and __skb_tx_hash().
1653 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1654 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1656 static int bnx2x_set_real_num_queues(struct bnx2x *bp)
1660 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1661 rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
1663 /* account for fcoe queue */
1671 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1673 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1676 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1678 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1682 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1688 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1692 for_each_queue(bp, i) {
1693 struct bnx2x_fastpath *fp = &bp->fp[i];
1696 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1699 * Although there are no IP frames expected to arrive to
1700 * this ring we still want to add an
1701 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1704 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1707 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1708 IP_HEADER_ALIGNMENT_PADDING +
1711 BNX2X_FW_RX_ALIGN_END;
1712 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
1716 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1719 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1721 /* Prepare the initial contents of the indirection table if RSS is enabled.
1724 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1725 bp->rss_conf_obj.ind_table[i] =
1727 ethtool_rxfh_indir_default(i, num_eth_queues);
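/* ethtool_rxfh_indir_default() spreads the entries round-robin
 * (index % num_eth_queues), so with e.g. four ETH queues the table
 * reads 0, 1, 2, 3, 0, 1, 2, 3, ...
 */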
1730 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1731 * per-port, so if explicit configuration is needed, do it only for a PMF.
1734 * For 57712 and newer on the other hand it's a per-function
1737 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1740 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1743 struct bnx2x_config_rss_params params = {NULL};
1746 /* Although RSS is meaningless when there is a single HW queue we
1747 * still need it enabled in order to have HW Rx hash generated.
1749 * if (!is_eth_multi(bp))
1750 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1753 params.rss_obj = rss_obj;
1755 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
1757 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
1759 /* RSS configuration */
1760 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
1761 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
1762 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
1763 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
1764 if (rss_obj->udp_rss_v4)
1765 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
1766 if (rss_obj->udp_rss_v6)
1767 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
1770 params.rss_result_mask = MULTI_MASK;
1772 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
1776 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1777 params.rss_key[i] = random32();
1779 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
1782 return bnx2x_config_rss(bp, ¶ms);
1785 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1787 struct bnx2x_func_state_params func_params = {NULL};
1789 /* Prepare parameters for function state transitions */
1790 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1792 func_params.f_obj = &bp->func_obj;
1793 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1795 func_params.params.hw_init.load_phase = load_code;
1797 return bnx2x_func_state_change(bp, &func_params);
1801 * Cleans the objects that have internal lists without sending
1802 * ramrods. Should be run when interrupts are disabled.
1804 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1807 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1808 struct bnx2x_mcast_ramrod_params rparam = {NULL};
1809 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
1811 /***************** Cleanup MACs' object first *************************/
1813 /* Wait for completion of requested */
1814 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1815 /* Perform a dry cleanup */
1816 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1818 /* Clean ETH primary MAC */
1819 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1820 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
1823 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1825 /* Cleanup UC list */
1827 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1828 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1831 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1833 /***************** Now clean mcast object *****************************/
1834 rparam.mcast_obj = &bp->mcast_obj;
1835 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1837 /* Add a DEL command... */
1838 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1840 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1843 /* ...and wait until all pending commands are cleared */
1844 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1847 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1852 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1856 #ifndef BNX2X_STOP_ON_ERROR
1857 #define LOAD_ERROR_EXIT(bp, label) \
1859 (bp)->state = BNX2X_STATE_ERROR; \
1863 #define LOAD_ERROR_EXIT(bp, label) \
1865 (bp)->state = BNX2X_STATE_ERROR; \
1871 bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1873 /* build FW version dword */
1874 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1875 (BCM_5710_FW_MINOR_VERSION << 8) +
1876 (BCM_5710_FW_REVISION_VERSION << 16) +
1877 (BCM_5710_FW_ENGINEERING_VERSION << 24);
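/* For illustration only (hypothetical version numbers): a 7.2.16.0
 * firmware would pack as 7 + (2 << 8) + (16 << 16) + (0 << 24) = 0x00100207.
 */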
1879 /* read loaded FW from chip */
1880 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1882 DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1884 if (loaded_fw != my_fw) {
1886 BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1895 * bnx2x_bz_fp - zero content of the fastpath structure.
1897 * @bp: driver handle
1898 * @index: fastpath index to be zeroed
1900 * Makes sure the contents of bp->fp[index].napi are kept intact.
1903 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1905 struct bnx2x_fastpath *fp = &bp->fp[index];
1906 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
1909 struct napi_struct orig_napi = fp->napi;
1910 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
1911 /* bzero bnx2x_fastpath contents */
1912 if (bp->stats_init) {
1913 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1914 memset(fp, 0, sizeof(*fp));
1916 /* Keep Queue statistics */
1917 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
1918 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
1920 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
1922 if (tmp_eth_q_stats)
1923 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
1924 sizeof(struct bnx2x_eth_q_stats));
1926 tmp_eth_q_stats_old =
1927 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
1929 if (tmp_eth_q_stats_old)
1930 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
1931 sizeof(struct bnx2x_eth_q_stats_old));
1933 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1934 memset(fp, 0, sizeof(*fp));
1936 if (tmp_eth_q_stats) {
1937 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
1938 sizeof(struct bnx2x_eth_q_stats));
1939 kfree(tmp_eth_q_stats);
1942 if (tmp_eth_q_stats_old) {
1943 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
1944 sizeof(struct bnx2x_eth_q_stats_old));
1945 kfree(tmp_eth_q_stats_old);
1950 /* Restore the NAPI object as it has been already initialized */
1951 fp->napi = orig_napi;
1952 fp->tpa_info = orig_tpa_info;
1956 fp->max_cos = bp->max_cos;
1958 /* Special queues support only one CoS */
1961 /* Init txdata pointers */
1964 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
1967 for_each_cos_in_tx_queue(fp, cos)
1968 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
1969 BNX2X_NUM_ETH_QUEUES(bp) + index];
1972 * set the tpa flag for each queue. The tpa flag determines the queue
1973 * minimal size so it must be set prior to queue memory allocation
1975 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
1976 (bp->flags & GRO_ENABLE_FLAG &&
1977 bnx2x_mtu_allows_gro(bp->dev->mtu)));
1978 if (bp->flags & TPA_ENABLE_FLAG)
1979 fp->mode = TPA_MODE_LRO;
1980 else if (bp->flags & GRO_ENABLE_FLAG)
1981 fp->mode = TPA_MODE_GRO;
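/* Note the precedence: if both TPA_ENABLE_FLAG and GRO_ENABLE_FLAG are
 * set, LRO mode wins because it is tested first.
 */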
1984 /* We don't want TPA on an FCoE L2 ring */
1986 fp->disable_tpa = 1;
1991 /* must be called with rtnl_lock */
1992 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1994 int port = BP_PORT(bp);
1998 #ifdef BNX2X_STOP_ON_ERROR
1999 if (unlikely(bp->panic)) {
2000 BNX2X_ERR("Can't load NIC when there is panic\n");
2005 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2007 /* Set the initial link reported state to link down */
2008 bnx2x_acquire_phy_lock(bp);
2009 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2010 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2011 &bp->last_reported_link.link_report_flags);
2012 bnx2x_release_phy_lock(bp);
2014 /* must be called before memory allocation and HW init */
2015 bnx2x_ilt_set_info(bp);
2018 * Zero fastpath structures preserving invariants like napi, which are
2019 * allocated only once, fp index, max_cos, bp pointer.
2020 * Also set fp->disable_tpa and txdata_ptr.
2022 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2023 for_each_queue(bp, i)
2025 memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
2026 sizeof(struct bnx2x_fp_txdata));
2029 /* Set the receive queues buffer size */
2030 bnx2x_set_rx_buf_size(bp);
2032 if (bnx2x_alloc_mem(bp))
2035 /* As long as bnx2x_alloc_mem() may possibly update
2036 * bp->num_queues, bnx2x_set_real_num_queues() should always
2039 rc = bnx2x_set_real_num_queues(bp);
2041 BNX2X_ERR("Unable to set real_num_queues\n");
2042 LOAD_ERROR_EXIT(bp, load_error0);
2045 /* configure multi cos mappings in kernel.
2046 * this configuration may be overridden by a multi class queue discipline
2047 * or by a dcbx negotiation result.
2049 bnx2x_setup_tc(bp->dev, bp->max_cos);
2051 /* Add all NAPI objects */
2052 bnx2x_add_all_napi(bp);
2053 bnx2x_napi_enable(bp);
2055 /* set pf load just before approaching the MCP */
2056 bnx2x_set_pf_load(bp);
2058 /* Send LOAD_REQUEST command to MCP
2059 * Returns the type of LOAD command:
2060 * if it is the first port to be initialized
2061 * common blocks should be initialized, otherwise - not
2063 if (!BP_NOMCP(bp)) {
2066 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2067 DRV_MSG_SEQ_NUMBER_MASK);
2068 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2070 /* Get current FW pulse sequence */
2071 bp->fw_drv_pulse_wr_seq =
2072 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2073 DRV_PULSE_SEQ_MASK);
2074 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2076 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
2078 BNX2X_ERR("MCP response failure, aborting\n");
2080 LOAD_ERROR_EXIT(bp, load_error1);
2082 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2083 BNX2X_ERR("Driver load refused\n");
2084 rc = -EBUSY; /* other port in diagnostic mode */
2085 LOAD_ERROR_EXIT(bp, load_error1);
2087 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2088 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2089 /* abort nic load if version mismatch */
2090 if (!bnx2x_test_firmware_version(bp, true)) {
2092 LOAD_ERROR_EXIT(bp, load_error2);
2097 int path = BP_PATH(bp);
2099 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2100 path, load_count[path][0], load_count[path][1],
2101 load_count[path][2]);
2102 load_count[path][0]++;
2103 load_count[path][1 + port]++;
2104 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2105 path, load_count[path][0], load_count[path][1],
2106 load_count[path][2]);
2107 if (load_count[path][0] == 1)
2108 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
2109 else if (load_count[path][1 + port] == 1)
2110 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2112 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2115 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2116 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2117 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2120 * We need the barrier to ensure the ordering between the
2121 * writing to bp->port.pmf here and reading it from the
2122 * bnx2x_periodic_task().
2128 DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
2130 /* Init Function state controlling object */
2131 bnx2x__init_func_obj(bp);
2134 rc = bnx2x_init_hw(bp, load_code);
2136 BNX2X_ERR("HW init failed, aborting\n");
2137 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2138 LOAD_ERROR_EXIT(bp, load_error2);
2141 /* Connect to IRQs */
2142 rc = bnx2x_setup_irqs(bp);
2144 BNX2X_ERR("IRQs setup failed\n");
2145 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2146 LOAD_ERROR_EXIT(bp, load_error2);
2149 /* Setup NIC internals and enable interrupts */
2150 bnx2x_nic_init(bp, load_code);
2152 /* Init per-function objects */
2153 bnx2x_init_bp_objs(bp);
2155 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2156 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2157 (bp->common.shmem2_base)) {
2158 if (SHMEM2_HAS(bp, dcc_support))
2159 SHMEM2_WR(bp, dcc_support,
2160 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2161 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2162 if (SHMEM2_HAS(bp, afex_driver_support))
2163 SHMEM2_WR(bp, afex_driver_support,
2164 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2167 /* Set AFEX default VLAN tag to an invalid value */
2168 bp->afex_def_vlan_tag = -1;
2170 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2171 rc = bnx2x_func_start(bp);
2173 BNX2X_ERR("Function start failed!\n");
2174 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2175 LOAD_ERROR_EXIT(bp, load_error3);
2178 /* Send LOAD_DONE command to MCP */
2179 if (!BP_NOMCP(bp)) {
2180 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2182 BNX2X_ERR("MCP response failure, aborting\n");
2184 LOAD_ERROR_EXIT(bp, load_error3);
2188 rc = bnx2x_setup_leading(bp);
2190 BNX2X_ERR("Setup leading failed!\n");
2191 LOAD_ERROR_EXIT(bp, load_error3);
2195 /* Enable Timer scan */
2196 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2199 for_each_nondefault_queue(bp, i) {
2200 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2202 BNX2X_ERR("Queue setup failed\n");
2203 LOAD_ERROR_EXIT(bp, load_error4);
2207 rc = bnx2x_init_rss_pf(bp);
2209 BNX2X_ERR("PF RSS init failed\n");
2210 LOAD_ERROR_EXIT(bp, load_error4);
2213 /* Now when Clients are configured we are ready to work */
2214 bp->state = BNX2X_STATE_OPEN;
2216 /* Configure a ucast MAC */
2217 rc = bnx2x_set_eth_mac(bp, true);
2219 BNX2X_ERR("Setting Ethernet MAC failed\n");
2220 LOAD_ERROR_EXIT(bp, load_error4);
2223 if (bp->pending_max) {
2224 bnx2x_update_max_mf_config(bp, bp->pending_max);
2225 bp->pending_max = 0;
2229 bnx2x_initial_phy_init(bp, load_mode);
2231 /* Start fast path */
2233 /* Initialize Rx filter. */
2234 netif_addr_lock_bh(bp->dev);
2235 bnx2x_set_rx_mode(bp->dev);
2236 netif_addr_unlock_bh(bp->dev);
2239 switch (load_mode) {
2241 /* Tx queue should be only reenabled */
2242 netif_tx_wake_all_queues(bp->dev);
2246 netif_tx_start_all_queues(bp->dev);
2247 smp_mb__after_clear_bit();
2251 case LOAD_LOOPBACK_EXT:
2252 bp->state = BNX2X_STATE_DIAG;
2260 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
2262 bnx2x__link_status_update(bp);
2264 /* start the timer */
2265 mod_timer(&bp->timer, jiffies + bp->current_interval);
2268 /* re-read iscsi info */
2269 bnx2x_get_iscsi_info(bp);
2270 bnx2x_setup_cnic_irq_info(bp);
2271 bnx2x_setup_cnic_info(bp);
2272 if (bp->state == BNX2X_STATE_OPEN)
2273 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2276 /* mark driver is loaded in shmem2 */
2277 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2279 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2280 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2281 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2282 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2285 /* Wait for all pending SP commands to complete */
2286 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2287 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2288 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2292 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2293 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2294 bnx2x_dcbx_init(bp, false);
2298 #ifndef BNX2X_STOP_ON_ERROR
2301 /* Disable Timer scan */
2302 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2305 bnx2x_int_disable_sync(bp, 1);
2307 /* Clean queueable objects */
2308 bnx2x_squeeze_objects(bp);
2310 /* Free SKBs, SGEs, TPA pool and driver internals */
2311 bnx2x_free_skbs(bp);
2312 for_each_rx_queue(bp, i)
2313 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2318 if (!BP_NOMCP(bp)) {
2319 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2320 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2325 bnx2x_napi_disable(bp);
2326 /* clear pf_load status, as it was already set */
2327 bnx2x_clear_pf_load(bp);
2332 #endif /* ! BNX2X_STOP_ON_ERROR */
2335 /* must be called with rtnl_lock */
2336 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2339 bool global = false;
2341 /* mark driver is unloaded in shmem2 */
2342 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2344 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2345 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2346 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2349 if ((bp->state == BNX2X_STATE_CLOSED) ||
2350 (bp->state == BNX2X_STATE_ERROR)) {
2351 /* We can get here if the driver has been unloaded
2352 * during parity error recovery and is either waiting for a
2353 * leader to complete or for other functions to unload and
2354 * then ifdown has been issued. In this case we want to
2355 * unload and let other functions complete a recovery
2358 bp->recovery_state = BNX2X_RECOVERY_DONE;
2360 bnx2x_release_leader_lock(bp);
2363 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2364 BNX2X_ERR("Can't unload in closed or error state\n");
2369 * It's important to set the bp->state to a value different from
2370 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2371 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2373 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2377 bnx2x_tx_disable(bp);
2378 netdev_reset_tc(bp->dev);
2381 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2384 bp->rx_mode = BNX2X_RX_MODE_NONE;
2386 del_timer_sync(&bp->timer);
2388 /* Set ALWAYS_ALIVE bit in shmem */
2389 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2391 bnx2x_drv_pulse(bp);
2393 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2394 bnx2x_save_statistics(bp);
2396 /* Cleanup the chip if needed */
2397 if (unload_mode != UNLOAD_RECOVERY)
2398 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2400 /* Send the UNLOAD_REQUEST to the MCP */
2401 bnx2x_send_unload_req(bp, unload_mode);
2404 * Prevent transactions to host from the functions on the
2405 * engine that doesn't reset global blocks in case of global
2406 * attention once global blocks are reset and gates are opened
2407 * (the engine which leader will perform the recovery
2410 if (!CHIP_IS_E1x(bp))
2411 bnx2x_pf_disable(bp);
2413 /* Disable HW interrupts, NAPI */
2414 bnx2x_netif_stop(bp, 1);
2415 /* Delete all NAPI objects */
2416 bnx2x_del_all_napi(bp);
2421 /* Report UNLOAD_DONE to MCP */
2422 bnx2x_send_unload_done(bp, false);
2426 * At this stage no more interrupts will arrive so we may safely clean
2427 * the queueable objects here in case they failed to get cleaned so far.
2429 bnx2x_squeeze_objects(bp);
2431 /* There should be no more pending SP commands at this stage */
2436 /* Free SKBs, SGEs, TPA pool and driver internals */
2437 bnx2x_free_skbs(bp);
2438 for_each_rx_queue(bp, i)
2439 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2443 bp->state = BNX2X_STATE_CLOSED;
2445 /* Check if there are pending parity attentions. If there are - set
2446 * RECOVERY_IN_PROGRESS.
2448 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2449 bnx2x_set_reset_in_progress(bp);
2451 /* Set RESET_IS_GLOBAL if needed */
2453 bnx2x_set_reset_global(bp);
2457 /* The last driver must disable a "close the gate" if there is no
2458 * parity attention or "process kill" pending.
2460 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2461 bnx2x_disable_close_the_gate(bp);
2466 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2470 /* If there is no power capability, silently succeed */
2472 BNX2X_DEV_INFO("No power capability. Breaking.\n");
2476 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
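/* The write below clears the power-state field (i.e. requests D0) and
 * sets PCI_PM_CTRL_PME_STATUS, which is write-one-to-clear, so any
 * pending PME event is acknowledged; presumably this is the PCI_D0
 * branch of the switch on 'state'. */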
2480 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2481 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2482 PCI_PM_CTRL_PME_STATUS));
2484 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2485 /* delay required during transition out of D3hot */
2490 /* If there are other clients above, don't
2491 shut down the power */
2492 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2494 /* Don't shut down the power for emulation and FPGA */
2495 if (CHIP_REV_IS_SLOW(bp))
2498 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2502 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2504 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2507 /* No more memory access after this point until
2508 * device is brought back to D0.
2513 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
2520 * net_device service functions
2522 int bnx2x_poll(struct napi_struct *napi, int budget)
2526 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2528 struct bnx2x *bp = fp->bp;
2531 #ifdef BNX2X_STOP_ON_ERROR
2532 if (unlikely(bp->panic)) {
2533 napi_complete(napi);
2538 for_each_cos_in_tx_queue(fp, cos)
2539 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2540 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
2543 if (bnx2x_has_rx_work(fp)) {
2544 work_done += bnx2x_rx_int(fp, budget - work_done);
2546 /* must not complete if we consumed full budget */
2547 if (work_done >= budget)
2551 /* Fall out from the NAPI loop if needed */
2552 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2554 /* No need to update SB for FCoE L2 ring as long as
2555 * it's connected to the default SB and the SB
2556 * has been updated when NAPI was scheduled.
2558 if (IS_FCOE_FP(fp)) {
2559 napi_complete(napi);
2564 bnx2x_update_fpsb_idx(fp);
2565 /* bnx2x_has_rx_work() reads the status block,
2566 * thus we need to ensure that status block indices
2567 * have been actually read (bnx2x_update_fpsb_idx)
2568 * prior to this check (bnx2x_has_rx_work) so that
2569 * we won't write the "newer" value of the status block
2570 * to IGU (if there was a DMA right after
2571 * bnx2x_has_rx_work and if there is no rmb, the memory
2572 * reading (bnx2x_update_fpsb_idx) may be postponed
2573 * to right before bnx2x_ack_sb). In this case there
2574 * will never be another interrupt until there is
2575 * another update of the status block, while there
2576 * is still unhandled work.
2580 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2581 napi_complete(napi);
2582 /* Re-enable interrupts */
2583 DP(NETIF_MSG_RX_STATUS,
2584 "Update index to %d\n", fp->fp_hc_idx);
2585 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2586 le16_to_cpu(fp->fp_hc_idx),
2596 /* we split the first BD into headers and data BDs
2597 * to ease the pain of our fellow microcode engineers;
2598 * we use one mapping for both BDs
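 *
 * Illustrative example (numbers are hypothetical): for a TSO skb whose
 * linear part is 1514 bytes with hlen = 66 bytes of headers, the start
 * BD is trimmed to 66 bytes and a new data BD of 1448 bytes is added at
 * the same DMA mapping + 66, so only one mapping is created and unmapped.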
2600 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2601 struct bnx2x_fp_txdata *txdata,
2602 struct sw_tx_bd *tx_buf,
2603 struct eth_tx_start_bd **tx_bd, u16 hlen,
2604 u16 bd_prod, int nbd)
2606 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2607 struct eth_tx_bd *d_tx_bd;
2609 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2611 /* first fix first BD */
2612 h_tx_bd->nbd = cpu_to_le16(nbd);
2613 h_tx_bd->nbytes = cpu_to_le16(hlen);
2615 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2616 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
2618 /* now get a new data BD
2619 * (after the pbd) and fill it */
2620 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2621 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2623 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2624 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2626 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2627 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2628 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2630 /* this marks the BD as one that has no individual mapping */
2631 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2633 DP(NETIF_MSG_TX_QUEUED,
2634 "TSO split data size is %d (%x:%x)\n",
2635 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2638 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
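/* A note on bnx2x_csum_fix() below: for a positive fix it subtracts the
 * partial sum of the 'fix' bytes just before t_header from csum; for a
 * negative fix it adds the partial sum of the -fix bytes starting at
 * t_header; the result is folded, inverted and byte-swapped for the
 * parsing BD (presumably compensating for the checksum-start offset,
 * see the "HW bug: fixup the CSUM" caller in bnx2x_set_pbd_csum()).
 */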
2643 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2646 csum = (u16) ~csum_fold(csum_sub(csum,
2647 csum_partial(t_header - fix, fix, 0)));
2650 csum = (u16) ~csum_fold(csum_add(csum,
2651 csum_partial(t_header, -fix, 0)));
2653 return swab16(csum);
2656 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2660 if (skb->ip_summed != CHECKSUM_PARTIAL)
2664 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2666 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2667 rc |= XMIT_CSUM_TCP;
2671 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2672 rc |= XMIT_CSUM_TCP;
2676 if (skb_is_gso_v6(skb))
2677 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2678 else if (skb_is_gso(skb))
2679 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2684 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2685 /* check if packet requires linearization (packet is too fragmented)
2686 no need to check fragmentation if page size > 8K (there will be no
2687 violation of FW restrictions) */
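/* Illustrative walk-through (assuming MAX_FETCH_BD is 13, so wnd_size
 * is 10): for a GSO skb the loop below slides a window of wnd_size
 * consecutive BDs over the linear part plus frags; if any window holds
 * fewer than gso_size bytes the FW could not fetch a whole MSS from
 * MAX_FETCH_BD BDs, so the skb is flagged for linearization.
 */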
2688 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2693 int first_bd_sz = 0;
2695 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2696 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2698 if (xmit_type & XMIT_GSO) {
2699 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2700 /* Check if LSO packet needs to be copied:
2701 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2702 int wnd_size = MAX_FETCH_BD - 3;
2703 /* Number of windows to check */
2704 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2709 /* Headers length */
2710 hlen = (int)(skb_transport_header(skb) - skb->data) +
2713 /* Amount of data (w/o headers) on linear part of SKB */
2714 first_bd_sz = skb_headlen(skb) - hlen;
2716 wnd_sum = first_bd_sz;
2718 /* Calculate the first sum - it's special */
2719 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2721 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2723 /* If there was data on linear skb data - check it */
2724 if (first_bd_sz > 0) {
2725 if (unlikely(wnd_sum < lso_mss)) {
2730 wnd_sum -= first_bd_sz;
2733 /* Others are easier: run through the frag list and
2734 check all windows */
2735 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2737 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2739 if (unlikely(wnd_sum < lso_mss)) {
2744 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2747 /* in the non-LSO case a too fragmented packet should always be linearized
2754 if (unlikely(to_copy))
2755 DP(NETIF_MSG_TX_QUEUED,
2756 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
2757 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2758 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2764 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2767 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2768 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2769 ETH_TX_PARSE_BD_E2_LSO_MSS;
2770 if ((xmit_type & XMIT_GSO_V6) &&
2771 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2772 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2776 * bnx2x_set_pbd_gso - update PBD in GSO case.
2780 * @xmit_type: xmit flags
2782 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2783 struct eth_tx_parse_bd_e1x *pbd,
2786 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2787 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2788 pbd->tcp_flags = pbd_tcp_flags(skb);
2790 if (xmit_type & XMIT_GSO_V4) {
2791 pbd->ip_id = swab16(ip_hdr(skb)->id);
2792 pbd->tcp_pseudo_csum =
2793 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2795 0, IPPROTO_TCP, 0));
2798 pbd->tcp_pseudo_csum =
2799 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2800 &ipv6_hdr(skb)->daddr,
2801 0, IPPROTO_TCP, 0));
2803 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2807 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2809 * @bp: driver handle
2811 * @parsing_data: data to be updated
2812 * @xmit_type: xmit flags
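 *
 * Returns the total header length in bytes (up to and including the TCP
 * or UDP header), which the caller later compares against skb_headlen()
 * when deciding whether to split the first BD.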
2816 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2817 u32 *parsing_data, u32 xmit_type)
2820 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2821 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2822 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2824 if (xmit_type & XMIT_CSUM_TCP) {
2825 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2826 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2827 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2829 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2831 /* We support checksum offload for TCP and UDP only.
2832 * No need to pass the UDP header length - it's a constant.
2834 return skb_transport_header(skb) +
2835 sizeof(struct udphdr) - skb->data;
2838 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2839 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2841 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2843 if (xmit_type & XMIT_CSUM_V4)
2844 tx_start_bd->bd_flags.as_bitfield |=
2845 ETH_TX_BD_FLAGS_IP_CSUM;
2847 tx_start_bd->bd_flags.as_bitfield |=
2848 ETH_TX_BD_FLAGS_IPV6;
2850 if (!(xmit_type & XMIT_CSUM_TCP))
2851 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2855 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2857 * @bp: driver handle
2859 * @pbd: parse BD to be updated
2860 * @xmit_type: xmit flags
2862 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2863 struct eth_tx_parse_bd_e1x *pbd,
2866 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
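/* hlen and the *_w fields below are accumulated in 16-bit words, hence
 * the >> 1 and / 2 conversions (cf. the "sizes are in words - NOT
 * DWORDS" note in bnx2x_start_xmit()). */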
2868 /* for now NS flag is not used in Linux */
2870 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2871 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2873 pbd->ip_hlen_w = (skb_transport_header(skb) -
2874 skb_network_header(skb)) >> 1;
2876 hlen += pbd->ip_hlen_w;
2878 /* We support checksum offload for TCP and UDP only */
2879 if (xmit_type & XMIT_CSUM_TCP)
2880 hlen += tcp_hdrlen(skb) / 2;
2882 hlen += sizeof(struct udphdr) / 2;
2884 pbd->total_hlen_w = cpu_to_le16(hlen);
2887 if (xmit_type & XMIT_CSUM_TCP) {
2888 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2891 s8 fix = SKB_CS_OFF(skb); /* signed! */
2893 DP(NETIF_MSG_TX_QUEUED,
2894 "hlen %d fix %d csum before fix %x\n",
2895 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2897 /* HW bug: fixup the CSUM */
2898 pbd->tcp_pseudo_csum =
2899 bnx2x_csum_fix(skb_transport_header(skb),
2902 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2903 pbd->tcp_pseudo_csum);
2909 /* called with netif_tx_lock
2910 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2911 * netif_wake_queue()
2913 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2915 struct bnx2x *bp = netdev_priv(dev);
2917 struct netdev_queue *txq;
2918 struct bnx2x_fp_txdata *txdata;
2919 struct sw_tx_bd *tx_buf;
2920 struct eth_tx_start_bd *tx_start_bd, *first_bd;
2921 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2922 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2923 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2924 u32 pbd_e2_parsing_data = 0;
2925 u16 pkt_prod, bd_prod;
2928 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2931 __le16 pkt_size = 0;
2933 u8 mac_type = UNICAST_ADDRESS;
2935 #ifdef BNX2X_STOP_ON_ERROR
2936 if (unlikely(bp->panic))
2937 return NETDEV_TX_BUSY;
2940 txq_index = skb_get_queue_mapping(skb);
2941 txq = netdev_get_tx_queue(dev, txq_index);
2943 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2945 txdata = &bp->bnx2x_txq[txq_index];
2947 /* enable this debug print to view the transmission queue being used
2948 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
2949 txq_index, fp_index, txdata_index); */
2951 /* enable this debug print to view the transmission details
2952 DP(NETIF_MSG_TX_QUEUED,
2953 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
2954 txdata->cid, fp_index, txdata_index, txdata, fp); */
2956 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2957 skb_shinfo(skb)->nr_frags +
2959 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2960 /* Handle special storage cases separately */
2961 if (txdata->tx_ring_size != 0) {
2962 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2963 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
2964 netif_tx_stop_queue(txq);
2967 return NETDEV_TX_BUSY;
2970 DP(NETIF_MSG_TX_QUEUED,
2971 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
2972 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2973 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2975 eth = (struct ethhdr *)skb->data;
2977 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2978 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2979 if (is_broadcast_ether_addr(eth->h_dest))
2980 mac_type = BROADCAST_ADDRESS;
2982 mac_type = MULTICAST_ADDRESS;
2985 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2986 /* First, check if we need to linearize the skb (due to FW
2987 restrictions). No need to check fragmentation if page size > 8K
2988 (there will be no violation to FW restrictions) */
2989 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2990 /* Statistics of linearization */
2992 if (skb_linearize(skb) != 0) {
2993 DP(NETIF_MSG_TX_QUEUED,
2994 "SKB linearization failed - silently dropping this SKB\n");
2995 dev_kfree_skb_any(skb);
2996 return NETDEV_TX_OK;
3000 /* Map skb linear data for DMA */
3001 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3002 skb_headlen(skb), DMA_TO_DEVICE);
3003 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3004 DP(NETIF_MSG_TX_QUEUED,
3005 "SKB mapping failed - silently dropping this SKB\n");
3006 dev_kfree_skb_any(skb);
3007 return NETDEV_TX_OK;
3010 Please read carefully. First we use one BD which we mark as start,
3011 then we have a parsing info BD (used for TSO or xsum),
3012 and only then we have the rest of the TSO BDs.
3013 (don't forget to mark the last one as last,
3014 and to unmap only AFTER you write to the BD ...)
3015 And above all, all pbd sizes are in words - NOT DWORDS!
3018 /* get current pkt produced now - advance it just before sending packet
3019 * since mapping of pages may fail and cause packet to be dropped
3021 pkt_prod = txdata->tx_pkt_prod;
3022 bd_prod = TX_BD(txdata->tx_bd_prod);
3024 /* get a tx_buf and first BD
3025 * tx_start_bd may be changed during SPLIT,
3026 * but first_bd will always stay first
3028 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3029 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3030 first_bd = tx_start_bd;
3032 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3033 SET_FLAG(tx_start_bd->general_data,
3034 ETH_TX_START_BD_PARSE_NBDS,
3038 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
3040 /* remember the first BD of the packet */
3041 tx_buf->first_bd = txdata->tx_bd_prod;
3045 DP(NETIF_MSG_TX_QUEUED,
3046 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3047 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3049 if (vlan_tx_tag_present(skb)) {
3050 tx_start_bd->vlan_or_ethertype =
3051 cpu_to_le16(vlan_tx_tag_get(skb));
3052 tx_start_bd->bd_flags.as_bitfield |=
3053 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3055 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3057 /* turn on parsing and get a BD */
3058 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3060 if (xmit_type & XMIT_CSUM)
3061 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3063 if (!CHIP_IS_E1x(bp)) {
3064 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3065 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3066 /* Set PBD in checksum offload case */
3067 if (xmit_type & XMIT_CSUM)
3068 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3069 &pbd_e2_parsing_data,
3073 * fill in the MAC addresses in the PBD - for local switching
3076 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3077 &pbd_e2->src_mac_addr_mid,
3078 &pbd_e2->src_mac_addr_lo,
3080 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3081 &pbd_e2->dst_mac_addr_mid,
3082 &pbd_e2->dst_mac_addr_lo,
3086 SET_FLAG(pbd_e2_parsing_data,
3087 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3089 u16 global_data = 0;
3090 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3091 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3092 /* Set PBD in checksum offload case */
3093 if (xmit_type & XMIT_CSUM)
3094 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3096 SET_FLAG(global_data,
3097 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3098 pbd_e1x->global_data |= cpu_to_le16(global_data);
3101 /* Setup the data pointer of the first BD of the packet */
3102 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3103 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3104 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3105 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3106 pkt_size = tx_start_bd->nbytes;
3108 DP(NETIF_MSG_TX_QUEUED,
3109 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
3110 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3111 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
3112 tx_start_bd->bd_flags.as_bitfield,
3113 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3115 if (xmit_type & XMIT_GSO) {
3117 DP(NETIF_MSG_TX_QUEUED,
3118 "TSO packet len %d hlen %d total len %d tso size %d\n",
3119 skb->len, hlen, skb_headlen(skb),
3120 skb_shinfo(skb)->gso_size);
3122 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3124 if (unlikely(skb_headlen(skb) > hlen))
3125 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3128 if (!CHIP_IS_E1x(bp))
3129 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3132 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3135 /* Set the PBD's parsing_data field if not zero
3136 * (for the chips newer than 57711).
3138 if (pbd_e2_parsing_data)
3139 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3141 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3143 /* Handle fragmented skb */
3144 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3145 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3147 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3148 skb_frag_size(frag), DMA_TO_DEVICE);
3149 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3150 unsigned int pkts_compl = 0, bytes_compl = 0;
3152 DP(NETIF_MSG_TX_QUEUED,
3153 "Unable to map page - dropping packet...\n");
3155 /* we need to unmap all buffers already mapped
3157 * first_bd->nbd needs to be properly updated
3158 * before call to bnx2x_free_tx_pkt
3160 first_bd->nbd = cpu_to_le16(nbd);
3161 bnx2x_free_tx_pkt(bp, txdata,
3162 TX_BD(txdata->tx_pkt_prod),
3163 &pkts_compl, &bytes_compl);
3164 return NETDEV_TX_OK;
3167 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3168 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3169 if (total_pkt_bd == NULL)
3170 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3172 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3173 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3174 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3175 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3178 DP(NETIF_MSG_TX_QUEUED,
3179 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3180 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3181 le16_to_cpu(tx_data_bd->nbytes));
3184 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3186 /* update with actual num BDs */
3187 first_bd->nbd = cpu_to_le16(nbd);
3189 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3191 /* now send a tx doorbell, counting the next BD
3192 * if the packet contains or ends with it
3194 if (TX_BD_POFF(bd_prod) < nbd)
3197 /* total_pkt_bytes should be set on the first data BD if
3198 * it's not an LSO packet and there is more than one
3199 * data BD. In this case pkt_size is limited by an MTU value.
3200 * However we prefer to set it for an LSO packet (while we don't
3201 * have to) in order to save some CPU cycles in a non-LSO
3202 * case, where we care about them much more.
3204 if (total_pkt_bd != NULL)
3205 total_pkt_bd->total_pkt_bytes = pkt_size;
3208 DP(NETIF_MSG_TX_QUEUED,
3209 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3210 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3211 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3212 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3213 le16_to_cpu(pbd_e1x->total_hlen_w));
3215 DP(NETIF_MSG_TX_QUEUED,
3216 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3217 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3218 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3219 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3220 pbd_e2->parsing_data);
3221 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3223 netdev_tx_sent_queue(txq, skb->len);
3225 skb_tx_timestamp(skb);
3227 txdata->tx_pkt_prod++;
3229 * Make sure that the BD data is updated before updating the producer
3230 * since FW might read the BD right after the producer is updated.
3231 * This is only applicable for weak-ordered memory model archs such
3232 * as IA-64. The following barrier is also mandatory since FW will
3233 * assume packets must have BDs.
3237 txdata->tx_db.data.prod += nbd;
3240 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3244 txdata->tx_bd_prod += nbd;
3246 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3247 netif_tx_stop_queue(txq);
3249 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3250 * ordering of set_bit() in netif_tx_stop_queue() and read of
3254 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
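/* re-check tx_avail: bnx2x_tx_int() may have freed BDs between the
 * availability test above and netif_tx_stop_queue(), so wake the queue
 * here if room appeared rather than losing the wakeup. */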
3255 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
3256 netif_tx_wake_queue(txq);
3260 return NETDEV_TX_OK;
3264 * bnx2x_setup_tc - routine to configure net_device for multi tc
3266 * @dev: net device to configure
3267 * @num_tc: number of traffic classes to enable
3269 * callback connected to the ndo_setup_tc function pointer
3271 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3273 int cos, prio, count, offset;
3274 struct bnx2x *bp = netdev_priv(dev);
3276 /* setup tc must be called under rtnl lock */
3279 /* no traffic classes requested. aborting */
3281 netdev_reset_tc(dev);
3285 /* requested to support too many traffic classes */
3286 if (num_tc > bp->max_cos) {
3287 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3288 num_tc, bp->max_cos);
3292 /* declare amount of supported traffic classes */
3293 if (netdev_set_num_tc(dev, num_tc)) {
3294 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3298 /* configure priority to traffic class mapping */
3299 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3300 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3301 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3302 "mapping priority %d to tc %d\n",
3303 prio, bp->prio_to_cos[prio]);
3307 /* Use this configuration to differentiate tc0 from other COSes
3308 This can be used for ets or pfc, and save the effort of setting
3309 up a multi class queue disc or negotiating DCBX with a switch
3310 netdev_set_prio_tc_map(dev, 0, 0);
3311 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3312 for (prio = 1; prio < 16; prio++) {
3313 netdev_set_prio_tc_map(dev, prio, 1);
3314 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3317 /* configure traffic class to transmission queue mapping */
3318 for (cos = 0; cos < bp->max_cos; cos++) {
3319 count = BNX2X_NUM_ETH_QUEUES(bp);
3320 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
3321 netdev_set_tc_queue(dev, cos, count, offset);
3322 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3323 "mapping tc %d to offset %d count %d\n",
3324 cos, offset, count);
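/* For example (hypothetical counts, assuming BNX2X_NUM_NON_CNIC_QUEUES
 * equals BNX2X_NUM_ETH_QUEUES here): with 4 ETH queues, tc0 maps to txq
 * offsets 0-3, tc1 to 4-7 and tc2 to 8-11, i.e. each tc owns a block of
 * 'count' queues starting at 'offset'. */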
3330 /* called with rtnl_lock */
3331 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3333 struct sockaddr *addr = p;
3334 struct bnx2x *bp = netdev_priv(dev);
3337 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3338 BNX2X_ERR("Requested MAC address is not valid\n");
3343 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3344 !is_zero_ether_addr(addr->sa_data)) {
3345 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3350 if (netif_running(dev)) {
3351 rc = bnx2x_set_eth_mac(bp, false);
3356 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
3357 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3359 if (netif_running(dev))
3360 rc = bnx2x_set_eth_mac(bp, true);
3365 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3367 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3368 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3373 if (IS_FCOE_IDX(fp_index)) {
3374 memset(sb, 0, sizeof(union host_hc_status_block));
3375 fp->status_blk_mapping = 0;
3380 if (!CHIP_IS_E1x(bp))
3381 BNX2X_PCI_FREE(sb->e2_sb,
3382 bnx2x_fp(bp, fp_index,
3383 status_blk_mapping),
3384 sizeof(struct host_hc_status_block_e2));
3386 BNX2X_PCI_FREE(sb->e1x_sb,
3387 bnx2x_fp(bp, fp_index,
3388 status_blk_mapping),
3389 sizeof(struct host_hc_status_block_e1x));
3394 if (!skip_rx_queue(bp, fp_index)) {
3395 bnx2x_free_rx_bds(fp);
3397 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3398 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3399 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3400 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3401 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3403 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3404 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3405 sizeof(struct eth_fast_path_rx_cqe) *
3409 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3410 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3411 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3412 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3416 if (!skip_tx_queue(bp, fp_index)) {
3417 /* fastpath tx rings: tx_buf tx_desc */
3418 for_each_cos_in_tx_queue(fp, cos) {
3419 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3421 DP(NETIF_MSG_IFDOWN,
3422 "freeing tx memory of fp %d cos %d cid %d\n",
3423 fp_index, cos, txdata->cid);
3425 BNX2X_FREE(txdata->tx_buf_ring);
3426 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3427 txdata->tx_desc_mapping,
3428 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3431 /* end of fastpath */
3434 void bnx2x_free_fp_mem(struct bnx2x *bp)
3437 for_each_queue(bp, i)
3438 bnx2x_free_fp_mem_at(bp, i);
3441 static void set_sb_shortcuts(struct bnx2x *bp, int index)
3443 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3444 if (!CHIP_IS_E1x(bp)) {
3445 bnx2x_fp(bp, index, sb_index_values) =
3446 (__le16 *)status_blk.e2_sb->sb.index_values;
3447 bnx2x_fp(bp, index, sb_running_index) =
3448 (__le16 *)status_blk.e2_sb->sb.running_index;
3450 bnx2x_fp(bp, index, sb_index_values) =
3451 (__le16 *)status_blk.e1x_sb->sb.index_values;
3452 bnx2x_fp(bp, index, sb_running_index) =
3453 (__le16 *)status_blk.e1x_sb->sb.running_index;
3457 /* Returns the number of actually allocated BDs */
3458 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3461 struct bnx2x *bp = fp->bp;
3462 u16 ring_prod, cqe_ring_prod;
3463 int i, failure_cnt = 0;
3465 fp->rx_comp_cons = 0;
3466 cqe_ring_prod = ring_prod = 0;
3468 /* This routine is called only during init, so
3469 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3471 for (i = 0; i < rx_ring_size; i++) {
3472 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3476 ring_prod = NEXT_RX_IDX(ring_prod);
3477 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3478 WARN_ON(ring_prod <= (i - failure_cnt));
3482 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3483 i - failure_cnt, fp->index);
3485 fp->rx_bd_prod = ring_prod;
3486 /* Limit the CQE producer by the CQE ring size */
3487 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3489 fp->rx_pkt = fp->rx_calls = 0;
3491 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3493 return i - failure_cnt;
3496 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3500 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3501 struct eth_rx_cqe_next_page *nextpg;
3503 nextpg = (struct eth_rx_cqe_next_page *)
3504 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3506 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3507 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3509 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3510 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3514 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3516 union host_hc_status_block *sb;
3517 struct bnx2x_fastpath *fp = &bp->fp[index];
3520 int rx_ring_size = 0;
3523 if (!bp->rx_ring_size &&
3524 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3525 rx_ring_size = MIN_RX_SIZE_NONTPA;
3526 bp->rx_ring_size = rx_ring_size;
3529 if (!bp->rx_ring_size) {
3530 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3532 if (CHIP_IS_E3(bp)) {
3533 u32 cfg = SHMEM_RD(bp,
3534 dev_info.port_hw_config[BP_PORT(bp)].
3537 /* Decrease ring size for 1G functions */
3538 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3539 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3543 /* allocate at least the number of buffers required by FW */
3544 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3545 MIN_RX_SIZE_TPA, rx_ring_size);
3547 bp->rx_ring_size = rx_ring_size;
3548 } else /* if rx_ring_size specified - use it */
3549 rx_ring_size = bp->rx_ring_size;
3552 sb = &bnx2x_fp(bp, index, status_blk);
3554 if (!IS_FCOE_IDX(index)) {
3557 if (!CHIP_IS_E1x(bp))
3558 BNX2X_PCI_ALLOC(sb->e2_sb,
3559 &bnx2x_fp(bp, index, status_blk_mapping),
3560 sizeof(struct host_hc_status_block_e2));
3562 BNX2X_PCI_ALLOC(sb->e1x_sb,
3563 &bnx2x_fp(bp, index, status_blk_mapping),
3564 sizeof(struct host_hc_status_block_e1x));
3569 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3570 * set shortcuts for it.
3572 if (!IS_FCOE_IDX(index))
3573 set_sb_shortcuts(bp, index);
3576 if (!skip_tx_queue(bp, index)) {
3577 /* fastpath tx rings: tx_buf tx_desc */
3578 for_each_cos_in_tx_queue(fp, cos) {
3579 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3582 "allocating tx memory of fp %d cos %d\n",
3585 BNX2X_ALLOC(txdata->tx_buf_ring,
3586 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3587 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3588 &txdata->tx_desc_mapping,
3589 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3594 if (!skip_rx_queue(bp, index)) {
3595 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3596 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3597 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3598 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3599 &bnx2x_fp(bp, index, rx_desc_mapping),
3600 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3602 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3603 &bnx2x_fp(bp, index, rx_comp_mapping),
3604 sizeof(struct eth_fast_path_rx_cqe) *
3608 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3609 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3610 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3611 &bnx2x_fp(bp, index, rx_sge_mapping),
3612 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3614 bnx2x_set_next_page_rx_bd(fp);
3617 bnx2x_set_next_page_rx_cq(fp);
3620 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3621 if (ring_size < rx_ring_size)
3627 /* handles low memory cases */
3629 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3631 /* FW will drop all packets if queue is not big enough.
3632 * In these cases we disable the queue.
3633 * Min size is different for OOO, TPA and non-TPA queues.
3635 if (ring_size < (fp->disable_tpa ?
3636 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3637 /* release memory allocated for this queue */
3638 bnx2x_free_fp_mem_at(bp, index);
3644 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3649 * 1. Allocate FP for leading - fatal if error
3650 * 2. {CNIC} Allocate FCoE FP - fatal if error
3651 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3652 * 4. Allocate RSS - fix number of queues if error
3656 if (bnx2x_alloc_fp_mem_at(bp, 0))
3662 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
3663 /* we will fail load process instead of mark
3670 for_each_nondefault_eth_queue(bp, i)
3671 if (bnx2x_alloc_fp_mem_at(bp, i))
3674 /* handle memory failures */
3675 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3676 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3681 * move non eth FPs next to last eth FP
3682 * must be done in that order
3683 * FCOE_IDX < FWD_IDX < OOO_IDX
3686 /* move FCoE fp even if NO_FCOE_FLAG is on */
3687 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
3689 bp->num_queues -= delta;
3690 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3691 bp->num_queues + delta, bp->num_queues);
3697 void bnx2x_free_mem_bp(struct bnx2x *bp)
3699 kfree(bp->fp->tpa_info);
3702 kfree(bp->fp_stats);
3703 kfree(bp->bnx2x_txq);
3704 kfree(bp->msix_table);
3708 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3710 struct bnx2x_fastpath *fp;
3711 struct msix_entry *tbl;
3712 struct bnx2x_ilt *ilt;
3713 int msix_table_size = 0;
3718 * The biggest MSI-X table we might need is the maximum number of fast
3719 * path IGU SBs plus the default SB (for the PF).
3721 msix_table_size = bp->igu_sb_cnt + 1;
3723 /* fp array: RSS plus CNIC related L2 queues */
3724 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
3725 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3727 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
3730 for (i = 0; i < fp_array_size; i++) {
3732 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
3733 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
3734 if (!(fp[i].tpa_info))
3740 /* allocate sp objs */
3741 bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
3746 /* allocate fp_stats */
3747 bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
3752 /* Allocate memory for the transmission queues array */
3753 bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
3755 bp->bnx2x_txq_size++;
3757 bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
3758 sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
3763 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3766 bp->msix_table = tbl;
3769 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3776 bnx2x_free_mem_bp(bp);
3781 int bnx2x_reload_if_running(struct net_device *dev)
3783 struct bnx2x *bp = netdev_priv(dev);
3785 if (unlikely(!netif_running(dev)))
3788 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
3789 return bnx2x_nic_load(bp, LOAD_NORMAL);
3792 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3794 u32 sel_phy_idx = 0;
3795 if (bp->link_params.num_phys <= 1)
3798 if (bp->link_vars.link_up) {
3799 sel_phy_idx = EXT_PHY1;
3800 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3801 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3802 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3803 sel_phy_idx = EXT_PHY2;
3806 switch (bnx2x_phy_selection(&bp->link_params)) {
3807 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3808 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3809 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3810 sel_phy_idx = EXT_PHY1;
3812 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3813 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3814 sel_phy_idx = EXT_PHY2;
3822 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3824 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3826 * The selected active PHY is always after swapping (in case PHY
3827 * swapping is enabled). So when swapping is enabled, we need to reverse
3831 if (bp->link_params.multi_phy_config &
3832 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3833 if (sel_phy_idx == EXT_PHY1)
3834 sel_phy_idx = EXT_PHY2;
3835 else if (sel_phy_idx == EXT_PHY2)
3836 sel_phy_idx = EXT_PHY1;
3838 return LINK_CONFIG_IDX(sel_phy_idx);
3841 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3842 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3844 struct bnx2x *bp = netdev_priv(dev);
3845 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3848 case NETDEV_FCOE_WWNN:
3849 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3850 cp->fcoe_wwn_node_name_lo);
3852 case NETDEV_FCOE_WWPN:
3853 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3854 cp->fcoe_wwn_port_name_lo);
3857 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
3865 /* called with rtnl_lock */
3866 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3868 struct bnx2x *bp = netdev_priv(dev);
3870 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3871 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
3875 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3876 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
3877 BNX2X_ERR("Can't support requested MTU size\n");
3881 /* This does not race with packet allocation
3882 * because the actual alloc size is
3883 * only updated as part of load
3887 return bnx2x_reload_if_running(dev);
3890 netdev_features_t bnx2x_fix_features(struct net_device *dev,
3891 netdev_features_t features)
3893 struct bnx2x *bp = netdev_priv(dev);
3895 /* TPA requires Rx CSUM offloading */
3896 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
3897 features &= ~NETIF_F_LRO;
3898 features &= ~NETIF_F_GRO;
3904 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
3906 struct bnx2x *bp = netdev_priv(dev);
3907 u32 flags = bp->flags;
3908 bool bnx2x_reload = false;
3910 if (features & NETIF_F_LRO)
3911 flags |= TPA_ENABLE_FLAG;
3913 flags &= ~TPA_ENABLE_FLAG;
3915 if (features & NETIF_F_GRO)
3916 flags |= GRO_ENABLE_FLAG;
3918 flags &= ~GRO_ENABLE_FLAG;
3920 if (features & NETIF_F_LOOPBACK) {
3921 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3922 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3923 bnx2x_reload = true;
3926 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3927 bp->link_params.loopback_mode = LOOPBACK_NONE;
3928 bnx2x_reload = true;
3932 if (flags ^ bp->flags) {
3934 bnx2x_reload = true;
3938 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3939 return bnx2x_reload_if_running(dev);
3940 /* else: bnx2x_nic_load() will be called at end of recovery */
3946 void bnx2x_tx_timeout(struct net_device *dev)
3948 struct bnx2x *bp = netdev_priv(dev);
3950 #ifdef BNX2X_STOP_ON_ERROR
3955 smp_mb__before_clear_bit();
3956 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3957 smp_mb__after_clear_bit();
3959 /* This allows the netif to be shut down gracefully before resetting */
3960 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3963 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3965 struct net_device *dev = pci_get_drvdata(pdev);
3969 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3972 bp = netdev_priv(dev);
3976 pci_save_state(pdev);
3978 if (!netif_running(dev)) {
3983 netif_device_detach(dev);
3985 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
3987 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3994 int bnx2x_resume(struct pci_dev *pdev)
3996 struct net_device *dev = pci_get_drvdata(pdev);
4001 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4004 bp = netdev_priv(dev);
4006 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4007 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4013 pci_restore_state(pdev);
4015 if (!netif_running(dev)) {
4020 bnx2x_set_power_state(bp, PCI_D0);
4021 netif_device_attach(dev);
4023 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4031 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4034 /* ustorm cxt validation */
4035 cxt->ustorm_ag_context.cdu_usage =
4036 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4037 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4038 /* xcontext validation */
4039 cxt->xstorm_ag_context.cdu_reserved =
4040 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4041 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4044 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4045 u8 fw_sb_id, u8 sb_index,
4049 u32 addr = BAR_CSTRORM_INTMEM +
4050 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4051 REG_WR8(bp, addr, ticks);
4053 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4054 port, fw_sb_id, sb_index, ticks);
4057 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4058 u16 fw_sb_id, u8 sb_index,
4061 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4062 u32 addr = BAR_CSTRORM_INTMEM +
4063 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4064 u16 flags = REG_RD16(bp, addr);
4066 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4067 flags |= enable_flag;
4068 REG_WR16(bp, addr, flags);
4070 "port %x fw_sb_id %d sb_index %d disable %d\n",
4071 port, fw_sb_id, sb_index, disable);
4074 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4075 u8 sb_index, u8 disable, u16 usec)
4077 int port = BP_PORT(bp);
4078 u8 ticks = usec / BNX2X_BTR;
4080 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
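/* a zero usec value disables coalescing for this SB index as well */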
4082 disable = disable ? 1 : (usec ? 0 : 1);
4083 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);