/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"
static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);
static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}
static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}
static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (is_kdump_kernel())
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}
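/* Worked example (illustrative): with the bnx2x_num_queues module parameter
 * left at 0, netif_get_num_default_rss_queues() supplies the kernel's RSS
 * default; under kdump the count is forced down to 1 first, and clamp()
 * then bounds the final value to [1, BNX2X_MAX_QUEUES(bp)].
 */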
/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
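/* Worked example (illustrative): with 8 ETH queues and max_cos = 3,
 * old_max_eth_txqs = 24. Moving the FCoE fastpath from index 8 to 6
 * (two ETH queues were not allocated) gives new_max_eth_txqs = 18, so
 * the FCoE txdata slides from bnx2x_txq[24 + FCOE_TXQ_IDX_OFFSET] down
 * to bnx2x_txq[18 + FCOE_TXQ_IDX_OFFSET].
 */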
/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}
/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overwritten
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}
int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
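/* For instance, after the first port-0 load on path 0 with no MCP present,
 * bnx2x_load_count[0] holds {1, 1, 0}: one common load, one port-0 load,
 * and no port-1 loads yet.
 */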
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
		/* Skip second parse bd... */
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		dev_kfree_skb_any(skb);
	}

	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
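/* Illustration: sge_mask is a vector of u64 elements, each covering 64
 * SGEs (BIT_VEC64_ELEM_SZ). When completions have cleared two whole
 * elements, the walk above re-arms both to BIT_VEC64_ELEM_ONE_MASK and
 * advances rx_sge_prod by 2 * 64 = 128 entries in a single step.
 */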
/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);

		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
}
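/* Example of the full_page rounding above (illustrative numbers): if
 * SGE_PAGES were 8192 and the FW reported gro_size = 1460, then
 * full_page = 8192 / 1460 * 1460 = 7300, i.e. the largest whole multiple
 * of the segment size that fits in one SGE buffer.
 */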
/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
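/* The 12 bytes break down as: two 1-byte NOPs for alignment, 1 byte
 * kind (8), 1 byte length (10), then the two 4-byte timestamp values
 * (TSval and TSecr).
 */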
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there is one, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
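/* Worked example (illustrative): an IPv4 aggregation carrying timestamps
 * with len_on_bd = 1514 gives hdrs_len = 14 (ETH) + 20 (IP) + 20 (TCP) +
 * 12 (timestamp option) = 66, so gso_size = 1448. Without timestamps the
 * same frame yields the classic 1460 MSS.
 */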
static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	struct bnx2x_alloc_pool *pool = &fp->page_pool;
	dma_addr_t mapping;

	if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {

		/* put page reference used by the memory pool, since we
		 * won't be using this page as the mempool anymore.
		 */
		if (pool->page)
			put_page(pool->page);

		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
		if (unlikely(!pool->page)) {
			BNX2X_ERR("Can't alloc sge\n");
			return -ENOMEM;
		}

		pool->offset = 0;
	}

	mapping = dma_map_page(&bp->pdev->dev, pool->page,
			       pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	get_page(pool->page);
	sw_buf->page = pool->page;
	sw_buf->offset = pool->offset;

	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	pool->offset += SGE_PAGE_SIZE;

	return 0;
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		 * (meaning that the "next" element will consume 2 indices)
		 */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		 * where we are and drop the whole packet
		 */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page,
					   old_rx_pg.offset, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;

			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;

				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page,
						   old_rx_pg.offset + offset,
						   len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfp_mask & __GFP_WAIT))
			return (void *)__get_free_page(gfp_mask);

		return netdev_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			    void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_set_network_header(skb, 0);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
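/* Note (sketch of the reasoning): tcp_v4_check()/tcp_v6_check() called with
 * a zero base checksum return the checksum of the TCP pseudo-header alone;
 * seeding th->check with its complement is the state tcp_gro_complete()
 * expects before it marks the merged super-packet for checksum completion
 * and hands it up the stack.
 */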
static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	 * pool entry status to BNX2X_TPA_STOP even if new skb allocation
	 * fails.
	 */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
						       tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	if (new_data)
		bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}
static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
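/* Decision summary (illustrative):
 *   L4-no-validation flag set          -> leave skb as CHECKSUM_NONE
 *   validated, bad IP/L4 csum flags    -> bump hw_csum_err, CHECKSUM_NONE
 *   validated, no error flags          -> CHECKSUM_UNNECESSARY
 */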
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif
	if (budget <= 0)
		return rx_pkt;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		u32 rxhash;
		enum pkt_hash_types rxhash_type;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* A rmb() is required to ensure that the CQE is not read
		 * before it is written by the adapter DMA. PCI ordering
		 * rules will make sure the other fields are written before
		 * the marker at the end of struct eth_fast_path_rx_cqe
		 * but without rmb() a weakly ordered processor can process
		 * stale data. Without the barrier TPA state-machine might
		 * enter inconsistent state and kernel stack might be
		 * provided with incorrect packet description - these lead
		 * to various kernel crashes.
		 */
		rmb();

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->mode == TPA_MODE_DISABLED &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;

				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = napi_alloc_skb(&fp->napi, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
						       GFP_ATOMIC) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
		skb_set_hash(skb, rxhash, rxhash_type);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		/* Check if this packet was timestamped */
		if (unlikely(cqe->fast_path_cqe.type_error_flags &
			     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
			bnx2x_set_rx_ts(bp, skb);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		skb_mark_napi_id(skb, &fp->napi);

		if (bnx2x_fp_ll_polling(fp))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;

	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * Uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	memset(data, 0, sizeof(*data));

	if (IS_PF(bp)) {
		/* Fill the report data: effective line speed */
		data->line_speed = bnx2x_get_mf_speed(bp);

		/* Link is down */
		if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		if (!BNX2X_NUM_ETH_QUEUES(bp))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		/* Full DUPLEX */
		if (bp->link_vars.duplex == DUPLEX_FULL)
			__set_bit(BNX2X_LINK_REPORT_FD,
				  &data->link_report_flags);

		/* Rx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				  &data->link_report_flags);

		/* Tx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				  &data->link_report_flags);
	} else { /* VF */
		*data = bp->vf_link_vars;
	}
}
/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}
/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report a new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	/* propagate status to VFs */
	if (IS_PF(bp))
		bnx2x_iov_link_update(bp);

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}

		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}
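/* Example output (illustrative):
 * "bnx2x 0000:01:00.0 eth0: NIC Link is Up, 10000 Mbps full duplex,
 *  Flow control: ON - receive & transmit"
 */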
static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}
static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}
void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (fp->mode != TPA_MODE_DISABLED) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data =
					bnx2x_frag_alloc(fp, GFP_KERNEL);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->mode = TPA_MODE_DISABLED;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
						       GFP_KERNEL) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->mode = TPA_MODE_DISABLED;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}
static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{
	u8 cos;
	struct bnx2x *bp = fp->bp;

	for_each_cos_in_tx_queue(fp, cos) {
		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
		unsigned pkts_compl = 0, bytes_compl = 0;

		u16 sw_prod = txdata->tx_pkt_prod;
		u16 sw_cons = txdata->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
					  &pkts_compl, &bytes_compl);
			sw_cons++;
		}

		netdev_tx_reset_queue(
			netdev_get_tx_queue(bp->dev,
					    txdata->txq_index));
	}
}

static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}
static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		bnx2x_frag_free(fp, data);
	}
}

static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		bnx2x_free_rx_bds(&bp->fp[j]);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (fp->mode != TPA_MODE_DISABLED)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs_cnic(bp);
	bnx2x_free_rx_skbs_cnic(bp);
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}
/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;

	/* VFs don't have a default SB */
	if (IS_PF(bp)) {
		free_irq(bp->msix_table[offset].vector, bp->dev);
		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
		   bp->msix_table[offset].vector);
		offset++;
	}

	if (CNIC_SUPPORT(bp)) {
		if (nvecs == offset)
			return;
		offset++;
	}

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}
void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);

		/* vfs don't have a default status block */
		if (IS_PF(bp))
			nvecs++;

		bnx2x_free_msix_irqs(bp, nvecs);
	} else {
		free_irq(bp->dev->irq, bp->dev);
	}
}
int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc;

	/* VFs don't have a default status block */
	if (IS_PF(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
			       bp->msix_table[0].entry);
		msix_vec++;
	}

	/* Cnic requires an msix vector for itself */
	if (CNIC_SUPPORT(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
			       msix_vec, bp->msix_table[msix_vec].entry);
		msix_vec++;
	}

	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
			       msix_vec, msix_vec, i);
		msix_vec++;
	}

	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
	   msix_vec);

	rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc == -ENOSPC) {
		/* Get by with single vector */
		rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
		if (rc < 0) {
			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
				       rc);
			goto no_msix;
		}

		BNX2X_DEV_INFO("Using single MSI-X vector\n");
		bp->flags |= USING_SINGLE_MSIX_FLAG;

		BNX2X_DEV_INFO("set number of queues to 1\n");
		bp->num_ethernet_queues = 1;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
	} else if (rc < 0) {
		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
		goto no_msix;
	} else if (rc < msix_vec) {
		/* how many fewer vectors will we have? */
		int diff = msix_vec - rc;

		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);

		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_ethernet_queues -= diff;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

		BNX2X_DEV_INFO("New queue configuration set: %d\n",
			       bp->num_queues);
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;

no_msix:
	/* fall to INTx if not enough memory */
	if (rc == -ENOMEM)
		bp->flags |= DISABLE_MSI_FLAG;

	return rc;
}
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	/* no default status block for vf */
	if (IS_PF(bp)) {
		rc = request_irq(bp->msix_table[offset++].vector,
				 bnx2x_msix_sp_int, 0,
				 bp->dev->name, bp->dev);
		if (rc) {
			BNX2X_ERR("request sp irq failed\n");
			return -EBUSY;
		}
	}

	if (CNIC_SUPPORT(bp))
		offset++;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
				  bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	if (IS_PF(bp)) {
		offset = 1 + CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
			    bp->msix_table[0].vector,
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	} else {
		offset = CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	}

	return 0;
}
int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		BNX2X_DEV_INFO("MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	unsigned int irq;

	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
		flags = 0;
	else
		flags = IRQF_SHARED;

	if (bp->flags & USING_MSIX_FLAG)
		irq = bp->msix_table[0].vector;
	else
		irq = bp->pdev->irq;

	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
}
static int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->dev->irq);
		}
		if (bp->flags & USING_MSIX_FLAG) {
			bp->dev->irq = bp->msix_table[0].vector;
			netdev_info(bp->dev, "using MSIX IRQ %d\n",
				    bp->dev->irq);
		}
	}

	return 0;
}
static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		bnx2x_fp_busy_poll_init(&bp->fp[i]);
		napi_enable(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_fp_busy_poll_init(&bp->fp[i]);
		napi_enable(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		napi_disable(&bnx2x_fp(bp, i, napi));
		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
			usleep_range(1000, 2000);
	}
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		napi_disable(&bnx2x_fp(bp, i, napi));
		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
			usleep_range(1000, 2000);
	}
}
void bnx2x_netif_start(struct bnx2x *bp)
{
	if (netif_running(bp->dev)) {
		bnx2x_napi_enable(bp);
		if (CNIC_LOADED(bp))
			bnx2x_napi_enable_cnic(bp);
		bnx2x_int_enable(bp);
		if (bp->state == BNX2X_STATE_OPEN)
			netif_tx_wake_all_queues(bp->dev);
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (CNIC_LOADED(bp))
		bnx2x_napi_disable_cnic(bp);
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr =
				(struct vlan_ethhdr *)skb->data;

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe_tx(bp, txq_index);
	}

	/* select a non-FCoE queue */
	return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
{
	/* RSS queues */
	bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);

	/* override in STORAGE SD modes */
	if (IS_MF_STORAGE_ONLY(bp))
		bp->num_ethernet_queues = 1;

	/* Add special queues */
	bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
}
/**
 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 *
 * @bp:		Driver handle
 *
 * We currently support at most 16 Tx queues for each CoS, thus we will
 * allocate a multiple of 16 for ETH L2 rings according to the value of the
 * bp->max_cos.
 *
 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
 * index after all ETH L2 indices.
 *
 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 * 16..31,...) with indices that are not coupled with any real Tx queue.
 *
 * The proper configuration of skb->queue_mapping is handled by
 * bnx2x_select_queue() and __skb_tx_hash().
 *
 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
 */
static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
{
	int rc, tx, rx;

	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
	rx = BNX2X_NUM_ETH_QUEUES(bp);

	/* account for fcoe queue */
	if (include_cnic && !NO_FCOE(bp)) {
		rx++;
		tx++;
	}

	rc = netif_set_real_num_tx_queues(bp->dev, tx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
		return rc;
	}
	rc = netif_set_real_num_rx_queues(bp->dev, rx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
		return rc;
	}

	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
	   tx, rx);

	return rc;
}
static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u32 mtu;

		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
		if (IS_FCOE_IDX(i))
			/*
			 * Although there are no IP frames expected to arrive to
			 * this ring we still want to add an
			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
			 * overrun
			 */
			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
		else
			mtu = bp->dev->mtu;
		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
				  IP_HEADER_ALIGNMENT_PADDING +
				  ETH_OVERHEAD +
				  mtu +
				  BNX2X_FW_RX_ALIGN_END;
		/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
		if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
			fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
		else
			fp->rx_frag_size = 0;
	}
}
static int bnx2x_init_rss(struct bnx2x *bp)
{
	int i;
	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);

	/* Prepare the initial contents for the indirection table if RSS is
	 * enabled
	 */
	for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
		bp->rss_conf_obj.ind_table[i] =
			bp->fp->cl_id +
			ethtool_rxfh_indir_default(i, num_eth_queues);

	/*
	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
	 * per-port, so if explicit configuration is needed, do it only
	 * for a PMF.
	 *
	 * For 57712 and newer on the other hand it's a per-function
	 * configuration.
	 */
	return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
}
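/* ethtool_rxfh_indir_default(i, n) is simply i % n, so with 4 ETH queues
 * the table above cycles cl_id + 0, 1, 2, 3, 0, 1, ... across all
 * indirection-table entries.
 */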
int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
	      bool config_hash, bool enable)
{
	struct bnx2x_config_rss_params params = {NULL};

	/* Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 *
	 * if (!is_eth_multi(bp))
	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
	 */

	params.rss_obj = rss_obj;

	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	if (enable) {
		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);

		/* RSS configuration */
		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
		if (rss_obj->udp_rss_v4)
			__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
		if (rss_obj->udp_rss_v6)
			__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);

		if (!CHIP_IS_E1x(bp))
			/* valid only for TUNN_MODE_GRE tunnel mode */
			__set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
	} else {
		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
	}

	/* Hash bits */
	params.rss_result_mask = MULTI_MASK;

	memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));

	if (config_hash) {
		/* RSS keys */
		netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
	}

	if (IS_PF(bp))
		return bnx2x_config_rss(bp, &params);
	else
		return bnx2x_vfpf_config_rss(bp, &params);
}
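/* Typical call site (illustrative): bnx2x_config_rss_eth() wraps this as
 * bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true), used both at load
 * time and from the ethtool RSS hooks.
 */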
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	struct bnx2x_func_state_params func_params = {NULL};

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_INIT;

	func_params.params.hw_init.load_phase = load_code;

	return bnx2x_func_state_change(bp, &func_params);
}
/*
 * Cleans the objects that have internal lists without sending
 * ramrods. Should be run when interrupts are disabled.
 */
void bnx2x_squeeze_objects(struct bnx2x *bp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;

	/***************** Cleanup MACs' object first *************************/

	/* Wait for completion of requested */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	/* Perform a dry cleanup */
	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

	/* Clean ETH primary MAC */
	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);

	/* Cleanup UC list */
	vlan_mac_flags = 0;
	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);

	/***************** Now clean mcast object *****************************/
	rparam.mcast_obj = &bp->mcast_obj;
	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

	/* Add a DEL command... - Since we're doing a driver cleanup only,
	 * we take a lock surrounding both the initial send and the CONTs,
	 * as we don't want a true completion to disrupt us in the middle.
	 */
	netif_addr_lock_bh(bp->dev);
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
			  rc);

	/* ...and wait until all pending commands are cleared */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	while (rc != 0) {
		if (rc < 0) {
			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
				  rc);
			netif_addr_unlock_bh(bp->dev);
			return;
		}

		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	}
	netif_addr_unlock_bh(bp->dev);
}
#ifndef BNX2X_STOP_ON_ERROR
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		goto label; \
	} while (0)

#define LOAD_ERROR_EXIT_CNIC(bp, label) \
	do { \
		bp->cnic_loaded = false; \
		goto label; \
	} while (0)
#else /*BNX2X_STOP_ON_ERROR*/
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#define LOAD_ERROR_EXIT_CNIC(bp, label) \
	do { \
		bp->cnic_loaded = false; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#endif /*BNX2X_STOP_ON_ERROR*/
static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
{
	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
}
static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
{
	int num_groups, vf_headroom = 0;
	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;

	/* number of queues for statistics is number of eth queues + FCoE */
	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;

	/* Total number of FW statistics requests =
	 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
	 * and fcoe l2 queue) stats + num of queues (which includes another 1
	 * for fcoe l2 queue if applicable)
	 */
	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;

	/* vf stats appear in the request list, but their data is allocated by
	 * the VFs themselves. We don't include them in the bp->fw_stats_num as
	 * it is used to determine where to place the vf stats queries in the
	 * request struct
	 */
	if (IS_SRIOV(bp))
		vf_headroom = bnx2x_vf_headroom(bp);

	/* Request is built from stats_query_header and an array of
	 * stats_query_cmd_group each of which contains
	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
	 * configured in the stats_query_header.
	 */
	num_groups =
		(((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
		 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
		 1 : 0));

	DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
	   bp->fw_stats_num, vf_headroom, num_groups);
	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
		num_groups * sizeof(struct stats_query_cmd_group);
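	/* Worked example (illustrative): if STATS_QUERY_CMD_COUNT were 16
	 * and fw_stats_num + vf_headroom = 18, num_groups = 18/16 + 1 = 2;
	 * the add-one term reserves a partially filled trailing group.
	 */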
2279 /* Data for statistics requests + stats_counter
2280 * stats_counter holds per-STORM counters that are incremented
2281 * when STORM has finished with the current request.
2282 * memory for FCoE offloaded statistics are counted anyway,
2283 * even if they will not be sent.
2284 * VF stats are not accounted for here as the data of VF stats is stored
2285 * in memory allocated by the VF, not here.
2287 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2288 sizeof(struct per_pf_stats) +
2289 sizeof(struct fcoe_statistics_params) +
2290 sizeof(struct per_queue_stats) * num_queue_stats +
2291 sizeof(struct stats_counter);
2293 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2294 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2299 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2300 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2301 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2302 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2303 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2304 bp->fw_stats_req_sz;
2306 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2307 U64_HI(bp->fw_stats_req_mapping),
2308 U64_LO(bp->fw_stats_req_mapping));
2309 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2310 U64_HI(bp->fw_stats_data_mapping),
2311 U64_LO(bp->fw_stats_data_mapping));
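/* The single DMA block allocated above is carved in two (a sketch;
 * actual sizes depend on the chip and queue count):
 *
 *   fw_stats_mapping
 *   +----------------------------+ <- fw_stats_req (fw_stats_req_sz)
 *   | stats_query_header         |
 *   | stats_query_cmd_group[N]   |
 *   +----------------------------+ <- fw_stats_data (fw_stats_data_sz)
 *   | per_port / per_pf / FCoE / |
 *   | per_queue stats + counter  |
 *   +----------------------------+
 */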
2315 bnx2x_free_fw_stats_mem(bp);
2316 BNX2X_ERR("Can't allocate FW stats memory\n");
2320 /* send load request to mcp and analyze response */
2321 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2327 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2328 DRV_MSG_SEQ_NUMBER_MASK);
2329 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2331 /* Get current FW pulse sequence */
2332 bp->fw_drv_pulse_wr_seq =
2333 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2334 DRV_PULSE_SEQ_MASK);
2335 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2337 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2339 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2340 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2343 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2345 /* if mcp fails to respond we must abort */
2346 if (!(*load_code)) {
2347 BNX2X_ERR("MCP response failure, aborting\n");
2351 /* If mcp refused (e.g. other port is in diagnostic mode) we must abort. */
2354 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2355 BNX2X_ERR("MCP refused load request, aborting\n");
2361 /* check whether another PF has already loaded FW to chip. In
2362 * virtualized environments a pf from another VM may have already
2363 * initialized the device including loading FW
2365 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2367 /* is another pf loaded on this engine? */
2368 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2369 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2370 /* build my FW version dword */
2371 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2372 (BCM_5710_FW_MINOR_VERSION << 8) +
2373 (BCM_5710_FW_REVISION_VERSION << 16) +
2374 (BCM_5710_FW_ENGINEERING_VERSION << 24);
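/* Example encoding (hypothetical version numbers): FW 7.13.1.0 packs
 * as 7 + (13 << 8) + (1 << 16) + (0 << 24) = 0x00010d07, and that
 * dword is what gets compared against the value read from
 * XSEM_REG_PRAM below.
 */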
2376 /* read loaded FW from chip */
2377 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2379 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2382 /* abort nic load if version mismatch */
2383 if (my_fw != loaded_fw) {
2385 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2388 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2396 /* returns the "mcp load_code" according to global load_count array */
2397 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2399 int path = BP_PATH(bp);
2401 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2402 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2403 bnx2x_load_count[path][2]);
2404 bnx2x_load_count[path][0]++;
2405 bnx2x_load_count[path][1 + port]++;
2406 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2407 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2408 bnx2x_load_count[path][2]);
2409 if (bnx2x_load_count[path][0] == 1)
2410 return FW_MSG_CODE_DRV_LOAD_COMMON;
2411 else if (bnx2x_load_count[path][1 + port] == 1)
2412 return FW_MSG_CODE_DRV_LOAD_PORT;
2414 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
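/* Example sequence (hypothetical, port 0 then port 1 on one path):
 * 1st ifup on the path -> counts 1,1,0 -> FW_MSG_CODE_DRV_LOAD_COMMON
 * 1st ifup on port 1   -> counts 2,1,1 -> FW_MSG_CODE_DRV_LOAD_PORT
 * any later ifup       -> counts grow  -> FW_MSG_CODE_DRV_LOAD_FUNCTION
 */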
2417 /* mark PMF if applicable */
2418 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2420 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2421 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2422 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2424 /* We need the barrier to ensure the ordering between the
2425 * writing to bp->port.pmf here and reading it from the
2426 * bnx2x_periodic_task().
2433 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2436 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2438 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2439 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2440 (bp->common.shmem2_base)) {
2441 if (SHMEM2_HAS(bp, dcc_support))
2442 SHMEM2_WR(bp, dcc_support,
2443 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2444 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2445 if (SHMEM2_HAS(bp, afex_driver_support))
2446 SHMEM2_WR(bp, afex_driver_support,
2447 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2450 /* Set AFEX default VLAN tag to an invalid value */
2451 bp->afex_def_vlan_tag = -1;
2455 * bnx2x_bz_fp - zero content of the fastpath structure.
2457 * @bp: driver handle
2458 * @index: fastpath index to be zeroed
2460 * Makes sure the contents of the bp->fp[index].napi are kept intact.
2463 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2465 struct bnx2x_fastpath *fp = &bp->fp[index];
2467 struct napi_struct orig_napi = fp->napi;
2468 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2470 /* bzero bnx2x_fastpath contents */
2472 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2473 sizeof(struct bnx2x_agg_info));
2474 memset(fp, 0, sizeof(*fp));
2476 /* Restore the NAPI object as it has been already initialized */
2477 fp->napi = orig_napi;
2478 fp->tpa_info = orig_tpa_info;
2482 fp->max_cos = bp->max_cos;
2484 /* Special queues support only one CoS */
2487 /* Init txdata pointers */
2489 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2491 for_each_cos_in_tx_queue(fp, cos)
2492 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2493 BNX2X_NUM_ETH_QUEUES(bp) + index];
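/* Example of the mapping above (assuming 4 ETH queues and max_cos = 2):
 * fp[1].txdata_ptr[0] = &bp->bnx2x_txq[0 * 4 + 1] (ring 1)
 * fp[1].txdata_ptr[1] = &bp->bnx2x_txq[1 * 4 + 1] (ring 5)
 * i.e. all CoS-0 rings are laid out first, then all CoS-1 rings.
 */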
2495 /* set the tpa flag for each queue. The tpa flag determines the queue's
2496 * minimal size, so it must be set prior to queue memory allocation
2498 if (bp->dev->features & NETIF_F_LRO)
2499 fp->mode = TPA_MODE_LRO;
2500 else if (bp->dev->features & NETIF_F_GRO &&
2501 bnx2x_mtu_allows_gro(bp->dev->mtu))
2502 fp->mode = TPA_MODE_GRO;
2504 fp->mode = TPA_MODE_DISABLED;
2506 /* We don't want TPA if it's disabled in bp
2507 * or if this is an FCoE L2 ring.
2509 if (bp->disable_tpa || IS_FCOE_FP(fp))
2510 fp->mode = TPA_MODE_DISABLED;
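/* Resulting TPA mode, in order of precedence (summary of the checks
 * above):
 *   bp->disable_tpa or FCoE L2 ring     -> TPA_MODE_DISABLED
 *   NETIF_F_LRO                         -> TPA_MODE_LRO
 *   NETIF_F_GRO and MTU allowing GRO    -> TPA_MODE_GRO
 *   otherwise                           -> TPA_MODE_DISABLED
 */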
2513 int bnx2x_load_cnic(struct bnx2x *bp)
2515 int i, rc, port = BP_PORT(bp);
2517 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2519 mutex_init(&bp->cnic_mutex);
2522 rc = bnx2x_alloc_mem_cnic(bp);
2524 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2525 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2529 rc = bnx2x_alloc_fp_mem_cnic(bp);
2531 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2532 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2535 /* Update the number of queues with the cnic queues */
2536 rc = bnx2x_set_real_num_queues(bp, 1);
2538 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2539 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2542 /* Add all CNIC NAPI objects */
2543 bnx2x_add_all_napi_cnic(bp);
2544 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2545 bnx2x_napi_enable_cnic(bp);
2547 rc = bnx2x_init_hw_func_cnic(bp);
2549 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2551 bnx2x_nic_init_cnic(bp);
2554 /* Enable Timer scan */
2555 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2557 /* setup cnic queues */
2558 for_each_cnic_queue(bp, i) {
2559 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2561 BNX2X_ERR("Queue setup failed\n");
2562 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2567 /* Initialize Rx filter. */
2568 bnx2x_set_rx_mode_inner(bp);
2570 /* re-read iscsi info */
2571 bnx2x_get_iscsi_info(bp);
2572 bnx2x_setup_cnic_irq_info(bp);
2573 bnx2x_setup_cnic_info(bp);
2574 bp->cnic_loaded = true;
2575 if (bp->state == BNX2X_STATE_OPEN)
2576 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2578 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2582 #ifndef BNX2X_STOP_ON_ERROR
2584 /* Disable Timer scan */
2585 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2588 bnx2x_napi_disable_cnic(bp);
2589 /* Update the number of queues without the cnic queues */
2590 if (bnx2x_set_real_num_queues(bp, 0))
2591 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2593 BNX2X_ERR("CNIC-related load failed\n");
2594 bnx2x_free_fp_mem_cnic(bp);
2595 bnx2x_free_mem_cnic(bp);
2597 #endif /* ! BNX2X_STOP_ON_ERROR */
2600 /* must be called with rtnl_lock */
2601 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2603 int port = BP_PORT(bp);
2604 int i, rc = 0, load_code = 0;
2606 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2608 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2610 #ifdef BNX2X_STOP_ON_ERROR
2611 if (unlikely(bp->panic)) {
2612 BNX2X_ERR("Can't load NIC when there is panic\n");
2617 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2619 /* zero the structure w/o any lock, before SP handler is initialized */
2620 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2621 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2622 &bp->last_reported_link.link_report_flags);
2625 /* must be called before memory allocation and HW init */
2626 bnx2x_ilt_set_info(bp);
2629 * Zero fastpath structures preserving invariants like napi, which are
2630 * allocated only once, fp index, max_cos, bp pointer.
2631 * Also set fp->mode and txdata_ptr.
2633 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2634 for_each_queue(bp, i)
2636 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2637 bp->num_cnic_queues) *
2638 sizeof(struct bnx2x_fp_txdata));
2640 bp->fcoe_init = false;
2642 /* Set the receive queues buffer size */
2643 bnx2x_set_rx_buf_size(bp);
2646 rc = bnx2x_alloc_mem(bp);
2648 BNX2X_ERR("Unable to allocate bp memory\n");
2653 /* needs to be done after alloc mem, since it's self-adjusting to the amount
2654 * of memory available for RSS queues
2656 rc = bnx2x_alloc_fp_mem(bp);
2658 BNX2X_ERR("Unable to allocate memory for fps\n");
2659 LOAD_ERROR_EXIT(bp, load_error0);
2662 /* Allocate memory for FW statistics */
2663 if (bnx2x_alloc_fw_stats_mem(bp))
2664 LOAD_ERROR_EXIT(bp, load_error0);
2666 /* request pf to initialize status blocks */
2668 rc = bnx2x_vfpf_init(bp);
2670 LOAD_ERROR_EXIT(bp, load_error0);
2673 /* As long as bnx2x_alloc_mem() may possibly update
2674 * bp->num_queues, bnx2x_set_real_num_queues() should always
2675 * come after it. At this stage cnic queues are not counted.
2677 rc = bnx2x_set_real_num_queues(bp, 0);
2679 BNX2X_ERR("Unable to set real_num_queues\n");
2680 LOAD_ERROR_EXIT(bp, load_error0);
2683 /* Configure multi-CoS mappings in the kernel.
2684 * This configuration may be overridden by a multi-class queue
2685 * discipline or by a DCBX negotiation result.
2687 bnx2x_setup_tc(bp->dev, bp->max_cos);
2689 /* Add all NAPI objects */
2690 bnx2x_add_all_napi(bp);
2691 DP(NETIF_MSG_IFUP, "napi added\n");
2692 bnx2x_napi_enable(bp);
2695 /* set pf load just before approaching the MCP */
2696 bnx2x_set_pf_load(bp);
2698 /* if mcp exists send load request and analyze response */
2699 if (!BP_NOMCP(bp)) {
2700 /* attempt to load pf */
2701 rc = bnx2x_nic_load_request(bp, &load_code);
2703 LOAD_ERROR_EXIT(bp, load_error1);
2705 /* what did mcp say? */
2706 rc = bnx2x_compare_fw_ver(bp, load_code, true);
2708 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2709 LOAD_ERROR_EXIT(bp, load_error2);
2712 load_code = bnx2x_nic_load_no_mcp(bp, port);
2715 /* mark pmf if applicable */
2716 bnx2x_nic_load_pmf(bp, load_code);
2718 /* Init Function state controlling object */
2719 bnx2x__init_func_obj(bp);
2722 rc = bnx2x_init_hw(bp, load_code);
2724 BNX2X_ERR("HW init failed, aborting\n");
2725 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2726 LOAD_ERROR_EXIT(bp, load_error2);
2730 bnx2x_pre_irq_nic_init(bp);
2732 /* Connect to IRQs */
2733 rc = bnx2x_setup_irqs(bp);
2735 BNX2X_ERR("setup irqs failed\n");
2737 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2738 LOAD_ERROR_EXIT(bp, load_error2);
2741 /* Init per-function objects */
2743 /* Setup NIC internals and enable interrupts */
2744 bnx2x_post_irq_nic_init(bp, load_code);
2746 bnx2x_init_bp_objs(bp);
2747 bnx2x_iov_nic_init(bp);
2749 /* Set AFEX default VLAN tag to an invalid value */
2750 bp->afex_def_vlan_tag = -1;
2751 bnx2x_nic_load_afex_dcc(bp, load_code);
2752 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2753 rc = bnx2x_func_start(bp);
2755 BNX2X_ERR("Function start failed!\n");
2756 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2758 LOAD_ERROR_EXIT(bp, load_error3);
2761 /* Send LOAD_DONE command to MCP */
2762 if (!BP_NOMCP(bp)) {
2763 load_code = bnx2x_fw_command(bp,
2764 DRV_MSG_CODE_LOAD_DONE, 0);
2766 BNX2X_ERR("MCP response failure, aborting\n");
2768 LOAD_ERROR_EXIT(bp, load_error3);
2772 /* initialize FW coalescing state machines in RAM */
2773 bnx2x_update_coalesce(bp);
2776 /* setup the leading queue */
2777 rc = bnx2x_setup_leading(bp);
2779 BNX2X_ERR("Setup leading failed!\n");
2780 LOAD_ERROR_EXIT(bp, load_error3);
2783 /* set up the rest of the queues */
2784 for_each_nondefault_eth_queue(bp, i) {
2786 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2788 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2790 BNX2X_ERR("Queue %d setup failed\n", i);
2791 LOAD_ERROR_EXIT(bp, load_error3);
2796 rc = bnx2x_init_rss(bp);
2798 BNX2X_ERR("PF RSS init failed\n");
2799 LOAD_ERROR_EXIT(bp, load_error3);
2802 /* Now that Clients are configured we are ready to work */
2803 bp->state = BNX2X_STATE_OPEN;
2805 /* Configure a ucast MAC */
2807 rc = bnx2x_set_eth_mac(bp, true);
2809 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2812 BNX2X_ERR("Setting Ethernet MAC failed\n");
2813 LOAD_ERROR_EXIT(bp, load_error3);
2816 if (IS_PF(bp) && bp->pending_max) {
2817 bnx2x_update_max_mf_config(bp, bp->pending_max);
2818 bp->pending_max = 0;
2822 rc = bnx2x_initial_phy_init(bp, load_mode);
2824 LOAD_ERROR_EXIT(bp, load_error3);
2826 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2828 /* Start fast path */
2830 /* Initialize Rx filter. */
2831 bnx2x_set_rx_mode_inner(bp);
2833 if (bp->flags & PTP_SUPPORTED) {
2835 bnx2x_configure_ptp_filters(bp);
2838 switch (load_mode) {
2840 /* Tx queues should only be re-enabled */
2841 netif_tx_wake_all_queues(bp->dev);
2845 netif_tx_start_all_queues(bp->dev);
2846 smp_mb__after_atomic();
2850 case LOAD_LOOPBACK_EXT:
2851 bp->state = BNX2X_STATE_DIAG;
2859 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2861 bnx2x__link_status_update(bp);
2863 /* start the timer */
2864 mod_timer(&bp->timer, jiffies + bp->current_interval);
2866 if (CNIC_ENABLED(bp))
2867 bnx2x_load_cnic(bp);
2870 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2872 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2873 /* mark driver as loaded in shmem2 */
2875 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2876 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2877 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2878 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2881 /* Wait for all pending SP commands to complete */
2882 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2883 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2884 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2888 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2889 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2890 bnx2x_dcbx_init(bp, false);
2892 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2896 #ifndef BNX2X_STOP_ON_ERROR
2899 bnx2x_int_disable_sync(bp, 1);
2901 /* Clean queueable objects */
2902 bnx2x_squeeze_objects(bp);
2905 /* Free SKBs, SGEs, TPA pool and driver internals */
2906 bnx2x_free_skbs(bp);
2907 for_each_rx_queue(bp, i)
2908 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2913 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2914 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2915 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2920 bnx2x_napi_disable(bp);
2921 bnx2x_del_all_napi(bp);
2923 /* clear pf_load status, as it was already set */
2925 bnx2x_clear_pf_load(bp);
2927 bnx2x_free_fw_stats_mem(bp);
2928 bnx2x_free_fp_mem(bp);
2932 #endif /* ! BNX2X_STOP_ON_ERROR */
2935 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2939 /* Wait until tx fastpath tasks complete */
2940 for_each_tx_queue(bp, i) {
2941 struct bnx2x_fastpath *fp = &bp->fp[i];
2943 for_each_cos_in_tx_queue(fp, cos)
2944 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2951 /* must be called with rtnl_lock */
2952 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2955 bool global = false;
2957 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2959 /* mark driver as unloaded in shmem2 */
2960 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2962 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2963 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2964 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2967 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2968 (bp->state == BNX2X_STATE_CLOSED ||
2969 bp->state == BNX2X_STATE_ERROR)) {
2970 /* We can get here if the driver has been unloaded
2971 * during parity error recovery and is either waiting for a
2972 * leader to complete or for other functions to unload and
2973 * then ifdown has been issued. In this case we want to
2974 * unload and let other functions complete the recovery
2977 bp->recovery_state = BNX2X_RECOVERY_DONE;
2979 bnx2x_release_leader_lock(bp);
2982 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2983 BNX2X_ERR("Can't unload in closed or error state\n");
2987 /* Nothing to do during unload if previous bnx2x_nic_load()
2988 * has not completed successfully - all resources are released.
2990 * We can get here only after an unsuccessful ndo_* callback, during which
2991 * dev->IFF_UP flag is still on.
2993 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2996 /* It's important to set the bp->state to a value different from
2997 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2998 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3000 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3003 /* indicate to VFs that the PF is going down */
3004 bnx2x_iov_channel_down(bp);
3006 if (CNIC_LOADED(bp))
3007 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3010 bnx2x_tx_disable(bp);
3011 netdev_reset_tc(bp->dev);
3013 bp->rx_mode = BNX2X_RX_MODE_NONE;
3015 del_timer_sync(&bp->timer);
3018 /* Set ALWAYS_ALIVE bit in shmem */
3019 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3020 bnx2x_drv_pulse(bp);
3021 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3022 bnx2x_save_statistics(bp);
3025 /* wait till consumers catch up with producers in all queues */
3026 bnx2x_drain_tx_queues(bp);
3028 /* if VF, indicate to the PF that this function is going down (the PF will
3029 * delete SP elements and clear initializations)
3032 bnx2x_vfpf_close_vf(bp);
3033 else if (unload_mode != UNLOAD_RECOVERY)
3034 /* if this is a normal/close unload need to clean up chip*/
3035 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3037 /* Send the UNLOAD_REQUEST to the MCP */
3038 bnx2x_send_unload_req(bp, unload_mode);
3040 /* Prevent transactions to host from the functions on the
3041 * engine that doesn't reset global blocks in case of global
3042 * attention once global blocks are reset and gates are opened
3043 * (the engine whose leader will perform the recovery process).
3046 if (!CHIP_IS_E1x(bp))
3047 bnx2x_pf_disable(bp);
3049 /* Disable HW interrupts, NAPI */
3050 bnx2x_netif_stop(bp, 1);
3051 /* Delete all NAPI objects */
3052 bnx2x_del_all_napi(bp);
3053 if (CNIC_LOADED(bp))
3054 bnx2x_del_all_napi_cnic(bp);
3058 /* Report UNLOAD_DONE to MCP */
3059 bnx2x_send_unload_done(bp, false);
3063 * At this stage no more interrupts will arrive so we may safely clean
3064 * the queueable objects here in case they failed to get cleaned so far.
3067 bnx2x_squeeze_objects(bp);
3069 /* There should be no more pending SP commands at this stage */
3074 /* clear pending work in rtnl task */
3075 bp->sp_rtnl_state = 0;
3078 /* Free SKBs, SGEs, TPA pool and driver internals */
3079 bnx2x_free_skbs(bp);
3080 if (CNIC_LOADED(bp))
3081 bnx2x_free_skbs_cnic(bp);
3082 for_each_rx_queue(bp, i)
3083 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3085 bnx2x_free_fp_mem(bp);
3086 if (CNIC_LOADED(bp))
3087 bnx2x_free_fp_mem_cnic(bp);
3090 if (CNIC_LOADED(bp))
3091 bnx2x_free_mem_cnic(bp);
3095 bp->state = BNX2X_STATE_CLOSED;
3096 bp->cnic_loaded = false;
3098 /* Clear driver version indication in shmem */
3100 bnx2x_update_mng_version(bp);
3102 /* Check if there are pending parity attentions. If there are - set
3103 * RECOVERY_IN_PROGRESS.
3105 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3106 bnx2x_set_reset_in_progress(bp);
3108 /* Set RESET_IS_GLOBAL if needed */
3110 bnx2x_set_reset_global(bp);
3113 /* The last driver must disable a "close the gate" if there is no
3114 * parity attention or "process kill" pending.
3117 !bnx2x_clear_pf_load(bp) &&
3118 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3119 bnx2x_disable_close_the_gate(bp);
3121 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3126 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3130 /* If there is no power capability, silently succeed */
3131 if (!bp->pdev->pm_cap) {
3132 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3136 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3140 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3141 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3142 PCI_PM_CTRL_PME_STATUS));
3144 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3145 /* delay required during transition out of D3hot */
3150 /* If there are other clients above, don't
3151 * shut down the power */
3152 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3154 /* Don't shut down the power for emulation and FPGA */
3155 if (CHIP_REV_IS_SLOW(bp))
3158 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3162 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3164 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3167 /* No more memory access after this point until
3168 * device is brought back to D0.
3173 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3180 * net_device service functions
3182 static int bnx2x_poll(struct napi_struct *napi, int budget)
3186 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3188 struct bnx2x *bp = fp->bp;
3191 #ifdef BNX2X_STOP_ON_ERROR
3192 if (unlikely(bp->panic)) {
3193 napi_complete(napi);
3197 if (!bnx2x_fp_lock_napi(fp))
3200 for_each_cos_in_tx_queue(fp, cos)
3201 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3202 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3204 if (bnx2x_has_rx_work(fp)) {
3205 work_done += bnx2x_rx_int(fp, budget - work_done);
3207 /* must not complete if we consumed full budget */
3208 if (work_done >= budget) {
3209 bnx2x_fp_unlock_napi(fp);
3214 bnx2x_fp_unlock_napi(fp);
3216 /* Fall out from the NAPI loop if needed */
3217 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3219 /* No need to update SB for FCoE L2 ring as long as
3220 * it's connected to the default SB and the SB
3221 * has been updated when NAPI was scheduled.
3223 if (IS_FCOE_FP(fp)) {
3224 napi_complete(napi);
3227 bnx2x_update_fpsb_idx(fp);
3228 /* bnx2x_has_rx_work() reads the status block,
3229 * thus we need to ensure that status block indices
3230 * have been actually read (bnx2x_update_fpsb_idx)
3231 * prior to this check (bnx2x_has_rx_work) so that
3232 * we won't write the "newer" value of the status block
3233 * to IGU (if there was a DMA right after
3234 * bnx2x_has_rx_work and if there is no rmb, the memory
3235 * reading (bnx2x_update_fpsb_idx) may be postponed
3236 * to right before bnx2x_ack_sb). In this case there
3237 * will never be another interrupt until there is
3238 * another update of the status block, while there
3239 * is still unhandled work.
3243 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3244 napi_complete(napi);
3245 /* Re-enable interrupts */
3246 DP(NETIF_MSG_RX_STATUS,
3247 "Update index to %d\n", fp->fp_hc_idx);
3248 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3249 le16_to_cpu(fp->fp_hc_idx),
3259 #ifdef CONFIG_NET_RX_BUSY_POLL
3260 /* must be called with local_bh_disable()d */
3261 int bnx2x_low_latency_recv(struct napi_struct *napi)
3263 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3265 struct bnx2x *bp = fp->bp;
3268 if ((bp->state == BNX2X_STATE_CLOSED) ||
3269 (bp->state == BNX2X_STATE_ERROR) ||
3270 (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
3271 return LL_FLUSH_FAILED;
3273 if (!bnx2x_fp_lock_poll(fp))
3274 return LL_FLUSH_BUSY;
3276 if (bnx2x_has_rx_work(fp))
3277 found = bnx2x_rx_int(fp, 4);
3279 bnx2x_fp_unlock_poll(fp);
3285 /* we split the first BD into headers and data BDs
3286 * to ease the pain of our fellow microcode engineers;
3287 * we use one mapping for both BDs
3289 static u16 bnx2x_tx_split(struct bnx2x *bp,
3290 struct bnx2x_fp_txdata *txdata,
3291 struct sw_tx_bd *tx_buf,
3292 struct eth_tx_start_bd **tx_bd, u16 hlen,
3295 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3296 struct eth_tx_bd *d_tx_bd;
3298 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3300 /* first, fix the first BD */
3301 h_tx_bd->nbytes = cpu_to_le16(hlen);
3303 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3304 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3306 /* now get a new data BD
3307 * (after the pbd) and fill it */
3308 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3309 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3311 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3312 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3314 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3315 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3316 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3318 /* this marks the BD as one that has no individual mapping */
3319 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3321 DP(NETIF_MSG_TX_QUEUED,
3322 "TSO split data size is %d (%x:%x)\n",
3323 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3326 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
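/* Worked example (assumed sizes, for illustration): a TSO packet whose
 * linear part is 66 bytes of headers plus 134 bytes of data enters
 * with h_tx_bd->nbytes = 200 and hlen = 66. After the split h_tx_bd
 * carries nbytes = 66, and the new d_tx_bd reuses the same DMA
 * mapping at offset +66 with nbytes = 200 - 66 = 134.
 */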
3331 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3332 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3333 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3335 __sum16 tsum = (__force __sum16) csum;
3338 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3339 csum_partial(t_header - fix, fix, 0)));
3342 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3343 csum_partial(t_header, -fix, 0)));
3345 return bswab16(tsum);
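/* Worked example (illustrative only): if the stack's checksum started
 * 2 bytes before the transport header (fix = 2), the partial checksum
 * of those 2 extra bytes is subtracted from csum; with fix < 0 the
 * missing bytes starting at t_header are added instead, so the result
 * always corresponds to a sum beginning exactly at t_header.
 */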
3348 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3354 if (skb->ip_summed != CHECKSUM_PARTIAL)
3357 protocol = vlan_get_protocol(skb);
3358 if (protocol == htons(ETH_P_IPV6)) {
3360 prot = ipv6_hdr(skb)->nexthdr;
3363 prot = ip_hdr(skb)->protocol;
3366 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3367 if (inner_ip_hdr(skb)->version == 6) {
3368 rc |= XMIT_CSUM_ENC_V6;
3369 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3370 rc |= XMIT_CSUM_TCP;
3372 rc |= XMIT_CSUM_ENC_V4;
3373 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3374 rc |= XMIT_CSUM_TCP;
3377 if (prot == IPPROTO_TCP)
3378 rc |= XMIT_CSUM_TCP;
3380 if (skb_is_gso(skb)) {
3381 if (skb_is_gso_v6(skb)) {
3382 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3383 if (rc & XMIT_CSUM_ENC)
3384 rc |= XMIT_GSO_ENC_V6;
3386 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3387 if (rc & XMIT_CSUM_ENC)
3388 rc |= XMIT_GSO_ENC_V4;
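/* Example flag combination (hypothetical skb): a GSO TCP packet inside
 * an IPv4 tunnel on a non-E1x chip would leave here with
 * XMIT_CSUM_ENC_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4 | XMIT_GSO_ENC_V4,
 * telling the xmit path to program both inner and outer offloads.
 */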
3395 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3396 /* check if the packet requires linearization (packet is too fragmented);
3397 no need to check fragmentation if page size > 8K (there will be no
3398 violation of FW restrictions) */
3399 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3404 int first_bd_sz = 0;
3406 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3407 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3409 if (xmit_type & XMIT_GSO) {
3410 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3411 /* Check if LSO packet needs to be copied:
3412 * 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3413 int wnd_size = MAX_FETCH_BD - 3;
3414 /* Number of windows to check */
3415 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3420 /* Headers length */
3421 if (xmit_type & XMIT_GSO_ENC)
3422 hlen = (int)(skb_inner_transport_header(skb) -
3424 inner_tcp_hdrlen(skb);
3426 hlen = (int)(skb_transport_header(skb) -
3427 skb->data) + tcp_hdrlen(skb);
3429 /* Amount of data (w/o headers) on the linear part of the SKB */
3430 first_bd_sz = skb_headlen(skb) - hlen;
3432 wnd_sum = first_bd_sz;
3434 /* Calculate the first sum - it's special */
3435 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3437 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3439 /* If there was data in the linear part of the skb - check it */
3440 if (first_bd_sz > 0) {
3441 if (unlikely(wnd_sum < lso_mss)) {
3446 wnd_sum -= first_bd_sz;
3449 /* Others are easier: run through the frag list and
3450 * check all windows */
3451 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3453 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3455 if (unlikely(wnd_sum < lso_mss)) {
3460 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3463 /* in the non-LSO case a too-fragmented packet should always be linearized */
3470 if (unlikely(to_copy))
3471 DP(NETIF_MSG_TX_QUEUED,
3472 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3473 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3474 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
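/* Worked window check (assumed numbers): with a window of
 * wnd_size = 10 fragments and lso_mss = 1400, a window whose fragment
 * sizes sum to only 1200 bytes would under-fill an MSS, so to_copy is
 * set and the skb is linearized before transmission.
 */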
3481 * bnx2x_set_pbd_gso - update PBD in GSO case.
3485 * @xmit_type: xmit flags
3487 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3488 struct eth_tx_parse_bd_e1x *pbd,
3491 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3492 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3493 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3495 if (xmit_type & XMIT_GSO_V4) {
3496 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3497 pbd->tcp_pseudo_csum =
3498 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3500 0, IPPROTO_TCP, 0));
3502 pbd->tcp_pseudo_csum =
3503 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3504 &ipv6_hdr(skb)->daddr,
3505 0, IPPROTO_TCP, 0));
3509 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3513 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3515 * @bp: driver handle
3517 * @parsing_data: data to be updated
3518 * @xmit_type: xmit flags
3520 * 57712/578xx related, when skb has encapsulation
3522 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3523 u32 *parsing_data, u32 xmit_type)
3526 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3527 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3528 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3530 if (xmit_type & XMIT_CSUM_TCP) {
3531 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3532 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3533 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3535 return skb_inner_transport_header(skb) +
3536 inner_tcp_hdrlen(skb) - skb->data;
3539 /* We support checksum offload for TCP and UDP only.
3540 * No need to pass the UDP header length - it's a constant.
3542 return skb_inner_transport_header(skb) +
3543 sizeof(struct udphdr) - skb->data;
3547 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3549 * @bp: driver handle
3551 * @parsing_data: data to be updated
3552 * @xmit_type: xmit flags
3554 * 57712/578xx related
3556 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3557 u32 *parsing_data, u32 xmit_type)
3560 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3561 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3562 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3564 if (xmit_type & XMIT_CSUM_TCP) {
3565 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3566 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3567 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3569 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3571 /* We support checksum offload for TCP and UDP only.
3572 * No need to pass the UDP header length - it's a constant.
3574 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3577 /* set FW indication according to inner or outer protocols if tunneled */
3578 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3579 struct eth_tx_start_bd *tx_start_bd,
3582 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3584 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3585 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3587 if (!(xmit_type & XMIT_CSUM_TCP))
3588 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3592 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3594 * @bp: driver handle
3596 * @pbd: parse BD to be updated
3597 * @xmit_type: xmit flags
3599 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3600 struct eth_tx_parse_bd_e1x *pbd,
3603 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3605 /* for now NS flag is not used in Linux */
3608 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3609 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3611 pbd->ip_hlen_w = (skb_transport_header(skb) -
3612 skb_network_header(skb)) >> 1;
3614 hlen += pbd->ip_hlen_w;
3616 /* We support checksum offload for TCP and UDP only */
3617 if (xmit_type & XMIT_CSUM_TCP)
3618 hlen += tcp_hdrlen(skb) / 2;
3620 hlen += sizeof(struct udphdr) / 2;
3622 pbd->total_hlen_w = cpu_to_le16(hlen);
3625 if (xmit_type & XMIT_CSUM_TCP) {
3626 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3629 s8 fix = SKB_CS_OFF(skb); /* signed! */
3631 DP(NETIF_MSG_TX_QUEUED,
3632 "hlen %d fix %d csum before fix %x\n",
3633 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3635 /* HW bug: fixup the CSUM */
3636 pbd->tcp_pseudo_csum =
3637 bnx2x_csum_fix(skb_transport_header(skb),
3640 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3641 pbd->tcp_pseudo_csum);
3647 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3648 struct eth_tx_parse_bd_e2 *pbd_e2,
3649 struct eth_tx_parse_2nd_bd *pbd2,
3654 u8 outerip_off, outerip_len = 0;
3656 /* from outer IP to transport */
3657 hlen_w = (skb_inner_transport_header(skb) -
3658 skb_network_header(skb)) >> 1;
3661 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3663 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3665 /* outer IP header info */
3666 if (xmit_type & XMIT_CSUM_V4) {
3667 struct iphdr *iph = ip_hdr(skb);
3668 u32 csum = (__force u32)(~iph->check) -
3669 (__force u32)iph->tot_len -
3670 (__force u32)iph->frag_off;
3672 outerip_len = iph->ihl << 1;
3674 pbd2->fw_ip_csum_wo_len_flags_frag =
3675 bswab16(csum_fold((__force __wsum)csum));
3677 pbd2->fw_ip_hdr_to_payload_w =
3678 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3679 pbd_e2->data.tunnel_data.flags |=
3680 ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
3683 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3685 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3687 /* inner IP header info */
3688 if (xmit_type & XMIT_CSUM_ENC_V4) {
3689 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3691 pbd_e2->data.tunnel_data.pseudo_csum =
3692 bswab16(~csum_tcpudp_magic(
3693 inner_ip_hdr(skb)->saddr,
3694 inner_ip_hdr(skb)->daddr,
3695 0, IPPROTO_TCP, 0));
3697 pbd_e2->data.tunnel_data.pseudo_csum =
3698 bswab16(~csum_ipv6_magic(
3699 &inner_ipv6_hdr(skb)->saddr,
3700 &inner_ipv6_hdr(skb)->daddr,
3701 0, IPPROTO_TCP, 0));
3704 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3709 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3710 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3711 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3713 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3714 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3715 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3719 static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3722 struct ipv6hdr *ipv6;
3724 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3727 if (xmit_type & XMIT_GSO_ENC_V6)
3728 ipv6 = inner_ipv6_hdr(skb);
3729 else /* XMIT_GSO_V6 */
3730 ipv6 = ipv6_hdr(skb);
3732 if (ipv6->nexthdr == NEXTHDR_IPV6)
3733 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3736 /* called with netif_tx_lock
3737 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3738 * netif_wake_queue()
3740 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3742 struct bnx2x *bp = netdev_priv(dev);
3744 struct netdev_queue *txq;
3745 struct bnx2x_fp_txdata *txdata;
3746 struct sw_tx_bd *tx_buf;
3747 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3748 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3749 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3750 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3751 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3752 u32 pbd_e2_parsing_data = 0;
3753 u16 pkt_prod, bd_prod;
3756 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3759 __le16 pkt_size = 0;
3761 u8 mac_type = UNICAST_ADDRESS;
3763 #ifdef BNX2X_STOP_ON_ERROR
3764 if (unlikely(bp->panic))
3765 return NETDEV_TX_BUSY;
3768 txq_index = skb_get_queue_mapping(skb);
3769 txq = netdev_get_tx_queue(dev, txq_index);
3771 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3773 txdata = &bp->bnx2x_txq[txq_index];
3775 /* enable this debug print to view the transmission queue being used
3776 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3777 txq_index, fp_index, txdata_index); */
3779 /* enable this debug print to view the transmission details
3780 DP(NETIF_MSG_TX_QUEUED,
3781 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3782 txdata->cid, fp_index, txdata_index, txdata, fp); */
3784 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3785 skb_shinfo(skb)->nr_frags +
3787 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3788 /* Handle special storage cases separately */
3789 if (txdata->tx_ring_size == 0) {
3790 struct bnx2x_eth_q_stats *q_stats =
3791 bnx2x_fp_qstats(bp, txdata->parent_fp);
3792 q_stats->driver_filtered_tx_pkt++;
3794 return NETDEV_TX_OK;
3796 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3797 netif_tx_stop_queue(txq);
3798 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3800 return NETDEV_TX_BUSY;
3803 DP(NETIF_MSG_TX_QUEUED,
3804 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3805 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3806 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3809 eth = (struct ethhdr *)skb->data;
3811 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3812 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3813 if (is_broadcast_ether_addr(eth->h_dest))
3814 mac_type = BROADCAST_ADDRESS;
3816 mac_type = MULTICAST_ADDRESS;
3819 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3820 /* First, check if we need to linearize the skb (due to FW
3821 restrictions). No need to check fragmentation if page size > 8K
3822 (there will be no violation of FW restrictions) */
3823 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3824 /* Statistics of linearization */
3826 if (skb_linearize(skb) != 0) {
3827 DP(NETIF_MSG_TX_QUEUED,
3828 "SKB linearization failed - silently dropping this SKB\n");
3829 dev_kfree_skb_any(skb);
3830 return NETDEV_TX_OK;
3834 /* Map skb linear data for DMA */
3835 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3836 skb_headlen(skb), DMA_TO_DEVICE);
3837 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3838 DP(NETIF_MSG_TX_QUEUED,
3839 "SKB mapping failed - silently dropping this SKB\n");
3840 dev_kfree_skb_any(skb);
3841 return NETDEV_TX_OK;
3844 Please read carefully. First we use one BD which we mark as start,
3845 then we have a parsing info BD (used for TSO or xsum),
3846 and only then we have the rest of the TSO BDs.
3847 (don't forget to mark the last one as last,
3848 and to unmap only AFTER you write to the BD ...)
3849 And above all, all pbd sizes are in words - NOT DWORDS!
3852 /* get current pkt produced now - advance it just before sending packet
3853 * since mapping of pages may fail and cause packet to be dropped
3855 pkt_prod = txdata->tx_pkt_prod;
3856 bd_prod = TX_BD(txdata->tx_bd_prod);
3858 /* get a tx_buf and first BD
3859 * tx_start_bd may be changed during SPLIT,
3860 * but first_bd will always stay first
3862 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3863 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3864 first_bd = tx_start_bd;
3866 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3868 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3869 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3870 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3871 } else if (bp->ptp_tx_skb) {
3872 BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3874 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3875 /* schedule check for Tx timestamp */
3876 bp->ptp_tx_skb = skb_get(skb);
3877 bp->ptp_tx_start = jiffies;
3878 schedule_work(&bp->ptp_task);
3882 /* header nbd: indirectly zero other flags! */
3883 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3885 /* remember the first BD of the packet */
3886 tx_buf->first_bd = txdata->tx_bd_prod;
3890 DP(NETIF_MSG_TX_QUEUED,
3891 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3892 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3894 if (skb_vlan_tag_present(skb)) {
3895 tx_start_bd->vlan_or_ethertype =
3896 cpu_to_le16(skb_vlan_tag_get(skb));
3897 tx_start_bd->bd_flags.as_bitfield |=
3898 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3900 /* when transmitting in a vf, start bd must hold the ethertype
3901 * for fw to enforce it
3903 #ifndef BNX2X_STOP_ON_ERROR
3906 tx_start_bd->vlan_or_ethertype =
3907 cpu_to_le16(ntohs(eth->h_proto));
3908 #ifndef BNX2X_STOP_ON_ERROR
3910 /* used by FW for packet accounting */
3911 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3915 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3917 /* turn on parsing and get a BD */
3918 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3920 if (xmit_type & XMIT_CSUM)
3921 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3923 if (!CHIP_IS_E1x(bp)) {
3924 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3925 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3927 if (xmit_type & XMIT_CSUM_ENC) {
3928 u16 global_data = 0;
3930 /* Set PBD in enc checksum offload case */
3931 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3932 &pbd_e2_parsing_data,
3935 /* turn on 2nd parsing and get a BD */
3936 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3938 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3940 memset(pbd2, 0, sizeof(*pbd2));
3942 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3943 (skb_inner_network_header(skb) -
3946 if (xmit_type & XMIT_GSO_ENC)
3947 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3951 pbd2->global_data = cpu_to_le16(global_data);
3953 /* add an additional parse BD indication to the start BD */
3954 SET_FLAG(tx_start_bd->general_data,
3955 ETH_TX_START_BD_PARSE_NBDS, 1);
3956 /* set encapsulation flag in start BD */
3957 SET_FLAG(tx_start_bd->general_data,
3958 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3960 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3963 } else if (xmit_type & XMIT_CSUM) {
3964 /* Set PBD in checksum offload case w/o encapsulation */
3965 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3966 &pbd_e2_parsing_data,
3970 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3971 /* Add the macs to the parsing BD if this is a vf or if
3972 * Tx Switching is enabled.
3975 /* override GRE parameters in BD */
3976 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3977 &pbd_e2->data.mac_addr.src_mid,
3978 &pbd_e2->data.mac_addr.src_lo,
3981 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3982 &pbd_e2->data.mac_addr.dst_mid,
3983 &pbd_e2->data.mac_addr.dst_lo,
3986 if (bp->flags & TX_SWITCHING)
3987 bnx2x_set_fw_mac_addr(
3988 &pbd_e2->data.mac_addr.dst_hi,
3989 &pbd_e2->data.mac_addr.dst_mid,
3990 &pbd_e2->data.mac_addr.dst_lo,
3992 #ifdef BNX2X_STOP_ON_ERROR
3993 /* Enforce security is always set in Stop on Error -
3994 * source mac should be present in the parsing BD
3996 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3997 &pbd_e2->data.mac_addr.src_mid,
3998 &pbd_e2->data.mac_addr.src_lo,
4003 SET_FLAG(pbd_e2_parsing_data,
4004 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4006 u16 global_data = 0;
4007 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4008 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4009 /* Set PBD in checksum offload case */
4010 if (xmit_type & XMIT_CSUM)
4011 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4013 SET_FLAG(global_data,
4014 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4015 pbd_e1x->global_data |= cpu_to_le16(global_data);
4018 /* Setup the data pointer of the first BD of the packet */
4019 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4020 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4021 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4022 pkt_size = tx_start_bd->nbytes;
4024 DP(NETIF_MSG_TX_QUEUED,
4025 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
4026 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4027 le16_to_cpu(tx_start_bd->nbytes),
4028 tx_start_bd->bd_flags.as_bitfield,
4029 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4031 if (xmit_type & XMIT_GSO) {
4033 DP(NETIF_MSG_TX_QUEUED,
4034 "TSO packet len %d hlen %d total len %d tso size %d\n",
4035 skb->len, hlen, skb_headlen(skb),
4036 skb_shinfo(skb)->gso_size);
4038 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4040 if (unlikely(skb_headlen(skb) > hlen)) {
4042 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4046 if (!CHIP_IS_E1x(bp))
4047 pbd_e2_parsing_data |=
4048 (skb_shinfo(skb)->gso_size <<
4049 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4050 ETH_TX_PARSE_BD_E2_LSO_MSS;
4052 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4055 /* Set the PBD's parsing_data field if not zero
4056 * (for the chips newer than 57711).
4058 if (pbd_e2_parsing_data)
4059 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4061 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4063 /* Handle fragmented skb */
4064 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4065 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4067 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4068 skb_frag_size(frag), DMA_TO_DEVICE);
4069 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4070 unsigned int pkts_compl = 0, bytes_compl = 0;
4072 DP(NETIF_MSG_TX_QUEUED,
4073 "Unable to map page - dropping packet...\n");
4075 /* we need to unmap all buffers already mapped for this SKB;
4077 * first_bd->nbd needs to be properly updated
4078 * before the call to bnx2x_free_tx_pkt
4080 first_bd->nbd = cpu_to_le16(nbd);
4081 bnx2x_free_tx_pkt(bp, txdata,
4082 TX_BD(txdata->tx_pkt_prod),
4083 &pkts_compl, &bytes_compl);
4084 return NETDEV_TX_OK;
4087 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4088 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4089 if (total_pkt_bd == NULL)
4090 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4092 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4093 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4094 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4095 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4098 DP(NETIF_MSG_TX_QUEUED,
4099 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4100 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4101 le16_to_cpu(tx_data_bd->nbytes));
4104 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4106 /* update with actual num BDs */
4107 first_bd->nbd = cpu_to_le16(nbd);
4109 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4111 /* now send a tx doorbell, counting the next BD
4112 * if the packet contains or ends with it
4114 if (TX_BD_POFF(bd_prod) < nbd)
4117 /* total_pkt_bytes should be set on the first data BD if
4118 * it's not an LSO packet and there is more than one
4119 * data BD. In this case pkt_size is limited by an MTU value.
4120 * However we prefer to set it for an LSO packet (while we don't
4121 * have to) in order to save some CPU cycles in the non-LSO
4122 * case, where we care much more about them.
4124 if (total_pkt_bd != NULL)
4125 total_pkt_bd->total_pkt_bytes = pkt_size;
4128 DP(NETIF_MSG_TX_QUEUED,
4129 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4130 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4131 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4132 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4133 le16_to_cpu(pbd_e1x->total_hlen_w));
4135 DP(NETIF_MSG_TX_QUEUED,
4136 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4138 pbd_e2->data.mac_addr.dst_hi,
4139 pbd_e2->data.mac_addr.dst_mid,
4140 pbd_e2->data.mac_addr.dst_lo,
4141 pbd_e2->data.mac_addr.src_hi,
4142 pbd_e2->data.mac_addr.src_mid,
4143 pbd_e2->data.mac_addr.src_lo,
4144 pbd_e2->parsing_data);
4145 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4147 netdev_tx_sent_queue(txq, skb->len);
4149 skb_tx_timestamp(skb);
4151 txdata->tx_pkt_prod++;
4153 * Make sure that the BD data is updated before updating the producer
4154 * since FW might read the BD right after the producer is updated.
4155 * This is only applicable for weak-ordered memory model archs such
4156 * as IA-64. The following barrier is also mandatory since FW
4157 * assumes packets must have BDs.
4161 txdata->tx_db.data.prod += nbd;
4164 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4168 txdata->tx_bd_prod += nbd;
4170 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4171 netif_tx_stop_queue(txq);
4173 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4174 * ordering of set_bit() in netif_tx_stop_queue() and the read of the Tx consumer index. */
4178 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4179 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4180 netif_tx_wake_queue(txq);
4184 return NETDEV_TX_OK;
4188 * bnx2x_setup_tc - routine to configure net_device for multi tc
4190 * @dev: net device to configure
4191 * @num_tc: number of traffic classes to enable
4193 * callback connected to the ndo_setup_tc function pointer
4195 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4197 int cos, prio, count, offset;
4198 struct bnx2x *bp = netdev_priv(dev);
4200 /* setup tc must be called under rtnl lock */
4203 /* no traffic classes requested. Aborting */
4205 netdev_reset_tc(dev);
4209 /* requested to support too many traffic classes */
4210 if (num_tc > bp->max_cos) {
4211 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4212 num_tc, bp->max_cos);
4216 /* declare amount of supported traffic classes */
4217 if (netdev_set_num_tc(dev, num_tc)) {
4218 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4222 /* configure priority to traffic class mapping */
4223 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4224 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4225 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4226 "mapping priority %d to tc %d\n",
4227 prio, bp->prio_to_cos[prio]);
4230 /* Use this configuration to differentiate tc0 from other COSes
4231 This can be used for ets or pfc, and save the effort of setting
4232 up a multi-class queue disc or negotiating DCBX with a switch
4233 netdev_set_prio_tc_map(dev, 0, 0);
4234 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4235 for (prio = 1; prio < 16; prio++) {
4236 netdev_set_prio_tc_map(dev, prio, 1);
4237 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4240 /* configure traffic class to transmission queue mapping */
4241 for (cos = 0; cos < bp->max_cos; cos++) {
4242 count = BNX2X_NUM_ETH_QUEUES(bp);
4243 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4244 netdev_set_tc_queue(dev, cos, count, offset);
4245 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4246 "mapping tc %d to offset %d count %d\n",
4247 cos, offset, count);
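/* Example queue layout (assuming max_cos = 2 and 4 ETH queues):
 * tc 0 -> queues 0..3 (offset 0, count 4)
 * tc 1 -> queues 4..7 (offset 4, count 4)
 */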
4253 /* called with rtnl_lock */
4254 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4256 struct sockaddr *addr = p;
4257 struct bnx2x *bp = netdev_priv(dev);
4260 if (!is_valid_ether_addr(addr->sa_data)) {
4261 BNX2X_ERR("Requested MAC address is not valid\n");
4265 if (IS_MF_STORAGE_ONLY(bp)) {
4266 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4270 if (netif_running(dev)) {
4271 rc = bnx2x_set_eth_mac(bp, false);
4276 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4278 if (netif_running(dev))
4279 rc = bnx2x_set_eth_mac(bp, true);
4284 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4286 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4287 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4292 if (IS_FCOE_IDX(fp_index)) {
4293 memset(sb, 0, sizeof(union host_hc_status_block));
4294 fp->status_blk_mapping = 0;
4297 if (!CHIP_IS_E1x(bp))
4298 BNX2X_PCI_FREE(sb->e2_sb,
4299 bnx2x_fp(bp, fp_index,
4300 status_blk_mapping),
4301 sizeof(struct host_hc_status_block_e2));
4303 BNX2X_PCI_FREE(sb->e1x_sb,
4304 bnx2x_fp(bp, fp_index,
4305 status_blk_mapping),
4306 sizeof(struct host_hc_status_block_e1x));
4310 if (!skip_rx_queue(bp, fp_index)) {
4311 bnx2x_free_rx_bds(fp);
4313 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4314 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4315 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4316 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4317 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4319 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4320 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4321 sizeof(struct eth_fast_path_rx_cqe) *
4325 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4326 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4327 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4328 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4332 if (!skip_tx_queue(bp, fp_index)) {
4333 /* fastpath tx rings: tx_buf tx_desc */
4334 for_each_cos_in_tx_queue(fp, cos) {
4335 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4337 DP(NETIF_MSG_IFDOWN,
4338 "freeing tx memory of fp %d cos %d cid %d\n",
4339 fp_index, cos, txdata->cid);
4341 BNX2X_FREE(txdata->tx_buf_ring);
4342 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4343 txdata->tx_desc_mapping,
4344 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4347 /* end of fastpath */
4350 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4353 for_each_cnic_queue(bp, i)
4354 bnx2x_free_fp_mem_at(bp, i);
4357 void bnx2x_free_fp_mem(struct bnx2x *bp)
4360 for_each_eth_queue(bp, i)
4361 bnx2x_free_fp_mem_at(bp, i);
4364 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4366 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4367 if (!CHIP_IS_E1x(bp)) {
4368 bnx2x_fp(bp, index, sb_index_values) =
4369 (__le16 *)status_blk.e2_sb->sb.index_values;
4370 bnx2x_fp(bp, index, sb_running_index) =
4371 (__le16 *)status_blk.e2_sb->sb.running_index;
4373 bnx2x_fp(bp, index, sb_index_values) =
4374 (__le16 *)status_blk.e1x_sb->sb.index_values;
4375 bnx2x_fp(bp, index, sb_running_index) =
4376 (__le16 *)status_blk.e1x_sb->sb.running_index;
4380 /* Returns the number of actually allocated BDs */
4381 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4384 struct bnx2x *bp = fp->bp;
4385 u16 ring_prod, cqe_ring_prod;
4386 int i, failure_cnt = 0;
4388 fp->rx_comp_cons = 0;
4389 cqe_ring_prod = ring_prod = 0;
4391 /* This routine is called only during fp init, so
4392 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4394 for (i = 0; i < rx_ring_size; i++) {
4395 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4399 ring_prod = NEXT_RX_IDX(ring_prod);
4400 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4401 WARN_ON(ring_prod <= (i - failure_cnt));
4405 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4406 i - failure_cnt, fp->index);
4408 fp->rx_bd_prod = ring_prod;
4409 /* Limit the CQE producer by the CQE ring size */
4410 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4412 fp->rx_pkt = fp->rx_calls = 0;
4414 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4416 return i - failure_cnt;
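/* Example outcome (illustrative): asked for 4096 BDs with 3 allocation
 * failures along the way, ring_prod advances only on success, so the
 * ring ends up with 4093 buffers, rx_skb_alloc_failed grows by 3 and
 * 4093 is returned for the caller's low-memory check.
 */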
4419 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4423 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4424 struct eth_rx_cqe_next_page *nextpg;
4426 nextpg = (struct eth_rx_cqe_next_page *)
4427 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4429 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4430 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4432 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4433 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
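/* The loop above links the last CQE of each RCQ page to the start of
 * the following page; for the final page, (i % NUM_RCQ_RINGS) wraps
 * the next-page pointer back to page 0, so the completion queue forms
 * a circular chain.
 */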
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;
	u8 cos;
	int rx_ring_size = 0;

	if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
		rx_ring_size = MIN_RX_SIZE_NONTPA;
		bp->rx_ring_size = rx_ring_size;
	} else if (!bp->rx_ring_size) {
		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);

		if (CHIP_IS_E3(bp)) {
			u32 cfg = SHMEM_RD(bp,
					   dev_info.port_hw_config[BP_PORT(bp)].
					   default_cfg);

			/* Decrease ring size for 1G functions */
			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
				rx_ring_size /= 10;
		}

		/* allocate at least number of buffers required by FW */
		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
				     MIN_RX_SIZE_TPA, rx_ring_size);

		bp->rx_ring_size = rx_ring_size;
	} else /* if rx_ring_size specified - use it */
		rx_ring_size = bp->rx_ring_size;

	DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
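
	/* In short: a user-specified bp->rx_ring_size always wins; otherwise
	 * the MAX_RX_AVAIL budget is split evenly across the RX queues, cut to
	 * a tenth on E3 SGMII (1G) ports, and finally raised to the firmware
	 * minimum (MIN_RX_SIZE_TPA or MIN_RX_SIZE_NONTPA).
	 */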
	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);

	if (!IS_FCOE_IDX(index)) {
		/* status blocks */
		if (!CHIP_IS_E1x(bp)) {
			sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
						    sizeof(struct host_hc_status_block_e2));
			if (!sb->e2_sb)
				goto alloc_mem_err;
		} else {
			sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
						     sizeof(struct host_hc_status_block_e1x));
			if (!sb->e1x_sb)
				goto alloc_mem_err;
		}
	}

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);
	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFUP,
			   "allocating tx memory of fp %d cos %d\n",
			   index, cos);

			txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
						      sizeof(struct sw_tx_bd),
						      GFP_KERNEL);
			if (!txdata->tx_buf_ring)
				goto alloc_mem_err;
			txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
							       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
			if (!txdata->tx_desc_ring)
				goto alloc_mem_err;
		}
	}
	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		bnx2x_fp(bp, index, rx_buf_ring) =
			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_buf_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_desc_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
					sizeof(struct eth_rx_bd) * NUM_RX_BD);
		if (!bnx2x_fp(bp, index, rx_desc_ring))
			goto alloc_mem_err;

		/* Seed all CQEs by 1s */
		bnx2x_fp(bp, index, rx_comp_ring) =
			BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
					 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
		if (!bnx2x_fp(bp, index, rx_comp_ring))
			goto alloc_mem_err;

		/* SGE ring */
		bnx2x_fp(bp, index, rx_page_ring) =
			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
				GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_page_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_sge_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
					BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		if (!bnx2x_fp(bp, index, rx_sge_ring))
			goto alloc_mem_err;
		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;
/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if the queue is not big enough;
	 * in these cases we disable the queue.
	 * Min size differs for OOO, TPA and non-TPA queues.
	 */
	if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}
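
/* A partially filled ring is still usable as long as it meets the firmware
 * minimum for its mode: a low-memory allocation that falls short of the
 * requested size but clears MIN_RX_SIZE_{TPA,NONTPA} is reported as success,
 * and the queue simply runs with fewer buffers.
 */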
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		/* FCoE */
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
			/* we will fail load process instead of marking
			 * NO_FCOE_FLAG
			 */
			return -ENOMEM;

	return 0;
}
static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/* 1. Allocate FP for leading - fatal if error
	 * 2. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		WARN_ON(delta < 0);
		bnx2x_shrink_eth_fp(bp, delta);
		if (CNIC_SUPPORT(bp))
			/* move non eth FPs next to last eth FP;
			 * must be done in that order:
			 * FCOE_IDX < FWD_IDX < OOO_IDX
			 */

			/* move FCoE fp even if NO_FCOE_FLAG is on */
			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
		bp->num_ethernet_queues -= delta;
		bp->num_queues = bp->num_ethernet_queues +
				 bp->num_cnic_queues;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}
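
/* Worked example (illustrative): with 8 ethernet queues configured and the
 * allocation loop stopping at i = 5, delta = 3; the FCoE fastpath slides
 * down from FCOE_IDX to FCOE_IDX - 3 so it stays adjacent to the last
 * surviving eth queue, and num_queues shrinks by the same delta.
 */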
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < bp->fp_array_size; i++)
		kfree(bp->fp[i].tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}
int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/*
	 * The biggest MSI-X table we might need is as a maximum number of fast
	 * path IGU SBs plus default SB (for PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;
	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
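
/* All failure paths above funnel through alloc_err, which simply calls
 * bnx2x_free_mem_bp(); that is safe for members never allocated, because the
 * bnx2x private area starts out zeroed and kfree(NULL) is a no-op.
 */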
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/*
	 * The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to reverse
	 * the configuration.
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}
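
/* Put differently: bnx2x_get_cur_phy_idx() reports the PHY actually driving
 * the link (post-swap), while the shmem link configuration is indexed by the
 * pre-swap position, so the index has to be mirrored whenever
 * PORT_HW_CFG_PHY_SWAPPED_ENABLED is set.
 */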
#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
		return -EPERM;
	}

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}
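
/* The lower bound above is expressed in L2-frame terms: new_mtu + ETH_HLEN
 * must reach ETH_MIN_PACKET_SIZE (the minimum Ethernet frame), while the
 * upper bound is the jumbo payload limit. Buffer sizes derived from dev->mtu
 * are only recomputed inside nic load, which is why the assignment itself
 * needs no locking beyond rtnl.
 */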
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		netdev_features_t changed = dev->features ^ features;

		/* Revert the requested changes in features if they
		 * would require internal reload of PF in bnx2x_set_features().
		 */
		if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
			features &= ~NETIF_F_RXCSUM;
			features |= dev->features & NETIF_F_RXCSUM;
		}

		if (changed & NETIF_F_LOOPBACK) {
			features &= ~NETIF_F_LOOPBACK;
			features |= dev->features & NETIF_F_LOOPBACK;
		}
	}

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM)) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}
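
/* The clear-then-restore idiom above (mask the bit out, then OR back the
 * device's current value) is how ndo_fix_features callbacks silently veto a
 * requested toggle: the core applies the returned feature mask, not the one
 * the user asked for.
 */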
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool bnx2x_reload = false;
	int rc;

	/* VFs or non SRIOV PFs should be able to change loopback feature */
	if (!pci_num_vf(bp->pdev)) {
		if (features & NETIF_F_LOOPBACK) {
			if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
				bp->link_params.loopback_mode = LOOPBACK_BMAC;
				bnx2x_reload = true;
			}
		} else {
			if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
				bp->link_params.loopback_mode = LOOPBACK_NONE;
				bnx2x_reload = true;
			}
		}
	}

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
		changes &= ~NETIF_F_GRO;

	/* if GRO is changed while HW TPA is off, don't force a reload */
	if ((changes & NETIF_F_GRO) && bp->disable_tpa)
		changes &= ~NETIF_F_GRO;

	if (changes)
		bnx2x_reload = true;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
			dev->features = features;
			rc = bnx2x_reload_if_running(dev);
			if (rc)
				return rc;
		}
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}
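
/* Both writes store a CDU "validation" byte: CDU_RSRVD_VALUE_TYPE_A() encodes
 * the HW CID, the region (UCM vs. XCM aggregation context) and the connection
 * type, so the Context Data Unit can sanity-check the context before the
 * storms use it.
 */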
static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}
static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_atomic();
	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
	   flag);
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);