1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
26 #include <net/ip6_checksum.h>
27 #include <net/busy_poll.h>
28 #include <linux/prefetch.h>
29 #include "bnx2x_cmn.h"
30 #include "bnx2x_init.h"
33 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
34 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
35 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
36 static int bnx2x_poll(struct napi_struct *napi, int budget);
38 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
42 /* Add NAPI objects */
43 for_each_rx_queue_cnic(bp, i) {
44 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
45 bnx2x_poll, NAPI_POLL_WEIGHT);
46 napi_hash_add(&bnx2x_fp(bp, i, napi));
50 static void bnx2x_add_all_napi(struct bnx2x *bp)
54 /* Add NAPI objects */
55 for_each_eth_queue(bp, i) {
56 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
57 bnx2x_poll, NAPI_POLL_WEIGHT);
58 napi_hash_add(&bnx2x_fp(bp, i, napi));
62 static int bnx2x_calc_num_queues(struct bnx2x *bp)
64 int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
66 /* Reduce memory usage in kdump environment by using only one queue */
70 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
75 * bnx2x_move_fp - move content of the fastpath structure.
78 * @from: source FP index
79 * @to: destination FP index
81 * Makes sure the contents of the bp->fp[to].napi are kept
82 * intact. This is done by first copying the napi struct from
83 * the target to the source, and then mem copying the entire
84 * source onto the target. Update txdata pointers and related parameters.
87 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
89 struct bnx2x_fastpath *from_fp = &bp->fp[from];
90 struct bnx2x_fastpath *to_fp = &bp->fp[to];
91 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
92 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
93 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
94 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
95 int old_max_eth_txqs, new_max_eth_txqs;
96 int old_txdata_index = 0, new_txdata_index = 0;
97 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
99 /* Copy the NAPI object as it has been already initialized */
100 from_fp->napi = to_fp->napi;
102 /* Move bnx2x_fastpath contents */
103 memcpy(to_fp, from_fp, sizeof(*to_fp));
106 /* Retain the tpa_info of the original `to' version as we don't want
107 * 2 FPs to contain the same tpa_info pointer.
109 to_fp->tpa_info = old_tpa_info;
111 /* move sp_objs contents as well, as their indices match fp ones */
112 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
114 /* move fp_stats contents as well, as their indices match fp ones */
115 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
117 /* Update txdata pointers in fp and move txdata content accordingly:
118 * Each fp consumes 'max_cos' txdata structures, so the index should be
119 * decremented by max_cos x delta.
122 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
123 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
125 if (from == FCOE_IDX(bp)) {
126 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
127 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
130 memcpy(&bp->bnx2x_txq[new_txdata_index],
131 &bp->bnx2x_txq[old_txdata_index],
132 sizeof(struct bnx2x_fp_txdata));
133 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
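/* Illustrative arithmetic (hypothetical sizes): with max_cos = 3 and the FCoE
 * fastpath moving back by delta = from - to = 2, its txdata entry moves back
 * by max_cos * delta = 6 slots, matching the comment above.
 */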
137 * bnx2x_fill_fw_str - Fill buffer with FW version string.
140 * @buf: character buffer to fill with the fw name
141 * @buf_len: length of the above buffer
144 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
147 u8 phy_fw_ver[PHY_FW_VER_LEN];
149 phy_fw_ver[0] = '\0';
150 bnx2x_get_ext_phy_fw_version(&bp->link_params,
151 phy_fw_ver, PHY_FW_VER_LEN);
152 strlcpy(buf, bp->fw_ver, buf_len);
153 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
155 (bp->common.bc_ver & 0xff0000) >> 16,
156 (bp->common.bc_ver & 0xff00) >> 8,
157 (bp->common.bc_ver & 0xff),
158 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
160 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
165 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
168 * @delta: number of eth queues which were not allocated
170 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
172 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
174 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
175 * backward along the array could cause memory to be overwritten
177 for (cos = 1; cos < bp->max_cos; cos++) {
178 for (i = 0; i < old_eth_num - delta; i++) {
179 struct bnx2x_fastpath *fp = &bp->fp[i];
180 int new_idx = cos * (old_eth_num - delta) + i;
182 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
183 sizeof(struct bnx2x_fp_txdata));
184 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
189 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
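/* Illustrative (no-MCP load counting, see bnx2x_nic_load_no_mcp() below):
 * after two functions have loaded on port 0 of path 0, bnx2x_load_count[0]
 * would hold {2, 2, 0} - the common count and the port-0 count incremented
 * twice, the port-1 count untouched.
 */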
191 /* free skb in the packet ring at pos idx
192 * return idx of last bd freed
194 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
195 u16 idx, unsigned int *pkts_compl,
196 unsigned int *bytes_compl)
198 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
199 struct eth_tx_start_bd *tx_start_bd;
200 struct eth_tx_bd *tx_data_bd;
201 struct sk_buff *skb = tx_buf->skb;
202 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
204 u16 split_bd_len = 0;
206 /* prefetch skb end pointer to speedup dev_kfree_skb() */
209 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
210 txdata->txq_index, idx, tx_buf, skb);
212 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
214 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
215 #ifdef BNX2X_STOP_ON_ERROR
216 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
217 BNX2X_ERR("BAD nbd!\n");
221 new_cons = nbd + tx_buf->first_bd;
223 /* Get the next bd */
224 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
226 /* Skip a parse bd... */
228 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
230 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
231 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
232 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
233 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
235 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
239 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
240 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
246 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
247 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
248 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
250 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
257 (*bytes_compl) += skb->len;
260 dev_kfree_skb_any(skb);
261 tx_buf->first_bd = 0;
267 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
269 struct netdev_queue *txq;
270 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
271 unsigned int pkts_compl = 0, bytes_compl = 0;
273 #ifdef BNX2X_STOP_ON_ERROR
274 if (unlikely(bp->panic))
278 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
279 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
280 sw_cons = txdata->tx_pkt_cons;
282 while (sw_cons != hw_cons) {
285 pkt_cons = TX_BD(sw_cons);
287 DP(NETIF_MSG_TX_DONE,
288 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
289 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
291 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
292 &pkts_compl, &bytes_compl);
297 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
299 txdata->tx_pkt_cons = sw_cons;
300 txdata->tx_bd_cons = bd_cons;
302 /* Need to make the tx_bd_cons update visible to start_xmit()
303 * before checking for netif_tx_queue_stopped(). Without the
304 * memory barrier, there is a small possibility that
305 * start_xmit() will miss it and cause the queue to be stopped
307 * On the other hand we need an rmb() here to ensure the proper
308 * ordering of bit testing in the following
309 * netif_tx_queue_stopped(txq) call.
313 if (unlikely(netif_tx_queue_stopped(txq))) {
314 /* Taking tx_lock() is needed to prevent re-enabling the queue
315 * while it's empty. This could have happened if rx_action() gets
316 * suspended in bnx2x_tx_int() after the condition before
317 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
319 * stops the queue->sees fresh tx_bd_cons->releases the queue->
320 * sends some packets consuming the whole queue again->
324 __netif_tx_lock(txq, smp_processor_id());
326 if ((netif_tx_queue_stopped(txq)) &&
327 (bp->state == BNX2X_STATE_OPEN) &&
328 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
329 netif_tx_wake_queue(txq);
331 __netif_tx_unlock(txq);
336 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
339 u16 last_max = fp->last_max_sge;
341 if (SUB_S16(idx, last_max) > 0)
342 fp->last_max_sge = idx;
345 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
347 struct eth_end_agg_rx_cqe *cqe)
349 struct bnx2x *bp = fp->bp;
350 u16 last_max, last_elem, first_elem;
357 /* First mark all used pages */
358 for (i = 0; i < sge_len; i++)
359 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
360 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
362 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
363 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
365 /* Here we assume that the last SGE index is the biggest */
366 prefetch((void *)(fp->sge_mask));
367 bnx2x_update_last_max_sge(fp,
368 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
370 last_max = RX_SGE(fp->last_max_sge);
371 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
372 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
374 /* If ring is not full */
375 if (last_elem + 1 != first_elem)
378 /* Now update the prod */
379 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
380 if (likely(fp->sge_mask[i]))
383 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
384 delta += BIT_VEC64_ELEM_SZ;
388 fp->rx_sge_prod += delta;
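/* Illustrative: each fully consumed mask element found by the loop above adds
 * BIT_VEC64_ELEM_SZ to delta, so the SGE producer advances in whole-element
 * steps rather than one slot at a time.
 */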
389 /* clear page-end entries */
390 bnx2x_clear_sge_mask_next_elems(fp);
393 DP(NETIF_MSG_RX_STATUS,
394 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
395 fp->last_max_sge, fp->rx_sge_prod);
398 /* Get Toeplitz hash value in the skb using the value from the
399 * CQE (calculated by HW).
401 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
402 const struct eth_fast_path_rx_cqe *cqe,
403 enum pkt_hash_types *rxhash_type)
405 /* Get Toeplitz hash from CQE */
406 if ((bp->dev->features & NETIF_F_RXHASH) &&
407 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
408 enum eth_rss_hash_type htype;
410 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
411 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
412 (htype == TCP_IPV6_HASH_TYPE)) ?
413 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
415 return le32_to_cpu(cqe->rss_hash_result);
417 *rxhash_type = PKT_HASH_TYPE_NONE;
421 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
423 struct eth_fast_path_rx_cqe *cqe)
425 struct bnx2x *bp = fp->bp;
426 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
427 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
428 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
430 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
431 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
433 /* print error if current state != stop */
434 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
435 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
437 /* Try to map an empty data buffer from the aggregation info */
438 mapping = dma_map_single(&bp->pdev->dev,
439 first_buf->data + NET_SKB_PAD,
440 fp->rx_buf_size, DMA_FROM_DEVICE);
442 * ...if it fails - move the skb from the consumer to the producer
443 * and set the current aggregation state as ERROR to drop it
444 * when TPA_STOP arrives.
447 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
448 /* Move the BD from the consumer to the producer */
449 bnx2x_reuse_rx_data(fp, cons, prod);
450 tpa_info->tpa_state = BNX2X_TPA_ERROR;
454 /* move empty data from pool to prod */
455 prod_rx_buf->data = first_buf->data;
456 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
457 /* point prod_bd to new data */
458 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
459 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
461 /* move partial skb from cons to pool (don't unmap yet) */
462 *first_buf = *cons_rx_buf;
464 /* mark bin state as START */
465 tpa_info->parsing_flags =
466 le16_to_cpu(cqe->pars_flags.flags);
467 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
468 tpa_info->tpa_state = BNX2X_TPA_START;
469 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
470 tpa_info->placement_offset = cqe->placement_offset;
471 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
472 if (fp->mode == TPA_MODE_GRO) {
473 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
474 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
475 tpa_info->gro_size = gro_size;
478 #ifdef BNX2X_STOP_ON_ERROR
479 fp->tpa_queue_used |= (1 << queue);
480 #ifdef _ASM_GENERIC_INT_L64_H
481 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
483 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
489 /* Timestamp option length allowed for TPA aggregation:
491 * nop nop kind length echo val
493 #define TPA_TSTAMP_OPT_LEN 12
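/* Breakdown: NOP(1) + NOP(1) + kind(1) + length(1) + TS value(4) + TS echo(4)
 * = 12 bytes.
 */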
495 * bnx2x_set_gro_params - compute GRO values
498 * @parsing_flags: parsing flags from the START CQE
499 * @len_on_bd: total length of the first packet for the
501 * @pkt_len: length of all segments
503 * Approximate value of the MSS for this aggregation, calculated using
504 * its first packet.
505 * Compute number of aggregated segments, and gso_type.
507 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
508 u16 len_on_bd, unsigned int pkt_len,
509 u16 num_of_coalesced_segs)
511 /* TPA aggregation won't have either IP options or TCP options
512 * other than timestamp or IPv6 extension headers.
514 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
516 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
517 PRS_FLAG_OVERETH_IPV6) {
518 hdrs_len += sizeof(struct ipv6hdr);
519 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
521 hdrs_len += sizeof(struct iphdr);
522 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
525 /* Check if there was a TCP timestamp; if there was, it will
526 * always be 12 bytes long: nop nop kind length echo val.
528 * Otherwise FW would close the aggregation.
530 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
531 hdrs_len += TPA_TSTAMP_OPT_LEN;
533 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
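/* Illustrative arithmetic: an IPv4 aggregation without timestamps gives
 * hdrs_len = ETH_HLEN(14) + iphdr(20) + tcphdr(20) = 54, so
 * gso_size = len_on_bd - 54.
 */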
535 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
536 * to skb_shinfo(skb)->gso_segs
538 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
541 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
542 u16 index, gfp_t gfp_mask)
544 struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
545 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
546 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
549 if (unlikely(page == NULL)) {
550 BNX2X_ERR("Can't alloc sge\n");
554 mapping = dma_map_page(&bp->pdev->dev, page, 0,
555 SGE_PAGES, DMA_FROM_DEVICE);
556 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
557 __free_pages(page, PAGES_PER_SGE_SHIFT);
558 BNX2X_ERR("Can't map sge\n");
563 dma_unmap_addr_set(sw_buf, mapping, mapping);
565 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
566 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
571 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
572 struct bnx2x_agg_info *tpa_info,
575 struct eth_end_agg_rx_cqe *cqe,
578 struct sw_rx_page *rx_pg, old_rx_pg;
579 u32 i, frag_len, frag_size;
580 int err, j, frag_id = 0;
581 u16 len_on_bd = tpa_info->len_on_bd;
582 u16 full_page = 0, gro_size = 0;
584 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
586 if (fp->mode == TPA_MODE_GRO) {
587 gro_size = tpa_info->gro_size;
588 full_page = tpa_info->full_page;
591 /* This is needed in order to enable forwarding support */
593 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
594 le16_to_cpu(cqe->pkt_len),
595 le16_to_cpu(cqe->num_of_coalesced_segs));
597 #ifdef BNX2X_STOP_ON_ERROR
598 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
599 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
601 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
607 /* Run through the SGL and compose the fragmented skb */
608 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
609 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
611 /* FW gives the indices of the SGE as if the ring is an array
612 (meaning that "next" element will consume 2 indices) */
613 if (fp->mode == TPA_MODE_GRO)
614 frag_len = min_t(u32, frag_size, (u32)full_page);
616 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
618 rx_pg = &fp->rx_page_ring[sge_idx];
621 /* If we fail to allocate a substitute page, we simply stop
622 where we are and drop the whole packet */
623 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
625 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
629 /* Unmap the page as we're going to pass it to the stack */
630 dma_unmap_page(&bp->pdev->dev,
631 dma_unmap_addr(&old_rx_pg, mapping),
632 SGE_PAGES, DMA_FROM_DEVICE);
633 /* Add one frag and update the appropriate fields in the skb */
634 if (fp->mode == TPA_MODE_LRO)
635 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
639 for (rem = frag_len; rem > 0; rem -= gro_size) {
640 int len = rem > gro_size ? gro_size : rem;
641 skb_fill_page_desc(skb, frag_id++,
642 old_rx_pg.page, offset, len);
644 get_page(old_rx_pg.page);
649 skb->data_len += frag_len;
650 skb->truesize += SGE_PAGES;
651 skb->len += frag_len;
653 frag_size -= frag_len;
659 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
661 if (fp->rx_frag_size)
662 put_page(virt_to_head_page(data));
667 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
669 if (fp->rx_frag_size) {
670 /* GFP_KERNEL allocations are used only during initialization */
671 if (unlikely(gfp_mask & __GFP_WAIT))
672 return (void *)__get_free_page(gfp_mask);
674 return netdev_alloc_frag(fp->rx_frag_size);
677 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
681 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
683 const struct iphdr *iph = ip_hdr(skb);
686 skb_set_transport_header(skb, sizeof(struct iphdr));
689 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
690 iph->saddr, iph->daddr, 0);
693 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
695 struct ipv6hdr *iph = ipv6_hdr(skb);
698 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
701 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
702 &iph->saddr, &iph->daddr, 0);
705 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
706 void (*gro_func)(struct bnx2x*, struct sk_buff*))
708 skb_set_network_header(skb, 0);
710 tcp_gro_complete(skb);
714 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
718 if (skb_shinfo(skb)->gso_size) {
719 switch (be16_to_cpu(skb->protocol)) {
721 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
724 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
727 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
728 be16_to_cpu(skb->protocol));
732 skb_record_rx_queue(skb, fp->rx_queue);
733 napi_gro_receive(&fp->napi, skb);
736 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
737 struct bnx2x_agg_info *tpa_info,
739 struct eth_end_agg_rx_cqe *cqe,
742 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
743 u8 pad = tpa_info->placement_offset;
744 u16 len = tpa_info->len_on_bd;
745 struct sk_buff *skb = NULL;
746 u8 *new_data, *data = rx_buf->data;
747 u8 old_tpa_state = tpa_info->tpa_state;
749 tpa_info->tpa_state = BNX2X_TPA_STOP;
751 /* If there was an error during the handling of the TPA_START -
752 * drop this aggregation.
754 if (old_tpa_state == BNX2X_TPA_ERROR)
757 /* Try to allocate the new data */
758 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
759 /* Unmap skb in the pool anyway, as we are going to change
760 pool entry status to BNX2X_TPA_STOP even if new skb allocation
762 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
763 fp->rx_buf_size, DMA_FROM_DEVICE);
764 if (likely(new_data))
765 skb = build_skb(data, fp->rx_frag_size);
768 #ifdef BNX2X_STOP_ON_ERROR
769 if (pad + len > fp->rx_buf_size) {
770 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
771 pad, len, fp->rx_buf_size);
777 skb_reserve(skb, pad + NET_SKB_PAD);
779 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
781 skb->protocol = eth_type_trans(skb, bp->dev);
782 skb->ip_summed = CHECKSUM_UNNECESSARY;
784 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
785 skb, cqe, cqe_idx)) {
786 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
787 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
788 bnx2x_gro_receive(bp, fp, skb);
790 DP(NETIF_MSG_RX_STATUS,
791 "Failed to allocate new pages - dropping packet!\n");
792 dev_kfree_skb_any(skb);
795 /* put new data in bin */
796 rx_buf->data = new_data;
801 bnx2x_frag_free(fp, new_data);
803 /* drop the packet and keep the buffer in the bin */
804 DP(NETIF_MSG_RX_STATUS,
805 "Failed to allocate or map a new skb - dropping packet!\n");
806 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
809 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
810 u16 index, gfp_t gfp_mask)
813 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
814 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
817 data = bnx2x_frag_alloc(fp, gfp_mask);
818 if (unlikely(data == NULL))
821 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
824 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
825 bnx2x_frag_free(fp, data);
826 BNX2X_ERR("Can't map rx data\n");
831 dma_unmap_addr_set(rx_buf, mapping, mapping);
833 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
834 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
840 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
841 struct bnx2x_fastpath *fp,
842 struct bnx2x_eth_q_stats *qstats)
844 /* Do nothing if no L4 csum validation was done.
845 * We do not check whether IP csum was validated. For IPv4 we assume
846 * that if the card got as far as validating the L4 csum, it also
847 * validated the IP csum. IPv6 has no IP csum.
849 if (cqe->fast_path_cqe.status_flags &
850 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
853 /* If L4 validation was done, check if an error was found. */
855 if (cqe->fast_path_cqe.type_error_flags &
856 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
857 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
858 qstats->hw_csum_err++;
860 skb->ip_summed = CHECKSUM_UNNECESSARY;
863 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
865 struct bnx2x *bp = fp->bp;
866 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
867 u16 sw_comp_cons, sw_comp_prod;
869 union eth_rx_cqe *cqe;
870 struct eth_fast_path_rx_cqe *cqe_fp;
872 #ifdef BNX2X_STOP_ON_ERROR
873 if (unlikely(bp->panic))
879 bd_cons = fp->rx_bd_cons;
880 bd_prod = fp->rx_bd_prod;
881 bd_prod_fw = bd_prod;
882 sw_comp_cons = fp->rx_comp_cons;
883 sw_comp_prod = fp->rx_comp_prod;
885 comp_ring_cons = RCQ_BD(sw_comp_cons);
886 cqe = &fp->rx_comp_ring[comp_ring_cons];
887 cqe_fp = &cqe->fast_path_cqe;
889 DP(NETIF_MSG_RX_STATUS,
890 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
892 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
893 struct sw_rx_bd *rx_buf = NULL;
896 enum eth_rx_cqe_type cqe_fp_type;
900 enum pkt_hash_types rxhash_type;
902 #ifdef BNX2X_STOP_ON_ERROR
903 if (unlikely(bp->panic))
907 bd_prod = RX_BD(bd_prod);
908 bd_cons = RX_BD(bd_cons);
910 /* A rmb() is required to ensure that the CQE is not read
911 * before it is written by the adapter DMA. PCI ordering
912 * rules will make sure the other fields are written before
913 * the marker at the end of struct eth_fast_path_rx_cqe
914 * but without rmb() a weakly ordered processor can process
915 * stale data. Without the barrier TPA state-machine might
916 * enter inconsistent state and kernel stack might be
917 * provided with incorrect packet description - these lead
918 * to various kernel crashes.
922 cqe_fp_flags = cqe_fp->type_error_flags;
923 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
925 DP(NETIF_MSG_RX_STATUS,
926 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
927 CQE_TYPE(cqe_fp_flags),
928 cqe_fp_flags, cqe_fp->status_flags,
929 le32_to_cpu(cqe_fp->rss_hash_result),
930 le16_to_cpu(cqe_fp->vlan_tag),
931 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
933 /* is this a slowpath msg? */
934 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
935 bnx2x_sp_event(fp, cqe);
939 rx_buf = &fp->rx_buf_ring[bd_cons];
942 if (!CQE_TYPE_FAST(cqe_fp_type)) {
943 struct bnx2x_agg_info *tpa_info;
944 u16 frag_size, pages;
945 #ifdef BNX2X_STOP_ON_ERROR
947 if (fp->disable_tpa &&
948 (CQE_TYPE_START(cqe_fp_type) ||
949 CQE_TYPE_STOP(cqe_fp_type)))
950 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
951 CQE_TYPE(cqe_fp_type));
954 if (CQE_TYPE_START(cqe_fp_type)) {
955 u16 queue = cqe_fp->queue_index;
956 DP(NETIF_MSG_RX_STATUS,
957 "calling tpa_start on queue %d\n",
960 bnx2x_tpa_start(fp, queue,
966 queue = cqe->end_agg_cqe.queue_index;
967 tpa_info = &fp->tpa_info[queue];
968 DP(NETIF_MSG_RX_STATUS,
969 "calling tpa_stop on queue %d\n",
972 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
975 if (fp->mode == TPA_MODE_GRO)
976 pages = (frag_size + tpa_info->full_page - 1) /
979 pages = SGE_PAGE_ALIGN(frag_size) >>
982 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
983 &cqe->end_agg_cqe, comp_ring_cons);
984 #ifdef BNX2X_STOP_ON_ERROR
989 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
993 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
994 pad = cqe_fp->placement_offset;
995 dma_sync_single_for_cpu(&bp->pdev->dev,
996 dma_unmap_addr(rx_buf, mapping),
997 pad + RX_COPY_THRESH,
1000 prefetch(data + pad); /* speedup eth_type_trans() */
1001 /* is this an error packet? */
1002 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1003 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1004 "ERROR flags %x rx packet %u\n",
1005 cqe_fp_flags, sw_comp_cons);
1006 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1010 /* Since we don't have a jumbo ring
1011 * copy small packets if mtu > 1500
1013 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1014 (len <= RX_COPY_THRESH)) {
1015 skb = netdev_alloc_skb_ip_align(bp->dev, len);
1017 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1018 "ERROR packet dropped because of alloc failure\n");
1019 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1022 memcpy(skb->data, data + pad, len);
1023 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1025 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1026 GFP_ATOMIC) == 0)) {
1027 dma_unmap_single(&bp->pdev->dev,
1028 dma_unmap_addr(rx_buf, mapping),
1031 skb = build_skb(data, fp->rx_frag_size);
1032 if (unlikely(!skb)) {
1033 bnx2x_frag_free(fp, data);
1034 bnx2x_fp_qstats(bp, fp)->
1035 rx_skb_alloc_failed++;
1038 skb_reserve(skb, pad);
1040 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1041 "ERROR packet dropped because of alloc failure\n");
1042 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1044 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1050 skb->protocol = eth_type_trans(skb, bp->dev);
1052 /* Set Toeplitz hash for a non-LRO skb */
1053 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1054 skb_set_hash(skb, rxhash, rxhash_type);
1056 skb_checksum_none_assert(skb);
1058 if (bp->dev->features & NETIF_F_RXCSUM)
1059 bnx2x_csum_validate(skb, cqe, fp,
1060 bnx2x_fp_qstats(bp, fp));
1062 skb_record_rx_queue(skb, fp->rx_queue);
1064 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1066 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1067 le16_to_cpu(cqe_fp->vlan_tag));
1069 skb_mark_napi_id(skb, &fp->napi);
1071 if (bnx2x_fp_ll_polling(fp))
1072 netif_receive_skb(skb);
1074 napi_gro_receive(&fp->napi, skb);
1076 rx_buf->data = NULL;
1078 bd_cons = NEXT_RX_IDX(bd_cons);
1079 bd_prod = NEXT_RX_IDX(bd_prod);
1080 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1083 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1084 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1086 /* mark CQE as free */
1087 BNX2X_SEED_CQE(cqe_fp);
1089 if (rx_pkt == budget)
1092 comp_ring_cons = RCQ_BD(sw_comp_cons);
1093 cqe = &fp->rx_comp_ring[comp_ring_cons];
1094 cqe_fp = &cqe->fast_path_cqe;
1097 fp->rx_bd_cons = bd_cons;
1098 fp->rx_bd_prod = bd_prod_fw;
1099 fp->rx_comp_cons = sw_comp_cons;
1100 fp->rx_comp_prod = sw_comp_prod;
1102 /* Update producers */
1103 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1106 fp->rx_pkt += rx_pkt;
1112 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1114 struct bnx2x_fastpath *fp = fp_cookie;
1115 struct bnx2x *bp = fp->bp;
1119 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1120 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1122 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1124 #ifdef BNX2X_STOP_ON_ERROR
1125 if (unlikely(bp->panic))
1129 /* Handle Rx and Tx according to MSI-X vector */
1130 for_each_cos_in_tx_queue(fp, cos)
1131 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1133 prefetch(&fp->sb_running_index[SM_RX_ID]);
1134 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1139 /* HW Lock for shared dual port PHYs */
1140 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1142 mutex_lock(&bp->port.phy_mutex);
1144 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1147 void bnx2x_release_phy_lock(struct bnx2x *bp)
1149 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1151 mutex_unlock(&bp->port.phy_mutex);
1154 /* calculates MF speed according to current linespeed and MF configuration */
1155 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1157 u16 line_speed = bp->link_vars.line_speed;
1159 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1160 bp->mf_config[BP_VN(bp)]);
1162 /* Calculate the current MAX line speed limit for the MF
1166 line_speed = (line_speed * maxCfg) / 100;
1167 else { /* SD mode */
1168 u16 vn_max_rate = maxCfg * 100;
1170 if (vn_max_rate < line_speed)
1171 line_speed = vn_max_rate;
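/* Illustrative: with line_speed = 10000 Mbps and maxCfg = 50, the SI branch
 * above yields 10000 * 50 / 100 = 5000 Mbps, while the SD branch would cap
 * the speed at maxCfg * 100 = 5000 Mbps.
 */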
1179 * bnx2x_fill_report_data - fill link report data to report
1181 * @bp: driver handle
1182 * @data: link state to update
1184 * It uses non-atomic bit operations because it is called under the mutex.
1186 static void bnx2x_fill_report_data(struct bnx2x *bp,
1187 struct bnx2x_link_report_data *data)
1189 memset(data, 0, sizeof(*data));
1192 /* Fill the report data: effective line speed */
1193 data->line_speed = bnx2x_get_mf_speed(bp);
1196 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1197 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1198 &data->link_report_flags);
1200 if (!BNX2X_NUM_ETH_QUEUES(bp))
1201 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1202 &data->link_report_flags);
1205 if (bp->link_vars.duplex == DUPLEX_FULL)
1206 __set_bit(BNX2X_LINK_REPORT_FD,
1207 &data->link_report_flags);
1209 /* Rx Flow Control is ON */
1210 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1211 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1212 &data->link_report_flags);
1214 /* Tx Flow Control is ON */
1215 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1216 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1217 &data->link_report_flags);
1219 *data = bp->vf_link_vars;
1224 * bnx2x_link_report - report link status to OS.
1226 * @bp: driver handle
1228 * Calls the __bnx2x_link_report() under the same locking scheme
1229 * as a link/PHY state managing code to ensure a consistent link
1233 void bnx2x_link_report(struct bnx2x *bp)
1235 bnx2x_acquire_phy_lock(bp);
1236 __bnx2x_link_report(bp);
1237 bnx2x_release_phy_lock(bp);
1241 * __bnx2x_link_report - report link status to OS.
1243 * @bp: driver handle
1245 * Non-atomic implementation.
1246 * Should be called under the phy_lock.
1248 void __bnx2x_link_report(struct bnx2x *bp)
1250 struct bnx2x_link_report_data cur_data;
1253 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1254 bnx2x_read_mf_cfg(bp);
1256 /* Read the current link report info */
1257 bnx2x_fill_report_data(bp, &cur_data);
1259 /* Don't report link down or exactly the same link status twice */
1260 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1261 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1262 &bp->last_reported_link.link_report_flags) &&
1263 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1264 &cur_data.link_report_flags)))
1269 /* We are going to report new link parameters now -
1270 * remember the current data for the next time.
1272 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1274 /* propagate status to VFs */
1276 bnx2x_iov_link_update(bp);
1278 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1279 &cur_data.link_report_flags)) {
1280 netif_carrier_off(bp->dev);
1281 netdev_err(bp->dev, "NIC Link is Down\n");
1287 netif_carrier_on(bp->dev);
1289 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1290 &cur_data.link_report_flags))
1295 /* Handle the FC at the end so that only these flags would be
1296 * possibly set. This way we may easily check if there is no FC
1299 if (cur_data.link_report_flags) {
1300 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1301 &cur_data.link_report_flags)) {
1302 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1303 &cur_data.link_report_flags))
1304 flow = "ON - receive & transmit";
1306 flow = "ON - receive";
1308 flow = "ON - transmit";
1313 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1314 cur_data.line_speed, duplex, flow);
1318 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1322 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1323 struct eth_rx_sge *sge;
1325 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1327 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1328 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1331 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1332 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1336 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1337 struct bnx2x_fastpath *fp, int last)
1341 for (i = 0; i < last; i++) {
1342 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1343 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1344 u8 *data = first_buf->data;
1347 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1350 if (tpa_info->tpa_state == BNX2X_TPA_START)
1351 dma_unmap_single(&bp->pdev->dev,
1352 dma_unmap_addr(first_buf, mapping),
1353 fp->rx_buf_size, DMA_FROM_DEVICE);
1354 bnx2x_frag_free(fp, data);
1355 first_buf->data = NULL;
1359 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1363 for_each_rx_queue_cnic(bp, j) {
1364 struct bnx2x_fastpath *fp = &bp->fp[j];
1368 /* Activate BD ring */
1370 * this will generate an interrupt (to the TSTORM)
1371 * must only be done after chip is initialized
1373 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1378 void bnx2x_init_rx_rings(struct bnx2x *bp)
1380 int func = BP_FUNC(bp);
1384 /* Allocate TPA resources */
1385 for_each_eth_queue(bp, j) {
1386 struct bnx2x_fastpath *fp = &bp->fp[j];
1389 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1391 if (!fp->disable_tpa) {
1392 /* Fill the per-aggregation pool */
1393 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1394 struct bnx2x_agg_info *tpa_info =
1396 struct sw_rx_bd *first_buf =
1397 &tpa_info->first_buf;
1400 bnx2x_frag_alloc(fp, GFP_KERNEL);
1401 if (!first_buf->data) {
1402 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1404 bnx2x_free_tpa_pool(bp, fp, i);
1405 fp->disable_tpa = 1;
1408 dma_unmap_addr_set(first_buf, mapping, 0);
1409 tpa_info->tpa_state = BNX2X_TPA_STOP;
1412 /* "next page" elements initialization */
1413 bnx2x_set_next_page_sgl(fp);
1415 /* set SGEs bit mask */
1416 bnx2x_init_sge_ring_bit_mask(fp);
1418 /* Allocate SGEs and initialize the ring elements */
1419 for (i = 0, ring_prod = 0;
1420 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1422 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1424 BNX2X_ERR("was only able to allocate %d rx sges\n",
1426 BNX2X_ERR("disabling TPA for queue[%d]\n",
1428 /* Cleanup already allocated elements */
1429 bnx2x_free_rx_sge_range(bp, fp,
1431 bnx2x_free_tpa_pool(bp, fp,
1433 fp->disable_tpa = 1;
1437 ring_prod = NEXT_SGE_IDX(ring_prod);
1440 fp->rx_sge_prod = ring_prod;
1444 for_each_eth_queue(bp, j) {
1445 struct bnx2x_fastpath *fp = &bp->fp[j];
1449 /* Activate BD ring */
1451 * this will generate an interrupt (to the TSTORM)
1452 * must only be done after chip is initialized
1454 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1460 if (CHIP_IS_E1(bp)) {
1461 REG_WR(bp, BAR_USTRORM_INTMEM +
1462 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1463 U64_LO(fp->rx_comp_mapping));
1464 REG_WR(bp, BAR_USTRORM_INTMEM +
1465 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1466 U64_HI(fp->rx_comp_mapping));
1471 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1474 struct bnx2x *bp = fp->bp;
1476 for_each_cos_in_tx_queue(fp, cos) {
1477 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1478 unsigned pkts_compl = 0, bytes_compl = 0;
1480 u16 sw_prod = txdata->tx_pkt_prod;
1481 u16 sw_cons = txdata->tx_pkt_cons;
1483 while (sw_cons != sw_prod) {
1484 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1485 &pkts_compl, &bytes_compl);
1489 netdev_tx_reset_queue(
1490 netdev_get_tx_queue(bp->dev,
1491 txdata->txq_index));
1495 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1499 for_each_tx_queue_cnic(bp, i) {
1500 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1504 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1508 for_each_eth_queue(bp, i) {
1509 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1513 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1515 struct bnx2x *bp = fp->bp;
1518 /* ring wasn't allocated */
1519 if (fp->rx_buf_ring == NULL)
1522 for (i = 0; i < NUM_RX_BD; i++) {
1523 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1524 u8 *data = rx_buf->data;
1528 dma_unmap_single(&bp->pdev->dev,
1529 dma_unmap_addr(rx_buf, mapping),
1530 fp->rx_buf_size, DMA_FROM_DEVICE);
1532 rx_buf->data = NULL;
1533 bnx2x_frag_free(fp, data);
1537 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1541 for_each_rx_queue_cnic(bp, j) {
1542 bnx2x_free_rx_bds(&bp->fp[j]);
1546 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1550 for_each_eth_queue(bp, j) {
1551 struct bnx2x_fastpath *fp = &bp->fp[j];
1553 bnx2x_free_rx_bds(fp);
1555 if (!fp->disable_tpa)
1556 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1560 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1562 bnx2x_free_tx_skbs_cnic(bp);
1563 bnx2x_free_rx_skbs_cnic(bp);
1566 void bnx2x_free_skbs(struct bnx2x *bp)
1568 bnx2x_free_tx_skbs(bp);
1569 bnx2x_free_rx_skbs(bp);
1572 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1574 /* load old values */
1575 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1577 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1578 /* leave all but MAX value */
1579 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1581 /* set new MAX value */
1582 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1583 & FUNC_MF_CFG_MAX_BW_MASK;
1585 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1590 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1592 * @bp: driver handle
1593 * @nvecs: number of vectors to be released
1595 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1599 if (nvecs == offset)
1602 /* VFs don't have a default SB */
1604 free_irq(bp->msix_table[offset].vector, bp->dev);
1605 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1606 bp->msix_table[offset].vector);
1610 if (CNIC_SUPPORT(bp)) {
1611 if (nvecs == offset)
1616 for_each_eth_queue(bp, i) {
1617 if (nvecs == offset)
1619 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1620 i, bp->msix_table[offset].vector);
1622 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1626 void bnx2x_free_irq(struct bnx2x *bp)
1628 if (bp->flags & USING_MSIX_FLAG &&
1629 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1630 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1632 /* vfs don't have a default status block */
1636 bnx2x_free_msix_irqs(bp, nvecs);
1638 free_irq(bp->dev->irq, bp->dev);
1642 int bnx2x_enable_msix(struct bnx2x *bp)
1644 int msix_vec = 0, i, rc;
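/* Sketch of the vector layout built below for a PF with CNIC support and
 * N ETH queues (assuming both are present): entry 0 = slowpath, entry 1 =
 * CNIC, entries 2..N+1 = ETH fastpaths.
 */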
1646 /* VFs don't have a default status block */
1648 bp->msix_table[msix_vec].entry = msix_vec;
1649 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1650 bp->msix_table[0].entry);
1654 /* Cnic requires an msix vector for itself */
1655 if (CNIC_SUPPORT(bp)) {
1656 bp->msix_table[msix_vec].entry = msix_vec;
1657 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1658 msix_vec, bp->msix_table[msix_vec].entry);
1662 /* We need separate vectors for ETH queues only (not FCoE) */
1663 for_each_eth_queue(bp, i) {
1664 bp->msix_table[msix_vec].entry = msix_vec;
1665 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1666 msix_vec, msix_vec, i);
1670 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1673 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1674 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1676 * reconfigure number of tx/rx queues according to available
1679 if (rc == -ENOSPC) {
1680 /* Get by with single vector */
1681 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1683 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1688 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1689 bp->flags |= USING_SINGLE_MSIX_FLAG;
1691 BNX2X_DEV_INFO("set number of queues to 1\n");
1692 bp->num_ethernet_queues = 1;
1693 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1694 } else if (rc < 0) {
1695 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1697 } else if (rc < msix_vec) {
1698 /* how many fewer vectors do we have? */
1699 int diff = msix_vec - rc;
1701 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1704 * decrease number of queues by number of unallocated entries
1706 bp->num_ethernet_queues -= diff;
1707 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1709 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1713 bp->flags |= USING_MSIX_FLAG;
1718 /* fall back to INTx if not enough memory */
1720 bp->flags |= DISABLE_MSI_FLAG;
1725 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1727 int i, rc, offset = 0;
1729 /* no default status block for vf */
1731 rc = request_irq(bp->msix_table[offset++].vector,
1732 bnx2x_msix_sp_int, 0,
1733 bp->dev->name, bp->dev);
1735 BNX2X_ERR("request sp irq failed\n");
1740 if (CNIC_SUPPORT(bp))
1743 for_each_eth_queue(bp, i) {
1744 struct bnx2x_fastpath *fp = &bp->fp[i];
1745 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1748 rc = request_irq(bp->msix_table[offset].vector,
1749 bnx2x_msix_fp_int, 0, fp->name, fp);
1751 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1752 bp->msix_table[offset].vector, rc);
1753 bnx2x_free_msix_irqs(bp, offset);
1760 i = BNX2X_NUM_ETH_QUEUES(bp);
1762 offset = 1 + CNIC_SUPPORT(bp);
1763 netdev_info(bp->dev,
1764 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1765 bp->msix_table[0].vector,
1766 0, bp->msix_table[offset].vector,
1767 i - 1, bp->msix_table[offset + i - 1].vector);
1769 offset = CNIC_SUPPORT(bp);
1770 netdev_info(bp->dev,
1771 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1772 0, bp->msix_table[offset].vector,
1773 i - 1, bp->msix_table[offset + i - 1].vector);
1778 int bnx2x_enable_msi(struct bnx2x *bp)
1782 rc = pci_enable_msi(bp->pdev);
1784 BNX2X_DEV_INFO("MSI is not attainable\n");
1787 bp->flags |= USING_MSI_FLAG;
1792 static int bnx2x_req_irq(struct bnx2x *bp)
1794 unsigned long flags;
1797 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1800 flags = IRQF_SHARED;
1802 if (bp->flags & USING_MSIX_FLAG)
1803 irq = bp->msix_table[0].vector;
1805 irq = bp->pdev->irq;
1807 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1810 static int bnx2x_setup_irqs(struct bnx2x *bp)
1813 if (bp->flags & USING_MSIX_FLAG &&
1814 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1815 rc = bnx2x_req_msix_irqs(bp);
1819 rc = bnx2x_req_irq(bp);
1821 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1824 if (bp->flags & USING_MSI_FLAG) {
1825 bp->dev->irq = bp->pdev->irq;
1826 netdev_info(bp->dev, "using MSI IRQ %d\n",
1829 if (bp->flags & USING_MSIX_FLAG) {
1830 bp->dev->irq = bp->msix_table[0].vector;
1831 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1839 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1843 for_each_rx_queue_cnic(bp, i) {
1844 bnx2x_fp_init_lock(&bp->fp[i]);
1845 napi_enable(&bnx2x_fp(bp, i, napi));
1849 static void bnx2x_napi_enable(struct bnx2x *bp)
1853 for_each_eth_queue(bp, i) {
1854 bnx2x_fp_init_lock(&bp->fp[i]);
1855 napi_enable(&bnx2x_fp(bp, i, napi));
1859 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1863 for_each_rx_queue_cnic(bp, i) {
1864 napi_disable(&bnx2x_fp(bp, i, napi));
1865 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1866 usleep_range(1000, 2000);
1870 static void bnx2x_napi_disable(struct bnx2x *bp)
1874 for_each_eth_queue(bp, i) {
1875 napi_disable(&bnx2x_fp(bp, i, napi));
1876 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1877 usleep_range(1000, 2000);
1881 void bnx2x_netif_start(struct bnx2x *bp)
1883 if (netif_running(bp->dev)) {
1884 bnx2x_napi_enable(bp);
1885 if (CNIC_LOADED(bp))
1886 bnx2x_napi_enable_cnic(bp);
1887 bnx2x_int_enable(bp);
1888 if (bp->state == BNX2X_STATE_OPEN)
1889 netif_tx_wake_all_queues(bp->dev);
1893 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1895 bnx2x_int_disable_sync(bp, disable_hw);
1896 bnx2x_napi_disable(bp);
1897 if (CNIC_LOADED(bp))
1898 bnx2x_napi_disable_cnic(bp);
1901 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1902 void *accel_priv, select_queue_fallback_t fallback)
1904 struct bnx2x *bp = netdev_priv(dev);
1906 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1907 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1908 u16 ether_type = ntohs(hdr->h_proto);
1910 /* Skip VLAN tag if present */
1911 if (ether_type == ETH_P_8021Q) {
1912 struct vlan_ethhdr *vhdr =
1913 (struct vlan_ethhdr *)skb->data;
1915 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1918 /* If ethertype is FCoE or FIP - use FCoE ring */
1919 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1920 return bnx2x_fcoe_tx(bp, txq_index);
1923 /* select a non-FCoE queue */
1924 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1927 void bnx2x_set_num_queues(struct bnx2x *bp)
1930 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1932 /* override in STORAGE SD modes */
1933 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1934 bp->num_ethernet_queues = 1;
1936 /* Add special queues */
1937 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1938 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1940 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1944 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1946 * @bp: Driver handle
1948 * We currently support at most 16 Tx queues for each CoS, thus we will
1949 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1952 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1953 * index after all ETH L2 indices.
1955 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1956 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1957 * 16..31,...) with indices that are not coupled with any real Tx queue.
1959 * The proper configuration of skb->queue_mapping is handled by
1960 * bnx2x_select_queue() and __skb_tx_hash().
1962 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1963 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
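 *
 * Illustrative mapping (hypothetical sizes): with 10 ETH L2 queues and 2 CoS,
 * CoS 0 would use queue_mapping 0..9 and CoS 1 would use 16..25, leaving
 * 10..15 and 26..31 as unused holes.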
1965 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1969 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1970 rx = BNX2X_NUM_ETH_QUEUES(bp);
1972 /* account for fcoe queue */
1973 if (include_cnic && !NO_FCOE(bp)) {
1978 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1980 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1983 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1985 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1989 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1995 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1999 for_each_queue(bp, i) {
2000 struct bnx2x_fastpath *fp = &bp->fp[i];
2003 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2006 * Although there are no IP frames expected to arrive to
2007 * this ring we still want to add an
2008 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2011 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2014 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2015 IP_HEADER_ALIGNMENT_PADDING +
2018 BNX2X_FW_RX_ALIGN_END;
2019 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
2020 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2021 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2023 fp->rx_frag_size = 0;
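/* Illustrative: with a standard 1500-byte MTU the buffer fits in one page, so
 * rx_frag_size is set and bnx2x_frag_alloc() uses the page-frag allocator;
 * a jumbo MTU that pushes rx_buf_size past PAGE_SIZE leaves rx_frag_size at 0
 * and falls back to kmalloc().
 */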
2027 static int bnx2x_init_rss(struct bnx2x *bp)
2030 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2032 /* Prepare the initial contents for the indirection table if RSS is
2035 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2036 bp->rss_conf_obj.ind_table[i] =
2038 ethtool_rxfh_indir_default(i, num_eth_queues);
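/* Illustrative: with 4 ETH queues the default fill is round-robin, i.e.
 * ind_table[i] = i % 4.
 */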
2041 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2042 * per-port, so if explicit configuration is needed, do it only
2045 * For 57712 and newer on the other hand it's a per-function
2048 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2051 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2052 bool config_hash, bool enable)
2054 struct bnx2x_config_rss_params params = {NULL};
2056 /* Although RSS is meaningless when there is a single HW queue we
2057 * still need it enabled in order to have HW Rx hash generated.
2059 * if (!is_eth_multi(bp))
2060 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2063 params.rss_obj = rss_obj;
2065 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
2068 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
2070 /* RSS configuration */
2071 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
2072 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
2073 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
2074 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
2075 if (rss_obj->udp_rss_v4)
2076 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
2077 if (rss_obj->udp_rss_v6)
2078 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
2080 __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags);
2084 params.rss_result_mask = MULTI_MASK;
2086 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2090 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
2091 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
2095 return bnx2x_config_rss(bp, ¶ms);
2097 return bnx2x_vfpf_config_rss(bp, ¶ms);
2100 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2102 struct bnx2x_func_state_params func_params = {NULL};
2104 /* Prepare parameters for function state transitions */
2105 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2107 func_params.f_obj = &bp->func_obj;
2108 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2110 func_params.params.hw_init.load_phase = load_code;
2112 return bnx2x_func_state_change(bp, &func_params);
2116 * Cleans the objects that have internal lists without sending
2117 * ramrods. Should be run when interrupts are disabled.
2119 void bnx2x_squeeze_objects(struct bnx2x *bp)
2122 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2123 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2124 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2126 /***************** Cleanup MACs' object first *************************/
2128 /* Wait for completion of the requested commands */
2129 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2130 /* Perform a dry cleanup */
2131 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2133 /* Clean ETH primary MAC */
2134 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2135 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2138 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2140 /* Cleanup UC list */
2142 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2143 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2146 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2148 /***************** Now clean mcast object *****************************/
2149 rparam.mcast_obj = &bp->mcast_obj;
2150 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2152 /* Add a DEL command... - Since we're doing a driver cleanup only,
2153 * we take a lock surrounding both the initial send and the CONTs,
2154 * as we don't want a true completion to disrupt us in the middle.
2156 netif_addr_lock_bh(bp->dev);
2157 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2159 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2162 /* ...and wait until all pending commands are cleared */
2163 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2166 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2168 netif_addr_unlock_bh(bp->dev);
2172 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2174 netif_addr_unlock_bh(bp->dev);
2177 #ifndef BNX2X_STOP_ON_ERROR
2178 #define LOAD_ERROR_EXIT(bp, label) \
2180 (bp)->state = BNX2X_STATE_ERROR; \
2184 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2186 bp->cnic_loaded = false; \
2189 #else /*BNX2X_STOP_ON_ERROR*/
2190 #define LOAD_ERROR_EXIT(bp, label) \
2192 (bp)->state = BNX2X_STATE_ERROR; \
2196 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2198 bp->cnic_loaded = false; \
2202 #endif /*BNX2X_STOP_ON_ERROR*/
2204 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2206 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2207 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2211 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2213 int num_groups, vf_headroom = 0;
2214 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2216 /* number of queues for statistics is number of eth queues + FCoE */
2217 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2219 /* Total number of FW statistics requests =
2220 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2221 * and fcoe l2 queue) stats + num of queues (which includes another 1
2222 * for fcoe l2 queue if applicable)
2224 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2226 /* vf stats appear in the request list, but their data is allocated by
2227 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2228 * it is used to determine where to place the vf stats queries in the
2232 vf_headroom = bnx2x_vf_headroom(bp);
2234 /* Request is built from stats_query_header and an array of
2235 * stats_query_cmd_group each of which contains
2236 * STATS_QUERY_CMD_COUNT rules. The real number or requests is
2237 * configured in the stats_query_header.
2240 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2241 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2244 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2245 bp->fw_stats_num, vf_headroom, num_groups);
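/* Illustrative: if fw_stats_num + vf_headroom were 20 and
 * STATS_QUERY_CMD_COUNT were 16, this would give 20 / 16 + 1 = 2 groups -
 * one full group plus one for the remainder.
 */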
2246 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2247 num_groups * sizeof(struct stats_query_cmd_group);
2249 /* Data for statistics requests + stats_counter
2250 * stats_counter holds per-STORM counters that are incremented
2251 * when STORM has finished with the current request.
2252 * memory for FCoE offloaded statistics are counted anyway,
2253 * even if they will not be sent.
2254 * VF stats are not accounted for here as the data of VF stats is stored
2255 * in memory allocated by the VF, not here.
2257 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2258 sizeof(struct per_pf_stats) +
2259 sizeof(struct fcoe_statistics_params) +
2260 sizeof(struct per_queue_stats) * num_queue_stats +
2261 sizeof(struct stats_counter);
2263 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2264 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2269 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2270 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2271 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2272 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2273 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2274 bp->fw_stats_req_sz;
2276 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2277 U64_HI(bp->fw_stats_req_mapping),
2278 U64_LO(bp->fw_stats_req_mapping));
2279 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2280 U64_HI(bp->fw_stats_data_mapping),
2281 U64_LO(bp->fw_stats_data_mapping));
2285 bnx2x_free_fw_stats_mem(bp);
2286 BNX2X_ERR("Can't allocate FW stats memory\n");
2290 /* send load request to mcp and analyze response */
2291 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2297 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2298 DRV_MSG_SEQ_NUMBER_MASK);
2299 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2301 /* Get current FW pulse sequence */
2302 bp->fw_drv_pulse_wr_seq =
2303 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2304 DRV_PULSE_SEQ_MASK);
2305 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2307 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2309 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2310 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2313 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2315 /* if mcp fails to respond we must abort */
2316 if (!(*load_code)) {
2317 BNX2X_ERR("MCP response failure, aborting\n");
2321 /* If mcp refused (e.g. other port is in diagnostic mode) we
2324 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2325 BNX2X_ERR("MCP refused load request, aborting\n");
2331 /* check whether another PF has already loaded FW to the chip. In
2332 * virtualized environments a PF from another VM may have already
2333 * initialized the device, including loading the FW
2335 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2337 /* is another pf loaded on this engine? */
2338 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2339 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2340 /* build my FW version dword */
2341 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2342 (BCM_5710_FW_MINOR_VERSION << 8) +
2343 (BCM_5710_FW_REVISION_VERSION << 16) +
2344 (BCM_5710_FW_ENGINEERING_VERSION << 24);
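/* For illustration only: a hypothetical FW version 7.8.19.0 packs as
 * 7 + (8 << 8) + (19 << 16) + (0 << 24) = 0x00130807, and is compared
 * below against the dword read back from XSEM_REG_PRAM.
 */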
2346 /* read loaded FW from chip */
2347 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2349 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2352 /* abort nic load if version mismatch */
2353 if (my_fw != loaded_fw) {
2355 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2358 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2366 /* returns the "mcp load_code" according to global load_count array */
2367 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2369 int path = BP_PATH(bp);
2371 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2372 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2373 bnx2x_load_count[path][2]);
2374 bnx2x_load_count[path][0]++;
2375 bnx2x_load_count[path][1 + port]++;
2376 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2377 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2378 bnx2x_load_count[path][2]);
2379 if (bnx2x_load_count[path][0] == 1)
2380 return FW_MSG_CODE_DRV_LOAD_COMMON;
2381 else if (bnx2x_load_count[path][1 + port] == 1)
2382 return FW_MSG_CODE_DRV_LOAD_PORT;
2384 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
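/* For illustration (no-MCP case): the first function loaded on a path
 * sees load_count[path][0] == 1 and gets DRV_LOAD_COMMON (full common
 * init), the first function on a given port gets DRV_LOAD_PORT, and
 * every later function on that port gets DRV_LOAD_FUNCTION only.
 */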
2387 /* mark PMF if applicable */
2388 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2390 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2391 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2392 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2394 /* We need the barrier to ensure the ordering between the
2395 * writing to bp->port.pmf here and reading it from the
2396 * bnx2x_periodic_task().
2403 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2406 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2408 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2409 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2410 (bp->common.shmem2_base)) {
2411 if (SHMEM2_HAS(bp, dcc_support))
2412 SHMEM2_WR(bp, dcc_support,
2413 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2414 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2415 if (SHMEM2_HAS(bp, afex_driver_support))
2416 SHMEM2_WR(bp, afex_driver_support,
2417 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2420 /* Set AFEX default VLAN tag to an invalid value */
2421 bp->afex_def_vlan_tag = -1;
2425 * bnx2x_bz_fp - zero content of the fastpath structure.
2427 * @bp: driver handle
2428 * @index: fastpath index to be zeroed
2430 * Makes sure the contents of bp->fp[index].napi are kept
2433 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2435 struct bnx2x_fastpath *fp = &bp->fp[index];
2437 struct napi_struct orig_napi = fp->napi;
2438 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2440 /* bzero bnx2x_fastpath contents */
2442 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2443 sizeof(struct bnx2x_agg_info));
2444 memset(fp, 0, sizeof(*fp));
2446 /* Restore the NAPI object as it has been already initialized */
2447 fp->napi = orig_napi;
2448 fp->tpa_info = orig_tpa_info;
2452 fp->max_cos = bp->max_cos;
2454 /* Special queues support only one CoS */
2457 /* Init txdata pointers */
2459 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2461 for_each_cos_in_tx_queue(fp, cos)
2462 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2463 BNX2X_NUM_ETH_QUEUES(bp) + index];
2465 /* Set the TPA flag for each queue. The TPA flag determines the minimal
2466 * queue size, so it must be set prior to queue memory allocation
2468 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2469 (bp->flags & GRO_ENABLE_FLAG &&
2470 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2471 if (bp->flags & TPA_ENABLE_FLAG)
2472 fp->mode = TPA_MODE_LRO;
2473 else if (bp->flags & GRO_ENABLE_FLAG)
2474 fp->mode = TPA_MODE_GRO;
2476 /* We don't want TPA on an FCoE L2 ring */
2478 fp->disable_tpa = 1;
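/* Roughly: TPA_ENABLE_FLAG selects TPA_MODE_LRO; otherwise
 * GRO_ENABLE_FLAG selects TPA_MODE_GRO, but aggregation stays disabled
 * unless the MTU allows GRO; FCoE rings never use TPA.
 */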
2481 int bnx2x_load_cnic(struct bnx2x *bp)
2483 int i, rc, port = BP_PORT(bp);
2485 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2487 mutex_init(&bp->cnic_mutex);
2490 rc = bnx2x_alloc_mem_cnic(bp);
2492 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2493 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2497 rc = bnx2x_alloc_fp_mem_cnic(bp);
2499 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2500 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2503 /* Update the number of queues with the cnic queues */
2504 rc = bnx2x_set_real_num_queues(bp, 1);
2506 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2507 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2510 /* Add all CNIC NAPI objects */
2511 bnx2x_add_all_napi_cnic(bp);
2512 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2513 bnx2x_napi_enable_cnic(bp);
2515 rc = bnx2x_init_hw_func_cnic(bp);
2517 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2519 bnx2x_nic_init_cnic(bp);
2522 /* Enable Timer scan */
2523 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2525 /* setup cnic queues */
2526 for_each_cnic_queue(bp, i) {
2527 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2529 BNX2X_ERR("Queue setup failed\n");
2530 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2535 /* Initialize Rx filter. */
2536 bnx2x_set_rx_mode_inner(bp);
2538 /* re-read iscsi info */
2539 bnx2x_get_iscsi_info(bp);
2540 bnx2x_setup_cnic_irq_info(bp);
2541 bnx2x_setup_cnic_info(bp);
2542 bp->cnic_loaded = true;
2543 if (bp->state == BNX2X_STATE_OPEN)
2544 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2546 DP(NETIF_MSG_IFUP, "CNIC-related load completed successfully\n");
2550 #ifndef BNX2X_STOP_ON_ERROR
2552 /* Disable Timer scan */
2553 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2556 bnx2x_napi_disable_cnic(bp);
2557 /* Update the number of queues without the cnic queues */
2558 if (bnx2x_set_real_num_queues(bp, 0))
2559 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2561 BNX2X_ERR("CNIC-related load failed\n");
2562 bnx2x_free_fp_mem_cnic(bp);
2563 bnx2x_free_mem_cnic(bp);
2565 #endif /* ! BNX2X_STOP_ON_ERROR */
2568 /* must be called with rtnl_lock */
2569 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2571 int port = BP_PORT(bp);
2572 int i, rc = 0, load_code = 0;
2574 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2576 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2578 #ifdef BNX2X_STOP_ON_ERROR
2579 if (unlikely(bp->panic)) {
2580 BNX2X_ERR("Can't load NIC when there is panic\n");
2585 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2587 /* zero the structure w/o any lock, before SP handler is initialized */
2588 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2589 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2590 &bp->last_reported_link.link_report_flags);
2593 /* must be called before memory allocation and HW init */
2594 bnx2x_ilt_set_info(bp);
2597 * Zero fastpath structures preserving invariants like napi, which are
2598 * allocated only once, fp index, max_cos, bp pointer.
2599 * Also set fp->disable_tpa and txdata_ptr.
2601 DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2602 for_each_queue(bp, i)
2604 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2605 bp->num_cnic_queues) *
2606 sizeof(struct bnx2x_fp_txdata));
2608 bp->fcoe_init = false;
2610 /* Set the receive queues buffer size */
2611 bnx2x_set_rx_buf_size(bp);
2614 rc = bnx2x_alloc_mem(bp);
2616 BNX2X_ERR("Unable to allocate bp memory\n");
2621 /* needs to be done after alloc mem, since it's self-adjusting to the
2622 * amount of memory available for RSS queues
2624 rc = bnx2x_alloc_fp_mem(bp);
2626 BNX2X_ERR("Unable to allocate memory for fps\n");
2627 LOAD_ERROR_EXIT(bp, load_error0);
2630 /* Allocate memory for FW statistics */
2631 if (bnx2x_alloc_fw_stats_mem(bp))
2632 LOAD_ERROR_EXIT(bp, load_error0);
2634 /* request pf to initialize status blocks */
2636 rc = bnx2x_vfpf_init(bp);
2638 LOAD_ERROR_EXIT(bp, load_error0);
2641 /* Since bnx2x_alloc_mem() may update bp->num_queues,
2642 * bnx2x_set_real_num_queues() should always come after it. At this
2643 * stage cnic queues are not counted.
2645 rc = bnx2x_set_real_num_queues(bp, 0);
2647 BNX2X_ERR("Unable to set real_num_queues\n");
2648 LOAD_ERROR_EXIT(bp, load_error0);
2651 /* Configure multi-CoS mappings in the kernel. This configuration may
2652 * be overridden by a multi-class queue discipline or by a DCBX
2653 * negotiation result.
2655 bnx2x_setup_tc(bp->dev, bp->max_cos);
2657 /* Add all NAPI objects */
2658 bnx2x_add_all_napi(bp);
2659 DP(NETIF_MSG_IFUP, "napi added\n");
2660 bnx2x_napi_enable(bp);
2663 /* set pf load just before approaching the MCP */
2664 bnx2x_set_pf_load(bp);
2666 /* if mcp exists send load request and analyze response */
2667 if (!BP_NOMCP(bp)) {
2668 /* attempt to load pf */
2669 rc = bnx2x_nic_load_request(bp, &load_code);
2671 LOAD_ERROR_EXIT(bp, load_error1);
2673 /* what did mcp say? */
2674 rc = bnx2x_compare_fw_ver(bp, load_code, true);
2676 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2677 LOAD_ERROR_EXIT(bp, load_error2);
2680 load_code = bnx2x_nic_load_no_mcp(bp, port);
2683 /* mark pmf if applicable */
2684 bnx2x_nic_load_pmf(bp, load_code);
2686 /* Init Function state controlling object */
2687 bnx2x__init_func_obj(bp);
2690 rc = bnx2x_init_hw(bp, load_code);
2692 BNX2X_ERR("HW init failed, aborting\n");
2693 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2694 LOAD_ERROR_EXIT(bp, load_error2);
2698 bnx2x_pre_irq_nic_init(bp);
2700 /* Connect to IRQs */
2701 rc = bnx2x_setup_irqs(bp);
2703 BNX2X_ERR("setup irqs failed\n");
2705 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2706 LOAD_ERROR_EXIT(bp, load_error2);
2709 /* Init per-function objects */
2711 /* Setup NIC internals and enable interrupts */
2712 bnx2x_post_irq_nic_init(bp, load_code);
2714 bnx2x_init_bp_objs(bp);
2715 bnx2x_iov_nic_init(bp);
2717 /* Set AFEX default VLAN tag to an invalid value */
2718 bp->afex_def_vlan_tag = -1;
2719 bnx2x_nic_load_afex_dcc(bp, load_code);
2720 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2721 rc = bnx2x_func_start(bp);
2723 BNX2X_ERR("Function start failed!\n");
2724 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2726 LOAD_ERROR_EXIT(bp, load_error3);
2729 /* Send LOAD_DONE command to MCP */
2730 if (!BP_NOMCP(bp)) {
2731 load_code = bnx2x_fw_command(bp,
2732 DRV_MSG_CODE_LOAD_DONE, 0);
2734 BNX2X_ERR("MCP response failure, aborting\n");
2736 LOAD_ERROR_EXIT(bp, load_error3);
2740 /* initialize FW coalescing state machines in RAM */
2741 bnx2x_update_coalesce(bp);
2744 /* setup the leading queue */
2745 rc = bnx2x_setup_leading(bp);
2747 BNX2X_ERR("Setup leading failed!\n");
2748 LOAD_ERROR_EXIT(bp, load_error3);
2751 /* set up the rest of the queues */
2752 for_each_nondefault_eth_queue(bp, i) {
2754 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2756 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2758 BNX2X_ERR("Queue %d setup failed\n", i);
2759 LOAD_ERROR_EXIT(bp, load_error3);
2764 rc = bnx2x_init_rss(bp);
2766 BNX2X_ERR("PF RSS init failed\n");
2767 LOAD_ERROR_EXIT(bp, load_error3);
2770 /* Now that clients are configured we are ready to work */
2771 bp->state = BNX2X_STATE_OPEN;
2773 /* Configure a ucast MAC */
2775 rc = bnx2x_set_eth_mac(bp, true);
2777 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2780 BNX2X_ERR("Setting Ethernet MAC failed\n");
2781 LOAD_ERROR_EXIT(bp, load_error3);
2784 if (IS_PF(bp) && bp->pending_max) {
2785 bnx2x_update_max_mf_config(bp, bp->pending_max);
2786 bp->pending_max = 0;
2790 rc = bnx2x_initial_phy_init(bp, load_mode);
2792 LOAD_ERROR_EXIT(bp, load_error3);
2794 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2796 /* Start fast path */
2798 /* Initialize Rx filter. */
2799 bnx2x_set_rx_mode_inner(bp);
2802 switch (load_mode) {
2804 /* Tx queues should only be re-enabled */
2805 netif_tx_wake_all_queues(bp->dev);
2809 netif_tx_start_all_queues(bp->dev);
2810 smp_mb__after_atomic();
2814 case LOAD_LOOPBACK_EXT:
2815 bp->state = BNX2X_STATE_DIAG;
2823 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2825 bnx2x__link_status_update(bp);
2827 /* start the timer */
2828 mod_timer(&bp->timer, jiffies + bp->current_interval);
2830 if (CNIC_ENABLED(bp))
2831 bnx2x_load_cnic(bp);
2834 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2836 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2837 /* mark driver is loaded in shmem2 */
2839 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2840 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2841 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2842 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2845 /* Wait for all pending SP commands to complete */
2846 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2847 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2848 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2852 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2853 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2854 bnx2x_dcbx_init(bp, false);
2856 DP(NETIF_MSG_IFUP, "NIC load completed successfully\n");
2860 #ifndef BNX2X_STOP_ON_ERROR
2863 bnx2x_int_disable_sync(bp, 1);
2865 /* Clean queueable objects */
2866 bnx2x_squeeze_objects(bp);
2869 /* Free SKBs, SGEs, TPA pool and driver internals */
2870 bnx2x_free_skbs(bp);
2871 for_each_rx_queue(bp, i)
2872 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2877 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2878 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2879 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2884 bnx2x_napi_disable(bp);
2885 bnx2x_del_all_napi(bp);
2887 /* clear pf_load status, as it was already set */
2889 bnx2x_clear_pf_load(bp);
2891 bnx2x_free_fw_stats_mem(bp);
2892 bnx2x_free_fp_mem(bp);
2896 #endif /* ! BNX2X_STOP_ON_ERROR */
2899 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2903 /* Wait until tx fastpath tasks complete */
2904 for_each_tx_queue(bp, i) {
2905 struct bnx2x_fastpath *fp = &bp->fp[i];
2907 for_each_cos_in_tx_queue(fp, cos)
2908 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2915 /* must be called with rtnl_lock */
2916 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2919 bool global = false;
2921 DP(NETIF_MSG_IFDOWN, "Starting NIC unload\n");
2923 /* mark driver is unloaded in shmem2 */
2924 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2926 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2927 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2928 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2931 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2932 (bp->state == BNX2X_STATE_CLOSED ||
2933 bp->state == BNX2X_STATE_ERROR)) {
2934 /* We can get here if the driver has been unloaded
2935 * during parity error recovery and is either waiting for a
2936 * leader to complete or for other functions to unload and
2937 * then ifdown has been issued. In this case we want to
2938 * unload and let other functions complete a recovery
2941 bp->recovery_state = BNX2X_RECOVERY_DONE;
2943 bnx2x_release_leader_lock(bp);
2946 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2947 BNX2X_ERR("Can't unload in closed or error state\n");
2951 /* Nothing to do during unload if the previous bnx2x_nic_load()
2952 * has not completed successfully - all resources are released.
2954 * We can get here only after an unsuccessful ndo_* callback, during which
2955 * the dev->IFF_UP flag is still on.
2957 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2960 /* It's important to set bp->state to a value different from
2961 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2962 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2964 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2967 /* indicate to VFs that the PF is going down */
2968 bnx2x_iov_channel_down(bp);
2970 if (CNIC_LOADED(bp))
2971 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2974 bnx2x_tx_disable(bp);
2975 netdev_reset_tc(bp->dev);
2977 bp->rx_mode = BNX2X_RX_MODE_NONE;
2979 del_timer_sync(&bp->timer);
2982 /* Set ALWAYS_ALIVE bit in shmem */
2983 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2984 bnx2x_drv_pulse(bp);
2985 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2986 bnx2x_save_statistics(bp);
2989 /* wait till consumers catch up with producers in all queues */
2990 bnx2x_drain_tx_queues(bp);
2992 /* if VF, indicate to the PF that this function is going down (the PF
2993 * will delete SP elements and clear initializations
2996 bnx2x_vfpf_close_vf(bp);
2997 else if (unload_mode != UNLOAD_RECOVERY)
2998 /* if this is a normal/close unload need to clean up chip*/
2999 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3001 /* Send the UNLOAD_REQUEST to the MCP */
3002 bnx2x_send_unload_req(bp, unload_mode);
3004 /* Prevent transactions to the host from the functions on the
3005 * engine that doesn't reset global blocks in case of a global
3006 * attention, once global blocks are reset and gates are opened
3007 * (the engine whose leader will perform the recovery
3010 if (!CHIP_IS_E1x(bp))
3011 bnx2x_pf_disable(bp);
3013 /* Disable HW interrupts, NAPI */
3014 bnx2x_netif_stop(bp, 1);
3015 /* Delete all NAPI objects */
3016 bnx2x_del_all_napi(bp);
3017 if (CNIC_LOADED(bp))
3018 bnx2x_del_all_napi_cnic(bp);
3022 /* Report UNLOAD_DONE to MCP */
3023 bnx2x_send_unload_done(bp, false);
3027 * At this stage no more interrupts will arrive so we may safely clean
3028 * the queueable objects here in case they failed to get cleaned so far.
3031 bnx2x_squeeze_objects(bp);
3033 /* There should be no more pending SP commands at this stage */
3038 /* clear pending work in rtnl task */
3039 bp->sp_rtnl_state = 0;
3042 /* Free SKBs, SGEs, TPA pool and driver internals */
3043 bnx2x_free_skbs(bp);
3044 if (CNIC_LOADED(bp))
3045 bnx2x_free_skbs_cnic(bp);
3046 for_each_rx_queue(bp, i)
3047 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3049 bnx2x_free_fp_mem(bp);
3050 if (CNIC_LOADED(bp))
3051 bnx2x_free_fp_mem_cnic(bp);
3054 if (CNIC_LOADED(bp))
3055 bnx2x_free_mem_cnic(bp);
3059 bp->state = BNX2X_STATE_CLOSED;
3060 bp->cnic_loaded = false;
3062 /* Clear driver version indication in shmem */
3064 bnx2x_update_mng_version(bp);
3066 /* Check if there are pending parity attentions. If there are - set
3067 * RECOVERY_IN_PROGRESS.
3069 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3070 bnx2x_set_reset_in_progress(bp);
3072 /* Set RESET_IS_GLOBAL if needed */
3074 bnx2x_set_reset_global(bp);
3077 /* The last driver must disable a "close the gate" if there is no
3078 * parity attention or "process kill" pending.
3081 !bnx2x_clear_pf_load(bp) &&
3082 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3083 bnx2x_disable_close_the_gate(bp);
3085 DP(NETIF_MSG_IFDOWN, "Ending NIC unload\n");
3090 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3094 /* If there is no power capability, silently succeed */
3095 if (!bp->pdev->pm_cap) {
3096 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3100 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3104 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3105 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3106 PCI_PM_CTRL_PME_STATUS));
3108 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3109 /* delay required during transition out of D3hot */
3114 /* If there are other clients above, don't
3115 * shut down the power */
3116 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3118 /* Don't shut down the power for emulation and FPGA */
3119 if (CHIP_REV_IS_SLOW(bp))
3122 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3126 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3128 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3131 /* No more memory access after this point until
3132 * device is brought back to D0.
3137 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3144 * net_device service functions
3146 static int bnx2x_poll(struct napi_struct *napi, int budget)
3150 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3152 struct bnx2x *bp = fp->bp;
3155 #ifdef BNX2X_STOP_ON_ERROR
3156 if (unlikely(bp->panic)) {
3157 napi_complete(napi);
3161 if (!bnx2x_fp_lock_napi(fp))
3164 for_each_cos_in_tx_queue(fp, cos)
3165 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3166 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3168 if (bnx2x_has_rx_work(fp)) {
3169 work_done += bnx2x_rx_int(fp, budget - work_done);
3171 /* must not complete if we consumed full budget */
3172 if (work_done >= budget) {
3173 bnx2x_fp_unlock_napi(fp);
3178 /* Fall out from the NAPI loop if needed */
3179 if (!bnx2x_fp_unlock_napi(fp) &&
3180 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3182 /* No need to update SB for FCoE L2 ring as long as
3183 * it's connected to the default SB and the SB
3184 * has been updated when NAPI was scheduled.
3186 if (IS_FCOE_FP(fp)) {
3187 napi_complete(napi);
3190 bnx2x_update_fpsb_idx(fp);
3191 /* bnx2x_has_rx_work() reads the status block,
3192 * thus we need to ensure that status block indices
3193 * have been actually read (bnx2x_update_fpsb_idx)
3194 * prior to this check (bnx2x_has_rx_work) so that
3195 * we won't write the "newer" value of the status block
3196 * to IGU (if there was a DMA right after
3197 * bnx2x_has_rx_work and if there is no rmb, the memory
3198 * reading (bnx2x_update_fpsb_idx) may be postponed
3199 * to right before bnx2x_ack_sb). In this case there
3200 * will never be another interrupt until there is
3201 * another update of the status block, while there
3202 * is still unhandled work.
3206 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3207 napi_complete(napi);
3208 /* Re-enable interrupts */
3209 DP(NETIF_MSG_RX_STATUS,
3210 "Update index to %d\n", fp->fp_hc_idx);
3211 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3212 le16_to_cpu(fp->fp_hc_idx),
3222 #ifdef CONFIG_NET_RX_BUSY_POLL
3223 /* must be called with local_bh_disable()d */
3224 int bnx2x_low_latency_recv(struct napi_struct *napi)
3226 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3228 struct bnx2x *bp = fp->bp;
3231 if ((bp->state == BNX2X_STATE_CLOSED) ||
3232 (bp->state == BNX2X_STATE_ERROR) ||
3233 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3234 return LL_FLUSH_FAILED;
3236 if (!bnx2x_fp_lock_poll(fp))
3237 return LL_FLUSH_BUSY;
3239 if (bnx2x_has_rx_work(fp))
3240 found = bnx2x_rx_int(fp, 4);
3242 bnx2x_fp_unlock_poll(fp);
3248 /* we split the first BD into headers and data BDs
3249 * to ease the pain of our fellow microcode engineers;
3250 * we use one mapping for both BDs
3252 static u16 bnx2x_tx_split(struct bnx2x *bp,
3253 struct bnx2x_fp_txdata *txdata,
3254 struct sw_tx_bd *tx_buf,
3255 struct eth_tx_start_bd **tx_bd, u16 hlen,
3258 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3259 struct eth_tx_bd *d_tx_bd;
3261 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3263 /* first fix first BD */
3264 h_tx_bd->nbytes = cpu_to_le16(hlen);
3266 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3267 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3269 /* now get a new data BD
3270 * (after the pbd) and fill it */
3271 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3272 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3274 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3275 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3277 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3278 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3279 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3281 /* this marks the BD as one that has no individual mapping */
3282 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3284 DP(NETIF_MSG_TX_QUEUED,
3285 "TSO split data size is %d (%x:%x)\n",
3286 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3289 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
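/* A worked example of the split (numbers are illustrative only): for a
 * TSO skb whose linear part is 200 bytes with hlen = 66, the start BD
 * is trimmed to nbytes = 66 and the new regular BD gets
 * addr = mapping + 66, nbytes = 134. Both BDs reference the same DMA
 * mapping, which is why tx_buf->flags is marked BNX2X_TSO_SPLIT_BD -
 * the extra BD must not be unmapped on its own at completion time.
 */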
3294 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3295 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3296 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3298 __sum16 tsum = (__force __sum16) csum;
3301 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3302 csum_partial(t_header - fix, fix, 0)));
3305 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3306 csum_partial(t_header, -fix, 0)));
3308 return bswab16(tsum);
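/* A note on the fixup above: the HW checksum was computed starting
 * 'fix' bytes away from the transport header. A positive fix means
 * extra bytes before the header were summed and are subtracted out; a
 * negative fix means the first -fix header bytes were skipped and are
 * added back in, before folding and byte-swapping the result.
 */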
3311 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3317 if (skb->ip_summed != CHECKSUM_PARTIAL)
3320 protocol = vlan_get_protocol(skb);
3321 if (protocol == htons(ETH_P_IPV6)) {
3323 prot = ipv6_hdr(skb)->nexthdr;
3326 prot = ip_hdr(skb)->protocol;
3329 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3330 if (inner_ip_hdr(skb)->version == 6) {
3331 rc |= XMIT_CSUM_ENC_V6;
3332 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3333 rc |= XMIT_CSUM_TCP;
3335 rc |= XMIT_CSUM_ENC_V4;
3336 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3337 rc |= XMIT_CSUM_TCP;
3340 if (prot == IPPROTO_TCP)
3341 rc |= XMIT_CSUM_TCP;
3343 if (skb_is_gso(skb)) {
3344 if (skb_is_gso_v6(skb)) {
3345 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3346 if (rc & XMIT_CSUM_ENC)
3347 rc |= XMIT_GSO_ENC_V6;
3349 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3350 if (rc & XMIT_CSUM_ENC)
3351 rc |= XMIT_GSO_ENC_V4;
3358 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3359 /* Check if the packet requires linearization (packet is too fragmented);
3360 no need to check fragmentation if page size > 8K (there will be no
3361 violation of FW restrictions) */
3362 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3367 int first_bd_sz = 0;
3369 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3370 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3372 if (xmit_type & XMIT_GSO) {
3373 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3374 /* Check if LSO packet needs to be copied:
3375 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3376 int wnd_size = MAX_FETCH_BD - 3;
3377 /* Number of windows to check */
3378 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3383 /* Headers length */
3384 hlen = (int)(skb_transport_header(skb) - skb->data) +
3387 /* Amount of data (w/o headers) on the linear part of the SKB */
3388 first_bd_sz = skb_headlen(skb) - hlen;
3390 wnd_sum = first_bd_sz;
3392 /* Calculate the first sum - it's special */
3393 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3395 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3397 /* If there was data in the linear part of the skb - check it */
3398 if (first_bd_sz > 0) {
3399 if (unlikely(wnd_sum < lso_mss)) {
3404 wnd_sum -= first_bd_sz;
3407 /* Others are easier: run through the frag list and
3408 check all windows */
3409 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3411 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3413 if (unlikely(wnd_sum < lso_mss)) {
3418 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3421 /* in the non-LSO case, a too-fragmented packet should always
3428 if (unlikely(to_copy))
3429 DP(NETIF_MSG_TX_QUEUED,
3430 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3431 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3432 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
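/* A concrete illustration of the window check (hypothetical numbers):
 * if MAX_FETCH_BD were 13 the window size would be 10, so for an LSO
 * skb every run of 10 consecutive BDs (linear part plus frags) must
 * carry at least gso_size bytes; with gso_size = 1400, a window that
 * sums to only 1200 bytes makes this return non-zero and the caller
 * falls back to skb_linearize(). Non-LSO skbs with too many frags are
 * always linearized.
 */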
3438 static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3441 struct ipv6hdr *ipv6;
3443 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3444 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3445 ETH_TX_PARSE_BD_E2_LSO_MSS;
3447 if (xmit_type & XMIT_GSO_ENC_V6)
3448 ipv6 = inner_ipv6_hdr(skb);
3449 else if (xmit_type & XMIT_GSO_V6)
3450 ipv6 = ipv6_hdr(skb);
3454 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3455 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3459 * bnx2x_set_pbd_gso - update PBD in GSO case.
3463 * @xmit_type: xmit flags
3465 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3466 struct eth_tx_parse_bd_e1x *pbd,
3467 struct eth_tx_start_bd *tx_start_bd,
3470 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3471 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3472 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3474 if (xmit_type & XMIT_GSO_V4) {
3475 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3476 pbd->tcp_pseudo_csum =
3477 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3479 0, IPPROTO_TCP, 0));
3481 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3482 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3484 pbd->tcp_pseudo_csum =
3485 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3486 &ipv6_hdr(skb)->daddr,
3487 0, IPPROTO_TCP, 0));
3491 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3495 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3497 * @bp: driver handle
3499 * @parsing_data: data to be updated
3500 * @xmit_type: xmit flags
3502 * 57712/578xx related, when skb has encapsulation
3504 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3505 u32 *parsing_data, u32 xmit_type)
3508 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3509 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3510 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3512 if (xmit_type & XMIT_CSUM_TCP) {
3513 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3514 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3515 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3517 return skb_inner_transport_header(skb) +
3518 inner_tcp_hdrlen(skb) - skb->data;
3521 /* We support checksum offload for TCP and UDP only.
3522 * No need to pass the UDP header length - it's a constant.
3524 return skb_inner_transport_header(skb) +
3525 sizeof(struct udphdr) - skb->data;
3529 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3531 * @bp: driver handle
3533 * @parsing_data: data to be updated
3534 * @xmit_type: xmit flags
3536 * 57712/578xx related
3538 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3539 u32 *parsing_data, u32 xmit_type)
3542 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3543 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3544 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3546 if (xmit_type & XMIT_CSUM_TCP) {
3547 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3548 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3549 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3551 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3553 /* We support checksum offload for TCP and UDP only.
3554 * No need to pass the UDP header length - it's a constant.
3556 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3559 /* set FW indication according to inner or outer protocols if tunneled */
3560 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3561 struct eth_tx_start_bd *tx_start_bd,
3564 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3566 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3567 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3569 if (!(xmit_type & XMIT_CSUM_TCP))
3570 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3574 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3576 * @bp: driver handle
3578 * @pbd: parse BD to be updated
3579 * @xmit_type: xmit flags
3581 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3582 struct eth_tx_parse_bd_e1x *pbd,
3585 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3587 /* for now NS flag is not used in Linux */
3590 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3591 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3593 pbd->ip_hlen_w = (skb_transport_header(skb) -
3594 skb_network_header(skb)) >> 1;
3596 hlen += pbd->ip_hlen_w;
3598 /* We support checksum offload for TCP and UDP only */
3599 if (xmit_type & XMIT_CSUM_TCP)
3600 hlen += tcp_hdrlen(skb) / 2;
3602 hlen += sizeof(struct udphdr) / 2;
3604 pbd->total_hlen_w = cpu_to_le16(hlen);
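/* Note on units (restating the convention above): hlen, ip_hlen_w and
 * total_hlen_w are counted in 16-bit words, hence the '>> 1' and '/ 2'
 * conversions; e.g. a 14-byte Ethernet header + 20-byte IPv4 header +
 * 20-byte TCP header gives total_hlen_w = 27.
 */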
3607 if (xmit_type & XMIT_CSUM_TCP) {
3608 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3611 s8 fix = SKB_CS_OFF(skb); /* signed! */
3613 DP(NETIF_MSG_TX_QUEUED,
3614 "hlen %d fix %d csum before fix %x\n",
3615 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3617 /* HW bug: fixup the CSUM */
3618 pbd->tcp_pseudo_csum =
3619 bnx2x_csum_fix(skb_transport_header(skb),
3622 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3623 pbd->tcp_pseudo_csum);
3629 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3630 struct eth_tx_parse_bd_e2 *pbd_e2,
3631 struct eth_tx_parse_2nd_bd *pbd2,
3636 u8 outerip_off, outerip_len = 0;
3638 /* from outer IP to transport */
3639 hlen_w = (skb_inner_transport_header(skb) -
3640 skb_network_header(skb)) >> 1;
3643 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3645 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3647 /* outer IP header info */
3648 if (xmit_type & XMIT_CSUM_V4) {
3649 struct iphdr *iph = ip_hdr(skb);
3650 u32 csum = (__force u32)(~iph->check) -
3651 (__force u32)iph->tot_len -
3652 (__force u32)iph->frag_off;
3654 pbd2->fw_ip_csum_wo_len_flags_frag =
3655 bswab16(csum_fold((__force __wsum)csum));
3657 pbd2->fw_ip_hdr_to_payload_w =
3658 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3661 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3663 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3665 if (xmit_type & XMIT_GSO_V4) {
3666 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3668 pbd_e2->data.tunnel_data.pseudo_csum =
3669 bswab16(~csum_tcpudp_magic(
3670 inner_ip_hdr(skb)->saddr,
3671 inner_ip_hdr(skb)->daddr,
3672 0, IPPROTO_TCP, 0));
3674 outerip_len = ip_hdr(skb)->ihl << 1;
3676 pbd_e2->data.tunnel_data.pseudo_csum =
3677 bswab16(~csum_ipv6_magic(
3678 &inner_ipv6_hdr(skb)->saddr,
3679 &inner_ipv6_hdr(skb)->daddr,
3680 0, IPPROTO_TCP, 0));
3683 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3687 (!!(xmit_type & XMIT_CSUM_V6) <<
3688 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3690 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3691 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3692 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3694 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3695 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3696 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3700 /* called with netif_tx_lock
3701 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3702 * netif_wake_queue()
3704 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3706 struct bnx2x *bp = netdev_priv(dev);
3708 struct netdev_queue *txq;
3709 struct bnx2x_fp_txdata *txdata;
3710 struct sw_tx_bd *tx_buf;
3711 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3712 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3713 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3714 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3715 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3716 u32 pbd_e2_parsing_data = 0;
3717 u16 pkt_prod, bd_prod;
3720 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3723 __le16 pkt_size = 0;
3725 u8 mac_type = UNICAST_ADDRESS;
3727 #ifdef BNX2X_STOP_ON_ERROR
3728 if (unlikely(bp->panic))
3729 return NETDEV_TX_BUSY;
3732 txq_index = skb_get_queue_mapping(skb);
3733 txq = netdev_get_tx_queue(dev, txq_index);
3735 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3737 txdata = &bp->bnx2x_txq[txq_index];
3739 /* enable this debug print to view the transmission queue being used
3740 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3741 txq_index, fp_index, txdata_index); */
3743 /* enable this debug print to view the transmission details
3744 DP(NETIF_MSG_TX_QUEUED,
3745 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3746 txdata->cid, fp_index, txdata_index, txdata, fp); */
3748 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3749 skb_shinfo(skb)->nr_frags +
3751 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3752 /* Handle special storage cases separately */
3753 if (txdata->tx_ring_size == 0) {
3754 struct bnx2x_eth_q_stats *q_stats =
3755 bnx2x_fp_qstats(bp, txdata->parent_fp);
3756 q_stats->driver_filtered_tx_pkt++;
3758 return NETDEV_TX_OK;
3760 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3761 netif_tx_stop_queue(txq);
3762 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3764 return NETDEV_TX_BUSY;
3767 DP(NETIF_MSG_TX_QUEUED,
3768 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3769 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3770 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3773 eth = (struct ethhdr *)skb->data;
3775 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3776 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3777 if (is_broadcast_ether_addr(eth->h_dest))
3778 mac_type = BROADCAST_ADDRESS;
3780 mac_type = MULTICAST_ADDRESS;
3783 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3784 /* First, check if we need to linearize the skb (due to FW
3785 restrictions). No need to check fragmentation if page size > 8K
3786 (there will be no violation of FW restrictions) */
3787 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3788 /* Statistics of linearization */
3790 if (skb_linearize(skb) != 0) {
3791 DP(NETIF_MSG_TX_QUEUED,
3792 "SKB linearization failed - silently dropping this SKB\n");
3793 dev_kfree_skb_any(skb);
3794 return NETDEV_TX_OK;
3798 /* Map skb linear data for DMA */
3799 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3800 skb_headlen(skb), DMA_TO_DEVICE);
3801 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3802 DP(NETIF_MSG_TX_QUEUED,
3803 "SKB mapping failed - silently dropping this SKB\n");
3804 dev_kfree_skb_any(skb);
3805 return NETDEV_TX_OK;
3808 Please read carefully. First we use one BD which we mark as start,
3809 then we have a parsing info BD (used for TSO or xsum),
3810 and only then we have the rest of the TSO BDs.
3811 (don't forget to mark the last one as last,
3812 and to unmap only AFTER you write to the BD ...)
3813 And above all, all pbd sizes are in words - NOT DWORDS!
3816 /* get current pkt produced now - advance it just before sending packet
3817 * since mapping of pages may fail and cause packet to be dropped
3819 pkt_prod = txdata->tx_pkt_prod;
3820 bd_prod = TX_BD(txdata->tx_bd_prod);
3822 /* get a tx_buf and first BD
3823 * tx_start_bd may be changed during SPLIT,
3824 * but first_bd will always stay first
3826 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3827 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3828 first_bd = tx_start_bd;
3830 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3832 /* header nbd: indirectly zero other flags! */
3833 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3835 /* remember the first BD of the packet */
3836 tx_buf->first_bd = txdata->tx_bd_prod;
3840 DP(NETIF_MSG_TX_QUEUED,
3841 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3842 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3844 if (vlan_tx_tag_present(skb)) {
3845 tx_start_bd->vlan_or_ethertype =
3846 cpu_to_le16(vlan_tx_tag_get(skb));
3847 tx_start_bd->bd_flags.as_bitfield |=
3848 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3850 /* when transmitting in a vf, start bd must hold the ethertype
3851 * for fw to enforce it
3854 tx_start_bd->vlan_or_ethertype =
3855 cpu_to_le16(ntohs(eth->h_proto));
3857 /* used by FW for packet accounting */
3858 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3861 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3863 /* turn on parsing and get a BD */
3864 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3866 if (xmit_type & XMIT_CSUM)
3867 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3869 if (!CHIP_IS_E1x(bp)) {
3870 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3871 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3873 if (xmit_type & XMIT_CSUM_ENC) {
3874 u16 global_data = 0;
3876 /* Set PBD in enc checksum offload case */
3877 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3878 &pbd_e2_parsing_data,
3881 /* turn on 2nd parsing and get a BD */
3882 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3884 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3886 memset(pbd2, 0, sizeof(*pbd2));
3888 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3889 (skb_inner_network_header(skb) -
3892 if (xmit_type & XMIT_GSO_ENC)
3893 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3897 pbd2->global_data = cpu_to_le16(global_data);
3899 /* add additional parse BD indication to start BD */
3900 SET_FLAG(tx_start_bd->general_data,
3901 ETH_TX_START_BD_PARSE_NBDS, 1);
3902 /* set encapsulation flag in start BD */
3903 SET_FLAG(tx_start_bd->general_data,
3904 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3906 } else if (xmit_type & XMIT_CSUM) {
3907 /* Set PBD in checksum offload case w/o encapsulation */
3908 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3909 &pbd_e2_parsing_data,
3913 /* Add the macs to the parsing BD if this is a vf or if
3914 * Tx Switching is enabled.
3917 /* override GRE parameters in BD */
3918 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3919 &pbd_e2->data.mac_addr.src_mid,
3920 &pbd_e2->data.mac_addr.src_lo,
3923 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3924 &pbd_e2->data.mac_addr.dst_mid,
3925 &pbd_e2->data.mac_addr.dst_lo,
3927 } else if (bp->flags & TX_SWITCHING) {
3928 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3929 &pbd_e2->data.mac_addr.dst_mid,
3930 &pbd_e2->data.mac_addr.dst_lo,
3934 SET_FLAG(pbd_e2_parsing_data,
3935 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3937 u16 global_data = 0;
3938 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3939 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3940 /* Set PBD in checksum offload case */
3941 if (xmit_type & XMIT_CSUM)
3942 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3944 SET_FLAG(global_data,
3945 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3946 pbd_e1x->global_data |= cpu_to_le16(global_data);
3949 /* Setup the data pointer of the first BD of the packet */
3950 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3951 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3952 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3953 pkt_size = tx_start_bd->nbytes;
3955 DP(NETIF_MSG_TX_QUEUED,
3956 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
3957 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3958 le16_to_cpu(tx_start_bd->nbytes),
3959 tx_start_bd->bd_flags.as_bitfield,
3960 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3962 if (xmit_type & XMIT_GSO) {
3964 DP(NETIF_MSG_TX_QUEUED,
3965 "TSO packet len %d hlen %d total len %d tso size %d\n",
3966 skb->len, hlen, skb_headlen(skb),
3967 skb_shinfo(skb)->gso_size);
3969 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3971 if (unlikely(skb_headlen(skb) > hlen)) {
3973 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3977 if (!CHIP_IS_E1x(bp))
3978 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3981 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
3984 /* Set the PBD's parsing_data field if not zero
3985 * (for the chips newer than 57711).
3987 if (pbd_e2_parsing_data)
3988 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3990 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3992 /* Handle fragmented skb */
3993 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3994 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3996 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3997 skb_frag_size(frag), DMA_TO_DEVICE);
3998 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3999 unsigned int pkts_compl = 0, bytes_compl = 0;
4001 DP(NETIF_MSG_TX_QUEUED,
4002 "Unable to map page - dropping packet...\n");
4004 /* we need to unmap all buffers already mapped
4006 * first_bd->nbd needs to be properly updated
4007 * before the call to bnx2x_free_tx_pkt
4009 first_bd->nbd = cpu_to_le16(nbd);
4010 bnx2x_free_tx_pkt(bp, txdata,
4011 TX_BD(txdata->tx_pkt_prod),
4012 &pkts_compl, &bytes_compl);
4013 return NETDEV_TX_OK;
4016 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4017 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4018 if (total_pkt_bd == NULL)
4019 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4021 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4022 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4023 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4024 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4027 DP(NETIF_MSG_TX_QUEUED,
4028 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4029 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4030 le16_to_cpu(tx_data_bd->nbytes));
4033 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4035 /* update with actual num BDs */
4036 first_bd->nbd = cpu_to_le16(nbd);
4038 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4040 /* now send a tx doorbell, counting the next BD
4041 * if the packet contains or ends with it
4043 if (TX_BD_POFF(bd_prod) < nbd)
4046 /* total_pkt_bytes should be set on the first data BD if
4047 * it's not an LSO packet and there is more than one
4048 * data BD. In this case pkt_size is limited by an MTU value.
4049 * However we prefer to set it for an LSO packet (while we don't
4050 * have to) in order to save some CPU cycles in the non-LSO
4051 * case, where we care much more about them.
4053 if (total_pkt_bd != NULL)
4054 total_pkt_bd->total_pkt_bytes = pkt_size;
4057 DP(NETIF_MSG_TX_QUEUED,
4058 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4059 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4060 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4061 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4062 le16_to_cpu(pbd_e1x->total_hlen_w));
4064 DP(NETIF_MSG_TX_QUEUED,
4065 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4067 pbd_e2->data.mac_addr.dst_hi,
4068 pbd_e2->data.mac_addr.dst_mid,
4069 pbd_e2->data.mac_addr.dst_lo,
4070 pbd_e2->data.mac_addr.src_hi,
4071 pbd_e2->data.mac_addr.src_mid,
4072 pbd_e2->data.mac_addr.src_lo,
4073 pbd_e2->parsing_data);
4074 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4076 netdev_tx_sent_queue(txq, skb->len);
4078 skb_tx_timestamp(skb);
4080 txdata->tx_pkt_prod++;
4082 * Make sure that the BD data is updated before updating the producer
4083 * since FW might read the BD right after the producer is updated.
4084 * This is only applicable for weak-ordered memory model archs such
4085 * as IA-64. The following barrier is also mandatory since FW
4086 * assumes packets must have BDs.
4090 txdata->tx_db.data.prod += nbd;
4093 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4097 txdata->tx_bd_prod += nbd;
4099 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4100 netif_tx_stop_queue(txq);
4102 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4103 * ordering of set_bit() in netif_tx_stop_queue() and read of
4107 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4108 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4109 netif_tx_wake_queue(txq);
4113 return NETDEV_TX_OK;
4117 * bnx2x_setup_tc - routine to configure net_device for multi tc
4119 * @dev: net device to configure
4120 * @tc: number of traffic classes to enable
4122 * callback connected to the ndo_setup_tc function pointer
4124 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4126 int cos, prio, count, offset;
4127 struct bnx2x *bp = netdev_priv(dev);
4129 /* setup tc must be called under rtnl lock */
4132 /* no traffic classes requested. Aborting */
4134 netdev_reset_tc(dev);
4138 /* requested to support too many traffic classes */
4139 if (num_tc > bp->max_cos) {
4140 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4141 num_tc, bp->max_cos);
4145 /* declare amount of supported traffic classes */
4146 if (netdev_set_num_tc(dev, num_tc)) {
4147 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4151 /* configure priority to traffic class mapping */
4152 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4153 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4154 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4155 "mapping priority %d to tc %d\n",
4156 prio, bp->prio_to_cos[prio]);
4159 /* Use this configuration to differentiate tc0 from other CoSes.
4160 This can be used for ETS or PFC, and saves the effort of setting
4161 up a multi-class queue disc or negotiating DCBX with a switch
4162 netdev_set_prio_tc_map(dev, 0, 0);
4163 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4164 for (prio = 1; prio < 16; prio++) {
4165 netdev_set_prio_tc_map(dev, prio, 1);
4166 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4169 /* configure traffic class to transmission queue mapping */
4170 for (cos = 0; cos < bp->max_cos; cos++) {
4171 count = BNX2X_NUM_ETH_QUEUES(bp);
4172 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4173 netdev_set_tc_queue(dev, cos, count, offset);
4174 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4175 "mapping tc %d to offset %d count %d\n",
4176 cos, offset, count);
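/* For illustration (hypothetical configuration): with 4 ETH queues and
 * num_tc = 2, tc 0 maps to queues 0..3 and tc 1 to queues 4..7 - the
 * offset advances by BNX2X_NUM_NON_CNIC_QUEUES(bp) per CoS while count
 * stays at BNX2X_NUM_ETH_QUEUES(bp).
 */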
4182 /* called with rtnl_lock */
4183 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4185 struct sockaddr *addr = p;
4186 struct bnx2x *bp = netdev_priv(dev);
4189 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4190 BNX2X_ERR("Requested MAC address is not valid\n");
4194 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4195 !is_zero_ether_addr(addr->sa_data)) {
4196 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4200 if (netif_running(dev)) {
4201 rc = bnx2x_set_eth_mac(bp, false);
4206 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4208 if (netif_running(dev))
4209 rc = bnx2x_set_eth_mac(bp, true);
4214 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4216 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4217 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4222 if (IS_FCOE_IDX(fp_index)) {
4223 memset(sb, 0, sizeof(union host_hc_status_block));
4224 fp->status_blk_mapping = 0;
4227 if (!CHIP_IS_E1x(bp))
4228 BNX2X_PCI_FREE(sb->e2_sb,
4229 bnx2x_fp(bp, fp_index,
4230 status_blk_mapping),
4231 sizeof(struct host_hc_status_block_e2));
4233 BNX2X_PCI_FREE(sb->e1x_sb,
4234 bnx2x_fp(bp, fp_index,
4235 status_blk_mapping),
4236 sizeof(struct host_hc_status_block_e1x));
4240 if (!skip_rx_queue(bp, fp_index)) {
4241 bnx2x_free_rx_bds(fp);
4243 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4244 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4245 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4246 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4247 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4249 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4250 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4251 sizeof(struct eth_fast_path_rx_cqe) *
4255 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4256 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4257 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4258 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4262 if (!skip_tx_queue(bp, fp_index)) {
4263 /* fastpath tx rings: tx_buf tx_desc */
4264 for_each_cos_in_tx_queue(fp, cos) {
4265 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4267 DP(NETIF_MSG_IFDOWN,
4268 "freeing tx memory of fp %d cos %d cid %d\n",
4269 fp_index, cos, txdata->cid);
4271 BNX2X_FREE(txdata->tx_buf_ring);
4272 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4273 txdata->tx_desc_mapping,
4274 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4277 /* end of fastpath */
4280 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4283 for_each_cnic_queue(bp, i)
4284 bnx2x_free_fp_mem_at(bp, i);
4287 void bnx2x_free_fp_mem(struct bnx2x *bp)
4290 for_each_eth_queue(bp, i)
4291 bnx2x_free_fp_mem_at(bp, i);
4294 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4296 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4297 if (!CHIP_IS_E1x(bp)) {
4298 bnx2x_fp(bp, index, sb_index_values) =
4299 (__le16 *)status_blk.e2_sb->sb.index_values;
4300 bnx2x_fp(bp, index, sb_running_index) =
4301 (__le16 *)status_blk.e2_sb->sb.running_index;
4303 bnx2x_fp(bp, index, sb_index_values) =
4304 (__le16 *)status_blk.e1x_sb->sb.index_values;
4305 bnx2x_fp(bp, index, sb_running_index) =
4306 (__le16 *)status_blk.e1x_sb->sb.running_index;
4310 /* Returns the number of actually allocated BDs */
4311 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4314 struct bnx2x *bp = fp->bp;
4315 u16 ring_prod, cqe_ring_prod;
4316 int i, failure_cnt = 0;
4318 fp->rx_comp_cons = 0;
4319 cqe_ring_prod = ring_prod = 0;
4321 /* This routine is called only during fp init, so
4322 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4324 for (i = 0; i < rx_ring_size; i++) {
4325 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4329 ring_prod = NEXT_RX_IDX(ring_prod);
4330 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4331 WARN_ON(ring_prod <= (i - failure_cnt));
4335 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4336 i - failure_cnt, fp->index);
4338 fp->rx_bd_prod = ring_prod;
4339 /* Limit the CQE producer by the CQE ring size */
4340 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4342 fp->rx_pkt = fp->rx_calls = 0;
4344 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4346 return i - failure_cnt;
4349 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4353 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4354 struct eth_rx_cqe_next_page *nextpg;
4356 nextpg = (struct eth_rx_cqe_next_page *)
4357 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4359 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4360 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4362 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4363 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4367 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4369 union host_hc_status_block *sb;
4370 struct bnx2x_fastpath *fp = &bp->fp[index];
4373 int rx_ring_size = 0;
4375 if (!bp->rx_ring_size &&
4376 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4377 rx_ring_size = MIN_RX_SIZE_NONTPA;
4378 bp->rx_ring_size = rx_ring_size;
4379 } else if (!bp->rx_ring_size) {
4380 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4382 if (CHIP_IS_E3(bp)) {
4383 u32 cfg = SHMEM_RD(bp,
4384 dev_info.port_hw_config[BP_PORT(bp)].
4387 /* Decrease ring size for 1G functions */
4388 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4389 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4393 /* allocate at least the number of buffers required by FW */
4394 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4395 MIN_RX_SIZE_TPA, rx_ring_size);
4397 bp->rx_ring_size = rx_ring_size;
4398 } else /* if rx_ring_size specified - use it */
4399 rx_ring_size = bp->rx_ring_size;
4401 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
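/* Worked example (hypothetical values): if MAX_RX_AVAIL were 4096 and
 * the device ran 8 RSS queues, rx_ring_size would start at 512 and
 * then be raised to MIN_RX_SIZE_TPA (or MIN_RX_SIZE_NONTPA when TPA is
 * off) if that minimum is larger, so the FW always gets at least the
 * number of buffers it requires.
 */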
4404 sb = &bnx2x_fp(bp, index, status_blk);
4406 if (!IS_FCOE_IDX(index)) {
4408 if (!CHIP_IS_E1x(bp)) {
4409 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4410 sizeof(struct host_hc_status_block_e2));
4414 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4415 sizeof(struct host_hc_status_block_e1x));
4421 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4422 * set shortcuts for it.
4424 if (!IS_FCOE_IDX(index))
4425 set_sb_shortcuts(bp, index);
	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFUP,
			   "allocating tx memory of fp %d cos %d\n",
			   index, cos);

			txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
						      sizeof(struct sw_tx_bd),
						      GFP_KERNEL);
			if (!txdata->tx_buf_ring)
				goto alloc_mem_err;
			txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
							       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
			if (!txdata->tx_desc_ring)
				goto alloc_mem_err;
		}
	}
	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		bnx2x_fp(bp, index, rx_buf_ring) =
			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_buf_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_desc_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
					sizeof(struct eth_rx_bd) * NUM_RX_BD);
		if (!bnx2x_fp(bp, index, rx_desc_ring))
			goto alloc_mem_err;

		/* Seed all CQEs by 1s */
		bnx2x_fp(bp, index, rx_comp_ring) =
			BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
					 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
		if (!bnx2x_fp(bp, index, rx_comp_ring))
			goto alloc_mem_err;

		/* SGE ring */
		bnx2x_fp(bp, index, rx_page_ring) =
			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
				GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_page_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_sge_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
					BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		if (!bnx2x_fp(bp, index, rx_sge_ring))
			goto alloc_mem_err;

		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;
/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* The FW will drop all packets if the queue is not big enough; in
	 * that case we disable the queue entirely. The minimum size differs
	 * for OOO, TPA and non-TPA queues.
	 */
	if (ring_size < (fp->disable_tpa ?
			 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}
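/* Low-memory policy of the error path above: a ring smaller than requested
 * but still at or above the firmware minimum is kept (the function returns 0
 * and the queue simply runs with fewer BDs); only rings below the minimum
 * are torn down and reported as -ENOMEM.
 */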
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		/* FCoE */
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
			/* we will fail the load process instead of marking
			 * NO_FCOE_FLAG
			 */
			return -ENOMEM;

	return 0;
}
static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/* 1. Allocate FP for leading - fatal if error
	 * 2. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		WARN_ON(delta < 0);
		bnx2x_shrink_eth_fp(bp, delta);
		if (CNIC_SUPPORT(bp))
			/* move non-eth FPs next to the last eth FP; must be
			 * done in this order: FCOE_IDX < FWD_IDX < OOO_IDX
			 */

			/* move the FCoE fp even if NO_FCOE_FLAG is on */
			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
		bp->num_ethernet_queues -= delta;
		bp->num_queues = bp->num_ethernet_queues +
				 bp->num_cnic_queues;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}
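/* Example of the shrink path above (illustrative numbers): if 8 RSS queues
 * were requested and allocation first fails at i == 5, delta is 3, the FCoE
 * fastpath slides down from index 8 to index 5, and num_queues is re-derived
 * from the reduced Ethernet queue count plus the CNIC queues.
 */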
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < bp->fp_array_size; i++)
		kfree(bp->fp[i].tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}
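/* tpa_info is allocated per fastpath entry, so it is released for every slot
 * before the fp array itself; the remaining top-level members (sp_objs,
 * fp_stats, bnx2x_txq, msix_table, ilt) are plain kfree()s, which tolerate
 * members that were never allocated (kfree(NULL) is a no-op).
 */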
int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/* The biggest MSI-X table we might need is the maximum number of
	 * fast path IGU SBs plus the default SB (PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;
	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;

alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;

	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);

	/* The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */
	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}

	return LINK_CONFIG_IDX(sel_phy_idx);
}
#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif
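/* bnx2x_fcoe_get_wwn() above relies on HILO_U64() from bnx2x.h, which packs
 * the two 32-bit shmem words into one 64-bit WWN (roughly ((u64)hi << 32) +
 * lo), so the node/port names are reported exactly as published by the
 * management firmware.
 */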
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}
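/* The bounds check above keeps the MTU within what the chip can frame:
 * ETH_MAX_JUMBO_PACKET_SIZE on the high end and, on the low end, whatever
 * still yields a minimal Ethernet frame once the 14-byte header is added
 * (the exact constants are defined in the driver/HSI headers).
 */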
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	u32 changes;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_GRO)
		flags |= GRO_ENABLE_FLAG;
	else
		flags &= ~GRO_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	changes = flags ^ bp->flags;

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
		changes &= ~GRO_ENABLE_FLAG;

	if (changes)
		bnx2x_reload = true;

	bp->flags = flags;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}
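/* Example of the GRO/LRO interaction above: toggling GRO while LRO (TPA)
 * stays enabled changes nothing in the datapath, since TPA keeps aggregating
 * either way, so the GRO bit is masked out of 'changes' and no unload/load
 * cycle is forced for it.
 */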
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}
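/* Both storm contexts get a CDU validation byte built from the HW CID, the
 * storm's CDU region number and the Ethernet connection type; as far as this
 * code is concerned, the Context Data Unit uses it to sanity-check that a
 * context access really belongs to this connection.
 */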
static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);

	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}
static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);

	/* clear and set the HC_ENABLED bit */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
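/* Coalescing granularity: the requested interval is converted to BTR ticks
 * by integer division (BNX2X_BTR is 4 us per tick in bnx2x.h at the time of
 * writing, so e.g. a 100 us request becomes 25 ticks), and a request of 0 us
 * disables the index via the HC_ENABLED flag rather than programming a zero
 * timeout.
 */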
void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_atomic();
	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
	   flag);
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
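/* The barriers around set_bit() above make the flag globally visible before
 * the delayed work runs; the assumption here is that the sp_rtnl task (in
 * bnx2x_main.c) consumes sp_rtnl_state with test_and_clear_bit(), so a flag
 * set here is acted upon exactly once.
 */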