/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"
/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;
}
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
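/* Illustrative walk-through (example values, assuming two ports per path):
 * a function coming up on path 0, port 1 increments load_count[0][0]
 * (the path-wide total) and load_count[0][2] (the port 1 slot); the first
 * function up on a path sees load_count[path][0] == 1 and therefore
 * performs the common-block initialization.
 */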
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {
		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
				      "pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
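/* Illustrative walk-through of the scheme above (assuming 64-bit mask
 * elements): clearing SGE index 70 clears bit 6 of fp->sge_mask[1]; the
 * producer is then advanced over leading all-zero mask elements in
 * BIT_VEC64_ELEM_SZ-sized strides, and each skipped element is re-armed
 * to all ones for the next pass around the ring.
 */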
/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		return le32_to_cpu(cqe->rss_hash_result);
	return 0;
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page =
			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
		/*
		 * FW 7.2.16 BUG workaround:
		 * if SGE size is (exactly) a multiple of gro_size
		 * fw will place one less frag on SGE.
		 * the calculation is done only for potentially
		 * dangerous MTUs.
		 */
		if (unlikely(bp->gro_check))
			if (!(SGE_PAGE_SIZE * PAGES_PER_SGE % gro_size))
				tpa_info->full_page -= gro_size;
		tpa_info->gro_size = gro_size;
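		/* Illustrative numbers (not from the spec): with
		 * SGE_PAGE_SIZE * PAGES_PER_SGE == 32768 and
		 * gro_size == 1448, full_page is truncated to
		 * 32768 / 1448 * 1448 == 31856, i.e. 22 whole segments
		 * per SGE; when 32768 is an exact multiple of gro_size
		 * (e.g. gro_size == 4096) the workaround above drops
		 * one more segment.
		 */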
	}
#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
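/* Byte breakdown (illustrative): two 1-byte NOPs for alignment, a 1-byte
 * kind (8), a 1-byte length (10), a 4-byte TSval and a 4-byte TSecr:
 * 2 + 1 + 1 + 4 + 4 = 12 bytes.
 */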
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
				    u16 len_on_bd)
{
	/*
	 * TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6)
		hdrs_len += sizeof(struct ipv6hdr);
	else /* IPv4 */
		hdrs_len += sizeof(struct iphdr);

	/* Check if there was a TCP timestamp; if there is one it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}
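/* Worked example (illustrative): for an IPv4 aggregation with the
 * timestamp flag set, hdrs_len = 14 (ETH_HLEN) + 20 (tcphdr) +
 * 20 (iphdr) + 12 (TPA_TSTAMP_OPT_LEN) = 66, so a 1514-byte first
 * packet gives an approximate MSS of 1448.
 */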
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size) {
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

		/* set for GRO */
		if (fp->mode == TPA_MODE_GRO)
			skb_shinfo(skb)->gso_type =
			    (GET_FLAG(tpa_info->parsing_flags,
				      PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
						PRS_FLAG_OVERETH_IPV6) ?
				SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size,
					 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
				  struct bnx2x_agg_info *tpa_info,
				  u16 pages,
				  struct eth_end_agg_rx_cqe *cqe,
				  u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	kfree(new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	fp->eth_q_stats.rx_skb_alloc_failed++;
}
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad;
		u8 *data;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while "
					  "disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			} else {
				u16 queue;

				queue = cqe->end_agg_cqe.queue_index;
				tpa_info = &fp->tpa_info[queue];
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_stop on queue %d\n",
				   queue);

				frag_size =
					le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
					tpa_info->len_on_bd;

				if (fp->mode == TPA_MODE_GRO)
					pages = (frag_size +
						 tpa_info->full_page - 1) /
						tpa_info->full_page;
				else
					pages = SGE_PAGE_ALIGN(frag_size) >>
						SGE_PAGE_SHIFT;

				bnx2x_tpa_stop(bp, fp, tpa_info, pages,
					       &cqe->end_agg_cqe,
					       comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
				if (bp->panic)
					return 0;
#endif

				bnx2x_update_sge_prod(fp, pages,
						      &cqe->end_agg_cqe);
				goto next_cqe;
			}
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			fp->eth_q_stats.rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data);
				if (unlikely(!skb)) {
					kfree(data);
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM) {
			if (likely(BNX2X_RX_CSUM_OK(cqe)))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				fp->eth_q_stats.hw_csum_err++;
		}

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
			 "[fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata[cos].tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
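/* Worked example (illustrative values): in SI mode maxCfg acts as a
 * percentage, so maxCfg == 30 on a 10000 Mbps link yields 3000 Mbps;
 * in SD mode maxCfg is in 100 Mbps units, so maxCfg == 25 caps the
 * rate at 2500 Mbps.
 */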
/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static inline void bnx2x_fill_report_data(struct bnx2x *bp,
					  struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}
/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}
/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (!CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
							  GFP_ATOMIC);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate "
						  "%d rx sges\n", i);
					BNX2X_ERR("disabling TPA for "
						  "queue[%d]\n", j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
			unsigned pkts_compl = 0, bytes_compl = 0;

			u16 sw_prod = txdata->tx_pkt_prod;
			u16 sw_cons = txdata->tx_pkt_cons;

			while (sw_cons != sw_prod) {
				bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
						  &pkts_compl, &bytes_compl);
				sw_cons++;
			}
			netdev_tx_reset_queue(
				netdev_get_tx_queue(bp->dev, txdata->txq_index));
		}
	}
}
static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;

		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		kfree(data);
	}
}
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}
/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;
	free_irq(bp->msix_table[offset].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[offset].vector);
	offset++;
#ifdef BCM_CNIC
	if (nvecs == offset)
		return;
	offset++;
#endif

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
		   "irq\n", i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}
void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG)
		bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
				     CNIC_PRESENT + 1);
	else if (bp->flags & USING_MSI_FLAG)
		free_irq(bp->pdev->irq, bp->dev);
	else
		free_irq(bp->pdev->irq, bp->dev);
}
int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc, req_cnt;

	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
	   bp->msix_table[0].entry);
	msix_vec++;

#ifdef BCM_CNIC
	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
	   bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
	msix_vec++;
#endif
	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", msix_vec, msix_vec, i);
		msix_vec++;
	}

	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* how many fewer vectors will we have? */
		int diff = req_cnt - rc;

		DP(NETIF_MSG_IFUP,
		   "Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			DP(NETIF_MSG_IFUP,
			   "MSI-X is not attainable rc %d\n", rc);
			return rc;
		}
		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_queues -= diff;

		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
		   bp->num_queues);
	} else if (rc) {
		/* fall to INTx if not enough memory */
		if (rc == -ENOMEM)
			bp->flags |= DISABLE_MSI_FLAG;
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	rc = request_irq(bp->msix_table[offset++].vector,
			 bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
				  bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	offset = 1 + CNIC_PRESENT;
	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
		    " ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	return rc;
}
static inline int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;
	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	return 0;
}
static inline void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static inline void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}
void bnx2x_netif_start(struct bnx2x *bp)
{
	if (netif_running(bp->dev)) {
		bnx2x_napi_enable(bp);
		bnx2x_int_enable(bp);
		if (bp->state == BNX2X_STATE_OPEN)
			netif_tx_wake_all_queues(bp->dev);
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr =
				(struct vlan_ethhdr *)skb->data;

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe_tx(bp, txq_index);
	}
#endif
	/* select a non-FCoE queue */
	return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
}
void bnx2x_set_num_queues(struct bnx2x *bp)
{
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;
	case ETH_RSS_MODE_REGULAR:
		bp->num_queues = bnx2x_calc_num_queues(bp);
		break;

	default:
		bp->num_queues = 1;
		break;
	}

	/* override in ISCSI SD mode */
	if (IS_MF_ISCSI_SD(bp))
		bp->num_queues = 1;

	/* Add special queues */
	bp->num_queues += NON_ETH_CONTEXT_USE;
}
/**
 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 *
 * @bp:		Driver handle
 *
 * We currently support at most 16 Tx queues for each CoS thus we will
 * allocate a multiple of 16 for ETH L2 rings according to the value of the
 * bp->max_cos.
 *
 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
 * index after all ETH L2 indices.
 *
 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be the holes at the end of each group of 16 ETH L2 indices (0..15,
 * 16..31,...) with indices that are not coupled with any real Tx queue.
 *
 * The proper configuration of skb->queue_mapping is handled by
 * bnx2x_select_queue() and __skb_tx_hash().
 *
 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
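 *
 * Example (illustrative values): with max_cos == 2 and 8 ETH queues,
 * CoS 0 uses Tx indices 0..7 and CoS 1 uses 16..23; indices 8..15 and
 * 24..31 are holes, and an FCoE L2 queue (if present) takes the index
 * right after all ETH L2 indices.
 */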
static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
	int rc, tx, rx;

	tx = MAX_TXQS_PER_COS * bp->max_cos;
	rx = BNX2X_NUM_ETH_QUEUES(bp);

/* account for fcoe queue */
#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		rx += FCOE_PRESENT;
		tx += FCOE_PRESENT;
	}
#endif

	rc = netif_set_real_num_tx_queues(bp->dev, tx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
		return rc;
	}
	rc = netif_set_real_num_rx_queues(bp->dev, rx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
		return rc;
	}

	DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
			  tx, rx);

	return 0;
}
static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u32 mtu;

		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
		if (IS_FCOE_IDX(i))
			/*
			 * Although there are no IP frames expected to arrive to
			 * this ring we still want to add an
			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
			 * overrun
			 */
			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
		else
			mtu = bp->dev->mtu;
		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
				  IP_HEADER_ALIGNMENT_PADDING +
				  ETH_OVREHEAD +
				  mtu +
				  BNX2X_FW_RX_ALIGN_END;
		/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
	}
}
static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
{
	int i;
	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);

	/*
	 * Prepare the initial contents of the indirection table if RSS is
	 * enabled
	 */
	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
		for (i = 0; i < sizeof(ind_table); i++)
			ind_table[i] =
				ethtool_rxfh_indir_default(i, num_eth_queues);
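		/* Illustrative: ethtool_rxfh_indir_default(i, n) is i % n,
		 * so with 4 ETH queues the table simply cycles
		 * 0, 1, 2, 3, 0, ... across all its entries.
		 */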
	}

	/*
	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
	 * per-port, so if explicit configuration is needed, do it only
	 * for a PMF.
	 *
	 * For 57712 and newer on the other hand it's a per-function
	 * configuration.
	 */
	return bnx2x_config_rss_pf(bp, ind_table,
				   bp->port.pmf || !CHIP_IS_E1x(bp));
}
int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
{
	struct bnx2x_config_rss_params params = {0};
	int i;

	/* Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 *
	 * if (!is_eth_multi(bp))
	 *	bp->multi_mode = ETH_RSS_MODE_DISABLED;
	 */

	params.rss_obj = &bp->rss_conf_obj;

	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	/* RSS mode */
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
		break;
	case ETH_RSS_MODE_REGULAR:
		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
		break;
	case ETH_RSS_MODE_VLAN_PRI:
		__set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
		break;
	case ETH_RSS_MODE_E1HOV_PRI:
		__set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
		break;
	case ETH_RSS_MODE_IP_DSCP:
		__set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
		break;
	default:
		BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
		return -EINVAL;
	}

	/* If RSS is enabled */
	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
		/* RSS configuration */
		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);

		/* Hash bits */
		params.rss_result_mask = MULTI_MASK;

		memcpy(params.ind_table, ind_table, sizeof(params.ind_table));

		if (config_hash) {
			/* RSS keys */
			for (i = 0; i < sizeof(params.rss_key) / 4; i++)
				params.rss_key[i] = random32();

			__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
		}
	}

	return bnx2x_config_rss(bp, &params);
}
static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	struct bnx2x_func_state_params func_params = {0};

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_INIT;

	func_params.params.hw_init.load_phase = load_code;

	return bnx2x_func_state_change(bp, &func_params);
}
/*
 * Cleans the objects that have internal lists without sending
 * ramrods. Should be run when interrupts are disabled.
 */
static void bnx2x_squeeze_objects(struct bnx2x *bp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
	struct bnx2x_mcast_ramrod_params rparam = {0};
	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;

	/***************** Cleanup MACs' object first *************************/

	/* Wait for completion of requested */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	/* Perform a dry cleanup */
	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

	/* Clean ETH primary MAC */
	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);

	/* Cleanup UC list */
	vlan_mac_flags = 0;
	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);

	/***************** Now clean mcast object *****************************/
	rparam.mcast_obj = &bp->mcast_obj;
	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

	/* Add a DEL command... */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
			  "object: %d\n", rc);

	/* ...and wait until all pending commands are cleared */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	while (rc != 0) {
		if (rc < 0) {
			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
				  rc);
			return;
		}

		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	}
}
#ifndef BNX2X_STOP_ON_ERROR
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		goto label; \
	} while (0)
#else
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		bnx2x_panic(); \
	} while (0)
#endif
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	int port = BP_PORT(bp);
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* Set the initial link reported state to link down */
	bnx2x_acquire_phy_lock(bp);
	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		  &bp->last_reported_link.link_report_flags);
	bnx2x_release_phy_lock(bp);

	/* must be called before memory allocation and HW init */
	bnx2x_ilt_set_info(bp);

	/*
	 * Zero fastpath structures preserving invariants like napi, which are
	 * allocated only once, fp index, max_cos, bp pointer.
	 * Also set fp->disable_tpa.
	 */
	for_each_queue(bp, i)
		bnx2x_bz_fp(bp, i);

	/* Set the receive queues buffer size */
	bnx2x_set_rx_buf_size(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	/* As long as bnx2x_alloc_mem() may possibly update
	 * bp->num_queues, bnx2x_set_real_num_queues() should always
	 * come after it.
	 */
	rc = bnx2x_set_real_num_queues(bp);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* configure multi cos mappings in kernel.
	 * this configuration may be overridden by a multi class queue
	 * discipline or by a dcbx negotiation result.
	 */
	bnx2x_setup_tc(bp->dev, bp->max_cos);

	bnx2x_napi_enable(bp);

	/* set pf load just before approaching the MCP */
	bnx2x_set_pf_load(bp);

	/* Send LOAD_REQUEST command to MCP
	 * Returns the type of LOAD command:
	 * if it is the first port to be initialized
	 * common blocks should be initialized, otherwise - not
	 */
	if (!BP_NOMCP(bp)) {
		/* init fw_seq */
		bp->fw_seq =
			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
			 DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);

		/* Get current FW pulse sequence */
		bp->fw_drv_pulse_wr_seq =
			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
			 DRV_PULSE_SEQ_MASK);
		BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);

		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			LOAD_ERROR_EXIT(bp, load_error1);
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			LOAD_ERROR_EXIT(bp, load_error1);
		}
		if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
		    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
			/* build FW version dword */
			u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
				    (BCM_5710_FW_MINOR_VERSION << 8) +
				    (BCM_5710_FW_REVISION_VERSION << 16) +
				    (BCM_5710_FW_ENGINEERING_VERSION << 24);

			/* read loaded FW from chip */
			u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);

			DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x",
			   loaded_fw, my_fw);

			/* abort nic load if version mismatch */
			if (my_fw != loaded_fw) {
				BNX2X_ERR("bnx2x with FW %x already loaded, "
					  "which mismatches my %x FW. aborting",
					  loaded_fw, my_fw);
				rc = -EBUSY;
				LOAD_ERROR_EXIT(bp, load_error2);
			}
		}
	} else {
		int path = BP_PATH(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		load_count[path][0]++;
		load_count[path][1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		if (load_count[path][0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[path][1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
		bp->port.pmf = 1;
		/*
		 * We need the barrier to ensure the ordering between the
		 * writing to bp->port.pmf here and reading it from the
		 * bnx2x_periodic_task().
		 */
		smp_mb();
		queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
	} else
		bp->port.pmf = 0;

	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Init Function state controlling object */
	bnx2x__init_func_obj(bp);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Init per-function objects */
	bnx2x_init_bp_objs(bp);

	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	     (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base)) {
		if (SHMEM2_HAS(bp, dcc_support))
			SHMEM2_WR(bp, dcc_support,
				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
	rc = bnx2x_func_start(bp);
	if (rc) {
		BNX2X_ERR("Function start failed!\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			LOAD_ERROR_EXIT(bp, load_error3);
		}
	}

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

#ifdef BCM_CNIC
	/* Enable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
#endif

	for_each_nondefault_queue(bp, i) {
		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
		if (rc)
			LOAD_ERROR_EXIT(bp, load_error4);
	}

	rc = bnx2x_init_rss_pf(bp);
	if (rc)
		LOAD_ERROR_EXIT(bp, load_error4);

	/* Now when Clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

	/* Configure a ucast MAC */
	rc = bnx2x_set_eth_mac(bp, true);
	if (rc)
		LOAD_ERROR_EXIT(bp, load_error4);

	if (bp->pending_max) {
		bnx2x_update_max_mf_config(bp, bp->pending_max);
		bp->pending_max = 0;
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */

	/* Initialize Rx filter. */
	netif_addr_lock_bh(bp->dev);
	bnx2x_set_rx_mode(bp->dev);
	netif_addr_unlock_bh(bp->dev);

	/* Start the Tx */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_clear_bit();
		break;

	case LOAD_DIAG:
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (bp->port.pmf)
		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
	else
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	/* re-read iscsi info */
	bnx2x_get_iscsi_info(bp);
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif

	/* mark driver is loaded in shmem2 */
	if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
		u32 val;
		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	/* Wait for all pending SP commands to complete */
	if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
		bnx2x_nic_unload(bp, UNLOAD_CLOSE);
		return -EBUSY;
	}

	bnx2x_dcbx_init(bp);
	return 0;
#ifndef BNX2X_STOP_ON_ERROR
load_error4:
#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);

	/* Clean queueable objects */
	bnx2x_squeeze_objects(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error2:
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}

	bp->port.pmf = 0;
load_error1:
	bnx2x_napi_disable(bp);
	/* clear pf_load status, as it was already set */
	bnx2x_clear_pf_load(bp);
load_error0:
	bnx2x_free_mem(bp);

	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;
	bool global = false;

	/* mark driver is unloaded in shmem2 */
	if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
		u32 val;
		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
			  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	if ((bp->state == BNX2X_STATE_CLOSED) ||
	    (bp->state == BNX2X_STATE_ERROR)) {
		/* We can get here if the driver has been unloaded
		 * during parity error recovery and is either waiting for a
		 * leader to complete or for other functions to unload and
		 * then ifdown has been issued. In this case we want to
		 * unload and let other functions complete a recovery
		 * process.
		 */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_leader_lock(bp);
		smp_mb();

		DP(NETIF_MSG_HW, "Releasing a leadership...\n");

		return -EINVAL;
	}

	/*
	 * It's important to set the bp->state to the value different from
	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
	 */
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
	smp_mb();

	/* Stop Tx */
	bnx2x_tx_disable(bp);

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	del_timer_sync(&bp->timer);

	/* Set ALWAYS_ALIVE bit in shmem */
	bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;

	bnx2x_drv_pulse(bp);

	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
	bnx2x_save_statistics(bp);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);
	else {
		/* Send the UNLOAD_REQUEST to the MCP */
		bnx2x_send_unload_req(bp, unload_mode);

		/*
		 * Prevent transactions to host from the functions on the
		 * engine that doesn't reset global blocks in case of global
		 * attention once global blocks are reset and gates are opened
		 * (the engine whose leader will perform the recovery
		 * last).
		 */
		if (!CHIP_IS_E1x(bp))
			bnx2x_pf_disable(bp);

		/* Disable HW interrupts, NAPI */
		bnx2x_netif_stop(bp, 1);

		/* Release IRQs */
		bnx2x_free_irq(bp);

		/* Report UNLOAD_DONE to MCP */
		bnx2x_send_unload_done(bp);
	}

	/*
	 * At this stage no more interrupts will arrive so we may safely clean
	 * the queueable objects here in case they failed to get cleaned so far.
	 */
	bnx2x_squeeze_objects(bp);

	/* There should be no more pending SP commands at this stage */
	bp->sp_state = 0;

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	/* Check if there are pending parity attentions. If there are - set
	 * RECOVERY_IN_PROGRESS.
	 */
	if (bnx2x_chk_parity_attn(bp, &global, false)) {
		bnx2x_set_reset_in_progress(bp);

		/* Set RESET_IS_GLOBAL if needed */
		if (global)
			bnx2x_set_reset_global(bp);
	}

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
		bnx2x_disable_close_the_gate(bp);

	return 0;
}
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	/* If there is no power capability, silently succeed */
	if (!bp->pm_cap) {
		DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
		return 0;
	}

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * net_device service functions
 */
int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	u8 cos;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		for_each_cos_in_tx_queue(fp, cos)
			if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
				bnx2x_tx_int(bp, &fp->txdata[cos]);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
#ifdef BCM_CNIC
			/* No need to update SB for FCoE L2 ring as long as
			 * it's connected to the default SB and the SB
			 * has been updated when NAPI was scheduled.
			 */
			if (IS_FCOE_FP(fp)) {
				napi_complete(napi);
				break;
			}
#endif

			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (bnx2x_update_fpsb_idx)
			 * prior to this check (bnx2x_has_rx_work) so that
			 * we won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed
			 * to right before bnx2x_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				DP(NETIF_MSG_HW,
				   "Update index to %d\n", fp->fp_hc_idx);
				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_hc_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fp_txdata *txdata,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
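/* Descriptive note for the helper below: it adjusts a ones-complement
 * checksum when the range the HW will cover differs from what the stack
 * summed - a positive fix subtracts the bytes just before t_header from
 * csum, a negative fix adds back the bytes just after it; the result is
 * returned byte-swapped for the parse BD.
 */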
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
2386 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2390 if (skb->ip_summed != CHECKSUM_PARTIAL)
2394 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2396 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2397 rc |= XMIT_CSUM_TCP;
2401 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2402 rc |= XMIT_CSUM_TCP;
2406 if (skb_is_gso_v6(skb))
2407 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2408 else if (skb_is_gso(skb))
2409 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
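/* Example (editor's illustration): a TSO TCP/IPv4 skb with CHECKSUM_PARTIAL
 * yields xmit_type = XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, while a
 * UDP/IPv6 checksum-offload skb yields XMIT_CSUM_V6 alone -- the absence of
 * XMIT_CSUM_TCP is what later marks the packet as UDP in the start BD.
 */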
2414 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2415 /* check if packet requires linearization (packet is too fragmented);
2416 no need to check fragmentation if page size > 8K (there will be no
2417 violation of FW restrictions) */
2418 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2423 int first_bd_sz = 0;
2425 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2426 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2428 if (xmit_type & XMIT_GSO) {
2429 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2430 /* Check if LSO packet needs to be copied:
2431 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2432 int wnd_size = MAX_FETCH_BD - 3;
2433 /* Number of windows to check */
2434 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2439 /* Headers length */
2440 hlen = (int)(skb_transport_header(skb) - skb->data) +
2443 /* Amount of data (w/o headers) on linear part of SKB*/
2444 first_bd_sz = skb_headlen(skb) - hlen;
2446 wnd_sum = first_bd_sz;
2448 /* Calculate the first sum - it's special */
2449 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2451 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2453 /* If there was data on linear skb data - check it */
2454 if (first_bd_sz > 0) {
2455 if (unlikely(wnd_sum < lso_mss)) {
2460 wnd_sum -= first_bd_sz;
2463 /* Others are easier: run through the frag list and
2464 check all windows */
2465 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2467 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2469 if (unlikely(wnd_sum < lso_mss)) {
2474 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2477 /* in the non-LSO case a too fragmented packet should always
2484 if (unlikely(to_copy))
2485 DP(NETIF_MSG_TX_QUEUED,
2486 "Linearization IS REQUIRED for %s packet. "
2487 "num_frags %d hlen %d first_bd_sz %d\n",
2488 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2489 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
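/* Worked example (editor's illustration; the value of MAX_FETCH_BD is
 * assumed): with wnd_size = MAX_FETCH_BD - 3, the loop above slides a
 * window of wnd_size consecutive BDs across the fragments (the linear part
 * counts as the first BD) and demands at least lso_mss bytes in every
 * window; any window that comes up short forces skb_linearize() in the
 * caller rather than handing the FW a descriptor run it cannot segment.
 */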
2495 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2498 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2499 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2500 ETH_TX_PARSE_BD_E2_LSO_MSS;
2501 if ((xmit_type & XMIT_GSO_V6) &&
2502 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2503 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2507 * bnx2x_set_pbd_gso - update PBD in GSO case.
2511 * @xmit_type: xmit flags
2513 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2514 struct eth_tx_parse_bd_e1x *pbd,
2517 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2518 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2519 pbd->tcp_flags = pbd_tcp_flags(skb);
2521 if (xmit_type & XMIT_GSO_V4) {
2522 pbd->ip_id = swab16(ip_hdr(skb)->id);
2523 pbd->tcp_pseudo_csum =
2524 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2526 0, IPPROTO_TCP, 0));
2529 pbd->tcp_pseudo_csum =
2530 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2531 &ipv6_hdr(skb)->daddr,
2532 0, IPPROTO_TCP, 0));
2534 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
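/* Editor's note: the pseudo-header checksums above are computed with a
 * length of 0, i.e. roughly ~fold(saddr + daddr + htons(IPPROTO_TCP)), and
 * ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN tells the FW so -- presumably
 * the per-segment length is folded in when each TSO segment is built.
 */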
2538 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2540 * @bp: driver handle
2542 * @parsing_data: data to be updated
2543 * @xmit_type: xmit flags
2547 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2548 u32 *parsing_data, u32 xmit_type)
2551 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2552 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2553 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2555 if (xmit_type & XMIT_CSUM_TCP) {
2556 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2557 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2558 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2560 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2562 /* We support checksum offload for TCP and UDP only.
2563 * No need to pass the UDP header length - it's a constant.
2565 return skb_transport_header(skb) +
2566 sizeof(struct udphdr) - skb->data;
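/* Example (editor's illustration): for TCP/IPv4 with no options the PBD
 * gets the transport-header offset in 16-bit words (34 / 2 = 17) and the
 * TCP header length in 32-bit words (20 / 4 = 5), matching the _W and _DW
 * suffixes, and the function returns 14 + 20 + 20 = 54 header bytes.
 */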
2569 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2570 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2572 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2574 if (xmit_type & XMIT_CSUM_V4)
2575 tx_start_bd->bd_flags.as_bitfield |=
2576 ETH_TX_BD_FLAGS_IP_CSUM;
2578 tx_start_bd->bd_flags.as_bitfield |=
2579 ETH_TX_BD_FLAGS_IPV6;
2581 if (!(xmit_type & XMIT_CSUM_TCP))
2582 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2586 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2588 * @bp: driver handle
2590 * @pbd: parse BD to be updated
2591 * @xmit_type: xmit flags
2593 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2594 struct eth_tx_parse_bd_e1x *pbd,
2597 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2599 /* for now NS flag is not used in Linux */
2601 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2602 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2604 pbd->ip_hlen_w = (skb_transport_header(skb) -
2605 skb_network_header(skb)) >> 1;
2607 hlen += pbd->ip_hlen_w;
2609 /* We support checksum offload for TCP and UDP only */
2610 if (xmit_type & XMIT_CSUM_TCP)
2611 hlen += tcp_hdrlen(skb) / 2;
2613 hlen += sizeof(struct udphdr) / 2;
2615 pbd->total_hlen_w = cpu_to_le16(hlen);
2618 if (xmit_type & XMIT_CSUM_TCP) {
2619 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2622 s8 fix = SKB_CS_OFF(skb); /* signed! */
2624 DP(NETIF_MSG_TX_QUEUED,
2625 "hlen %d fix %d csum before fix %x\n",
2626 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2628 /* HW bug: fixup the CSUM */
2629 pbd->tcp_pseudo_csum =
2630 bnx2x_csum_fix(skb_transport_header(skb),
2633 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2634 pbd->tcp_pseudo_csum);
2640 /* called with netif_tx_lock
2641 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2642 * netif_wake_queue()
2644 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2646 struct bnx2x *bp = netdev_priv(dev);
2648 struct bnx2x_fastpath *fp;
2649 struct netdev_queue *txq;
2650 struct bnx2x_fp_txdata *txdata;
2651 struct sw_tx_bd *tx_buf;
2652 struct eth_tx_start_bd *tx_start_bd, *first_bd;
2653 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2654 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2655 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2656 u32 pbd_e2_parsing_data = 0;
2657 u16 pkt_prod, bd_prod;
2658 int nbd, txq_index, fp_index, txdata_index;
2660 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2663 __le16 pkt_size = 0;
2665 u8 mac_type = UNICAST_ADDRESS;
2667 #ifdef BNX2X_STOP_ON_ERROR
2668 if (unlikely(bp->panic))
2669 return NETDEV_TX_BUSY;
2672 txq_index = skb_get_queue_mapping(skb);
2673 txq = netdev_get_tx_queue(dev, txq_index);
2675 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2677 /* decode the fastpath index and the cos index from the txq */
2678 fp_index = TXQ_TO_FP(txq_index);
2679 txdata_index = TXQ_TO_COS(txq_index);
2683 * Override the above for the FCoE queue:
2684 * - FCoE fp entry is right after the ETH entries.
2685 * - FCoE L2 queue uses bp->txdata[0] only.
2687 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2688 bnx2x_fcoe_tx(bp, txq_index)))) {
2689 fp_index = FCOE_IDX;
2694 /* enable this debug print to view the transmission queue being used
2695 DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
2696 txq_index, fp_index, txdata_index); */
2698 /* locate the fastpath and the txdata */
2699 fp = &bp->fp[fp_index];
2700 txdata = &fp->txdata[txdata_index];
2702 /* enable this debug print to view the transmission details
2703 DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
2704 " tx_data ptr %p fp pointer %p\n",
2705 txdata->cid, fp_index, txdata_index, txdata, fp); */
2707 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2708 (skb_shinfo(skb)->nr_frags + 3))) {
2709 fp->eth_q_stats.driver_xoff++;
2710 netif_tx_stop_queue(txq);
2711 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2712 return NETDEV_TX_BUSY;
2715 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2716 "protocol(%x,%x) gso type %x xmit_type %x\n",
2717 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2718 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2720 eth = (struct ethhdr *)skb->data;
2722 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2723 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2724 if (is_broadcast_ether_addr(eth->h_dest))
2725 mac_type = BROADCAST_ADDRESS;
2727 mac_type = MULTICAST_ADDRESS;
2730 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2731 /* First, check if we need to linearize the skb (due to FW
2732 restrictions). No need to check fragmentation if page size > 8K
2733 (there will be no violation of FW restrictions) */
2734 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2735 /* Statistics of linearization */
2737 if (skb_linearize(skb) != 0) {
2738 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2739 "silently dropping this SKB\n");
2740 dev_kfree_skb_any(skb);
2741 return NETDEV_TX_OK;
2745 /* Map skb linear data for DMA */
2746 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2747 skb_headlen(skb), DMA_TO_DEVICE);
2748 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2749 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2750 "silently dropping this SKB\n");
2751 dev_kfree_skb_any(skb);
2752 return NETDEV_TX_OK;
2755 Please read carefully. First we use one BD which we mark as start,
2756 then we have a parsing info BD (used for TSO or xsum),
2757 and only then we have the rest of the TSO BDs.
2758 (don't forget to mark the last one as last,
2759 and to unmap only AFTER you write to the BD ...)
2760 And above all, all PBD sizes are in words - NOT DWORDS!
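/* Editor's sketch of the resulting chain for a TSO packet with two page
 * fragments (illustrative only):
 *
 *   start BD (headers) -> parse BD -> split data BD -> frag BD -> frag BD
 *
 * first_bd->nbd counts all of these, and the doorbell producer advances by
 * nbd, plus one more when the chain crosses the next-page BD (see the
 * TX_BD_POFF() adjustment further down).
 */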
2763 /* read the current pkt producer now - advance it just before sending a packet
2764 * since mapping of pages may fail and cause the packet to be dropped
2766 pkt_prod = txdata->tx_pkt_prod;
2767 bd_prod = TX_BD(txdata->tx_bd_prod);
2769 /* get a tx_buf and first BD
2770 * tx_start_bd may be changed during SPLIT,
2771 * but first_bd will always stay first
2773 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2774 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
2775 first_bd = tx_start_bd;
2777 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2778 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2782 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2784 /* remember the first BD of the packet */
2785 tx_buf->first_bd = txdata->tx_bd_prod;
2789 DP(NETIF_MSG_TX_QUEUED,
2790 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2791 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
2793 if (vlan_tx_tag_present(skb)) {
2794 tx_start_bd->vlan_or_ethertype =
2795 cpu_to_le16(vlan_tx_tag_get(skb));
2796 tx_start_bd->bd_flags.as_bitfield |=
2797 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2799 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2801 /* turn on parsing and get a BD */
2802 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2804 if (xmit_type & XMIT_CSUM)
2805 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
2807 if (!CHIP_IS_E1x(bp)) {
2808 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
2809 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2810 /* Set PBD in checksum offload case */
2811 if (xmit_type & XMIT_CSUM)
2812 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2813 &pbd_e2_parsing_data,
2817 * fill in the MAC addresses in the PBD - for local
2820 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2821 &pbd_e2->src_mac_addr_mid,
2822 &pbd_e2->src_mac_addr_lo,
2824 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2825 &pbd_e2->dst_mac_addr_mid,
2826 &pbd_e2->dst_mac_addr_lo,
2830 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
2831 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2832 /* Set PBD in checksum offload case */
2833 if (xmit_type & XMIT_CSUM)
2834 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2838 /* Setup the data pointer of the first BD of the packet */
2839 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2840 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2841 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
2842 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2843 pkt_size = tx_start_bd->nbytes;
2845 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2846 " nbytes %d flags %x vlan %x\n",
2847 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2848 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2849 tx_start_bd->bd_flags.as_bitfield,
2850 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2852 if (xmit_type & XMIT_GSO) {
2854 DP(NETIF_MSG_TX_QUEUED,
2855 "TSO packet len %d hlen %d total len %d tso size %d\n",
2856 skb->len, hlen, skb_headlen(skb),
2857 skb_shinfo(skb)->gso_size);
2859 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2861 if (unlikely(skb_headlen(skb) > hlen))
2862 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2865 if (!CHIP_IS_E1x(bp))
2866 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2869 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2872 /* Set the PBD's parsing_data field if not zero
2873 * (for the chips newer than 57711).
2875 if (pbd_e2_parsing_data)
2876 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2878 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2880 /* Handle fragmented skb */
2881 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2882 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2884 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
2885 skb_frag_size(frag), DMA_TO_DEVICE);
2886 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2887 unsigned int pkts_compl = 0, bytes_compl = 0;
2889 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2890 "dropping packet...\n");
2892 /* we need to unmap all buffers already mapped
2894 * first_bd->nbd needs to be properly updated
2895 * before the call to bnx2x_free_tx_pkt
2897 first_bd->nbd = cpu_to_le16(nbd);
2898 bnx2x_free_tx_pkt(bp, txdata,
2899 TX_BD(txdata->tx_pkt_prod),
2900 &pkts_compl, &bytes_compl);
2901 return NETDEV_TX_OK;
2904 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2905 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2906 if (total_pkt_bd == NULL)
2907 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2909 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2910 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2911 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
2912 le16_add_cpu(&pkt_size, skb_frag_size(frag));
2915 DP(NETIF_MSG_TX_QUEUED,
2916 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2917 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2918 le16_to_cpu(tx_data_bd->nbytes));
2921 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2923 /* update with actual num BDs */
2924 first_bd->nbd = cpu_to_le16(nbd);
2926 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2928 /* now send a tx doorbell, counting the next BD
2929 * if the packet contains or ends with it
2931 if (TX_BD_POFF(bd_prod) < nbd)
2934 /* total_pkt_bytes should be set on the first data BD if
2935 * it's not an LSO packet and there is more than one
2936 * data BD. In this case pkt_size is limited by an MTU value.
2937 * However we prefer to set it for an LSO packet (while we don't
2938 * have to) in order to save some CPU cycles in the non-LSO
2939 * case, which we care much more about.
2941 if (total_pkt_bd != NULL)
2942 total_pkt_bd->total_pkt_bytes = pkt_size;
2945 DP(NETIF_MSG_TX_QUEUED,
2946 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2947 " tcp_flags %x xsum %x seq %u hlen %u\n",
2948 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2949 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2950 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2951 le16_to_cpu(pbd_e1x->total_hlen_w));
2953 DP(NETIF_MSG_TX_QUEUED,
2954 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2955 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2956 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2957 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2958 pbd_e2->parsing_data);
2959 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2961 netdev_tx_sent_queue(txq, skb->len);
2963 txdata->tx_pkt_prod++;
2965 * Make sure that the BD data is updated before updating the producer
2966 * since FW might read the BD right after the producer is updated.
2967 * This is only applicable for weak-ordered memory model archs such
2968 * as IA-64. The following barrier is also mandatory since the FW
2969 * assumes packets must have BDs.
2973 txdata->tx_db.data.prod += nbd;
2976 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
2980 txdata->tx_bd_prod += nbd;
2982 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
2983 netif_tx_stop_queue(txq);
2985 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2986 * ordering of set_bit() in netif_tx_stop_queue() and read of
2990 fp->eth_q_stats.driver_xoff++;
2991 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
2992 netif_tx_wake_queue(txq);
2996 return NETDEV_TX_OK;
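/* Editor's sketch (illustrative) of the stop/wake pairing used above: the
 * producer stops the queue, fences, then re-checks ring space so it cannot
 * miss a completion that raced with the stop; bnx2x_tx_int() holds the
 * paired barrier on the consumer side.
 *
 *	netif_tx_stop_queue(txq);
 *	smp_mb();
 *	if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
 *		netif_tx_wake_queue(txq);
 */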
3000 * bnx2x_setup_tc - routine to configure net_device for multi tc
3002 * @dev: net device to configure
3003 * @num_tc: number of traffic classes to enable
3005 * callback connected to the ndo_setup_tc function pointer
3007 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3009 int cos, prio, count, offset;
3010 struct bnx2x *bp = netdev_priv(dev);
3012 /* setup tc must be called under rtnl lock */
3015 /* no traffic classes requested. aborting */
3017 netdev_reset_tc(dev);
3021 /* requested to support too many traffic classes */
3022 if (num_tc > bp->max_cos) {
3023 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
3024 " requested: %d. max supported is %d\n",
3025 num_tc, bp->max_cos);
3029 /* declare amount of supported traffic classes */
3030 if (netdev_set_num_tc(dev, num_tc)) {
3031 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
3036 /* configure priority to traffic class mapping */
3037 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3038 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3039 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
3040 prio, bp->prio_to_cos[prio]);
3044 /* Use this configuration to differentiate tc0 from other COSes.
3045 This can be used for ETS or PFC, and saves the effort of setting
3046 up a multi-class queue disc or negotiating DCBX with a switch
3047 netdev_set_prio_tc_map(dev, 0, 0);
3048 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3049 for (prio = 1; prio < 16; prio++) {
3050 netdev_set_prio_tc_map(dev, prio, 1);
3051 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3054 /* configure traffic class to transmission queue mapping */
3055 for (cos = 0; cos < bp->max_cos; cos++) {
3056 count = BNX2X_NUM_ETH_QUEUES(bp);
3057 offset = cos * MAX_TXQS_PER_COS;
3058 netdev_set_tc_queue(dev, cos, count, offset);
3059 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
3060 cos, offset, count);
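/* Worked example (editor's illustration; the value of MAX_TXQS_PER_COS is
 * assumed): with num_tc = 2 and 4 ETH queues, tc 0 maps to txq offset 0
 * with count 4, and tc 1 to offset 1 * MAX_TXQS_PER_COS with count 4, so
 * each traffic class owns one contiguous stride of transmission queues.
 */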
3066 /* called with rtnl_lock */
3067 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3069 struct sockaddr *addr = p;
3070 struct bnx2x *bp = netdev_priv(dev);
3073 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data))
3077 if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data))
3081 if (netif_running(dev)) {
3082 rc = bnx2x_set_eth_mac(bp, false);
3087 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
3088 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3090 if (netif_running(dev))
3091 rc = bnx2x_set_eth_mac(bp, true);
3096 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3098 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3099 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3104 if (IS_FCOE_IDX(fp_index)) {
3105 memset(sb, 0, sizeof(union host_hc_status_block));
3106 fp->status_blk_mapping = 0;
3111 if (!CHIP_IS_E1x(bp))
3112 BNX2X_PCI_FREE(sb->e2_sb,
3113 bnx2x_fp(bp, fp_index,
3114 status_blk_mapping),
3115 sizeof(struct host_hc_status_block_e2));
3117 BNX2X_PCI_FREE(sb->e1x_sb,
3118 bnx2x_fp(bp, fp_index,
3119 status_blk_mapping),
3120 sizeof(struct host_hc_status_block_e1x));
3125 if (!skip_rx_queue(bp, fp_index)) {
3126 bnx2x_free_rx_bds(fp);
3128 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3129 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3130 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3131 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3132 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3134 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3135 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3136 sizeof(struct eth_fast_path_rx_cqe) *
3140 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3141 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3142 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3143 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3147 if (!skip_tx_queue(bp, fp_index)) {
3148 /* fastpath tx rings: tx_buf tx_desc */
3149 for_each_cos_in_tx_queue(fp, cos) {
3150 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3153 "freeing tx memory of fp %d cos %d cid %d\n",
3154 fp_index, cos, txdata->cid);
3156 BNX2X_FREE(txdata->tx_buf_ring);
3157 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3158 txdata->tx_desc_mapping,
3159 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3162 /* end of fastpath */
3165 void bnx2x_free_fp_mem(struct bnx2x *bp)
3168 for_each_queue(bp, i)
3169 bnx2x_free_fp_mem_at(bp, i);
3172 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3174 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3175 if (!CHIP_IS_E1x(bp)) {
3176 bnx2x_fp(bp, index, sb_index_values) =
3177 (__le16 *)status_blk.e2_sb->sb.index_values;
3178 bnx2x_fp(bp, index, sb_running_index) =
3179 (__le16 *)status_blk.e2_sb->sb.running_index;
3181 bnx2x_fp(bp, index, sb_index_values) =
3182 (__le16 *)status_blk.e1x_sb->sb.index_values;
3183 bnx2x_fp(bp, index, sb_running_index) =
3184 (__le16 *)status_blk.e1x_sb->sb.running_index;
3188 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3190 union host_hc_status_block *sb;
3191 struct bnx2x_fastpath *fp = &bp->fp[index];
3194 int rx_ring_size = 0;
3197 if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) {
3198 rx_ring_size = MIN_RX_SIZE_NONTPA;
3199 bp->rx_ring_size = rx_ring_size;
3202 if (!bp->rx_ring_size) {
3203 u32 cfg = SHMEM_RD(bp,
3204 dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
3206 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3208 /* Decrease ring size for 1G functions */
3209 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3210 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3213 /* allocate at least number of buffers required by FW */
3214 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3215 MIN_RX_SIZE_TPA, rx_ring_size);
3217 bp->rx_ring_size = rx_ring_size;
3218 } else /* if rx_ring_size specified - use it */
3219 rx_ring_size = bp->rx_ring_size;
3222 sb = &bnx2x_fp(bp, index, status_blk);
3224 if (!IS_FCOE_IDX(index)) {
3227 if (!CHIP_IS_E1x(bp))
3228 BNX2X_PCI_ALLOC(sb->e2_sb,
3229 &bnx2x_fp(bp, index, status_blk_mapping),
3230 sizeof(struct host_hc_status_block_e2));
3232 BNX2X_PCI_ALLOC(sb->e1x_sb,
3233 &bnx2x_fp(bp, index, status_blk_mapping),
3234 sizeof(struct host_hc_status_block_e1x));
3239 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3240 * set shortcuts for it.
3242 if (!IS_FCOE_IDX(index))
3243 set_sb_shortcuts(bp, index);
3246 if (!skip_tx_queue(bp, index)) {
3247 /* fastpath tx rings: tx_buf tx_desc */
3248 for_each_cos_in_tx_queue(fp, cos) {
3249 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3251 DP(BNX2X_MSG_SP, "allocating tx memory of "
3255 BNX2X_ALLOC(txdata->tx_buf_ring,
3256 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3257 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3258 &txdata->tx_desc_mapping,
3259 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3264 if (!skip_rx_queue(bp, index)) {
3265 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3266 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3267 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3268 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3269 &bnx2x_fp(bp, index, rx_desc_mapping),
3270 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3272 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3273 &bnx2x_fp(bp, index, rx_comp_mapping),
3274 sizeof(struct eth_fast_path_rx_cqe) *
3278 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3279 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3280 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3281 &bnx2x_fp(bp, index, rx_sge_mapping),
3282 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3284 bnx2x_set_next_page_rx_bd(fp);
3287 bnx2x_set_next_page_rx_cq(fp);
3290 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3291 if (ring_size < rx_ring_size)
3297 /* handles low memory cases */
3299 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3301 /* FW will drop all packets if the queue is not big enough.
3302 * In these cases we disable the queue
3303 * Min size is different for OOO, TPA and non-TPA queues
3305 if (ring_size < (fp->disable_tpa ?
3306 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3307 /* release memory allocated for this queue */
3308 bnx2x_free_fp_mem_at(bp, index);
3314 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3319 * 1. Allocate FP for leading - fatal if error
3320 * 2. {CNIC} Allocate FCoE FP - fatal if error
3321 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3322 * 4. Allocate RSS - fix number of queues if error
3326 if (bnx2x_alloc_fp_mem_at(bp, 0))
3332 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3333 /* we will fail the load process instead of marking
3340 for_each_nondefault_eth_queue(bp, i)
3341 if (bnx2x_alloc_fp_mem_at(bp, i))
3344 /* handle memory failures */
3345 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3346 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3351 * move non-eth FPs next to the last eth FP;
3352 * must be done in that order:
3353 * FCOE_IDX < FWD_IDX < OOO_IDX
3356 /* move the FCoE fp even if NO_FCOE_FLAG is on */
3357 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3359 bp->num_queues -= delta;
3360 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3361 bp->num_queues + delta, bp->num_queues);
3367 void bnx2x_free_mem_bp(struct bnx2x *bp)
3370 kfree(bp->msix_table);
3374 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3376 struct bnx2x_fastpath *fp;
3377 struct msix_entry *tbl;
3378 struct bnx2x_ilt *ilt;
3379 int msix_table_size = 0;
3382 * The biggest MSI-X table we might need is the maximum number of fast
3383 * path IGU SBs plus the default SB (for the PF).
3385 msix_table_size = bp->igu_sb_cnt + 1;
3387 /* fp array: RSS plus CNIC related L2 queues */
3388 fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
3389 sizeof(*fp), GFP_KERNEL);
3395 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3398 bp->msix_table = tbl;
3401 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3408 bnx2x_free_mem_bp(bp);
3413 int bnx2x_reload_if_running(struct net_device *dev)
3415 struct bnx2x *bp = netdev_priv(dev);
3417 if (unlikely(!netif_running(dev)))
3420 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3421 return bnx2x_nic_load(bp, LOAD_NORMAL);
3424 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3426 u32 sel_phy_idx = 0;
3427 if (bp->link_params.num_phys <= 1)
3430 if (bp->link_vars.link_up) {
3431 sel_phy_idx = EXT_PHY1;
3432 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3433 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3434 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3435 sel_phy_idx = EXT_PHY2;
3438 switch (bnx2x_phy_selection(&bp->link_params)) {
3439 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3440 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3441 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3442 sel_phy_idx = EXT_PHY1;
3444 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3445 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3446 sel_phy_idx = EXT_PHY2;
3454 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3456 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3458 * The selected active PHY is always after swapping (in case PHY
3459 * swapping is enabled). So when swapping is enabled, we need to reverse
3463 if (bp->link_params.multi_phy_config &
3464 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3465 if (sel_phy_idx == EXT_PHY1)
3466 sel_phy_idx = EXT_PHY2;
3467 else if (sel_phy_idx == EXT_PHY2)
3468 sel_phy_idx = EXT_PHY1;
3470 return LINK_CONFIG_IDX(sel_phy_idx);
3473 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3474 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3476 struct bnx2x *bp = netdev_priv(dev);
3477 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3480 case NETDEV_FCOE_WWNN:
3481 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3482 cp->fcoe_wwn_node_name_lo);
3484 case NETDEV_FCOE_WWPN:
3485 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3486 cp->fcoe_wwn_port_name_lo);
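/* Editor's note: HILO_U64() simply recombines the two 32-bit shmem words,
 * i.e. ((u64)hi << 32) + lo, so hi = 0x10000000, lo = 0xc9abcdef reads
 * back as the WWN 0x10000000c9abcdef.
 */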
3496 /* called with rtnl_lock */
3497 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3499 struct bnx2x *bp = netdev_priv(dev);
3501 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3502 netdev_err(dev, "Handling parity error recovery. Try again later\n");
3506 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3507 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3510 /* This does not race with packet allocation
3511 * because the actual alloc size is
3512 * only updated as part of load
3516 bp->gro_check = bnx2x_need_gro_check(new_mtu);
3518 return bnx2x_reload_if_running(dev);
3521 netdev_features_t bnx2x_fix_features(struct net_device *dev,
3522 netdev_features_t features)
3524 struct bnx2x *bp = netdev_priv(dev);
3526 /* TPA requires Rx CSUM offloading */
3527 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
3528 features &= ~NETIF_F_LRO;
3529 features &= ~NETIF_F_GRO;
3535 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
3537 struct bnx2x *bp = netdev_priv(dev);
3538 u32 flags = bp->flags;
3539 bool bnx2x_reload = false;
3541 if (features & NETIF_F_LRO)
3542 flags |= TPA_ENABLE_FLAG;
3544 flags &= ~TPA_ENABLE_FLAG;
3546 if (features & NETIF_F_GRO)
3547 flags |= GRO_ENABLE_FLAG;
3549 flags &= ~GRO_ENABLE_FLAG;
3551 if (features & NETIF_F_LOOPBACK) {
3552 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3553 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3554 bnx2x_reload = true;
3557 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3558 bp->link_params.loopback_mode = LOOPBACK_NONE;
3559 bnx2x_reload = true;
3563 if (flags ^ bp->flags) {
3565 bnx2x_reload = true;
3569 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3570 return bnx2x_reload_if_running(dev);
3571 /* else: bnx2x_nic_load() will be called at end of recovery */
3577 void bnx2x_tx_timeout(struct net_device *dev)
3579 struct bnx2x *bp = netdev_priv(dev);
3581 #ifdef BNX2X_STOP_ON_ERROR
3586 smp_mb__before_clear_bit();
3587 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3588 smp_mb__after_clear_bit();
3590 /* This allows the netif to be shut down gracefully before resetting */
3591 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3594 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3596 struct net_device *dev = pci_get_drvdata(pdev);
3600 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3603 bp = netdev_priv(dev);
3607 pci_save_state(pdev);
3609 if (!netif_running(dev)) {
3614 netif_device_detach(dev);
3616 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3618 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3625 int bnx2x_resume(struct pci_dev *pdev)
3627 struct net_device *dev = pci_get_drvdata(pdev);
3632 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3635 bp = netdev_priv(dev);
3637 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3638 netdev_err(dev, "Handling parity error recovery. Try again later\n");
3644 pci_restore_state(pdev);
3646 if (!netif_running(dev)) {
3651 bnx2x_set_power_state(bp, PCI_D0);
3652 netif_device_attach(dev);
3654 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3662 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3665 /* ustorm cxt validation */
3666 cxt->ustorm_ag_context.cdu_usage =
3667 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3668 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3669 /* xcontext validation */
3670 cxt->xstorm_ag_context.cdu_reserved =
3671 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3672 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3675 static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3676 u8 fw_sb_id, u8 sb_index,
3680 u32 addr = BAR_CSTRORM_INTMEM +
3681 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3682 REG_WR8(bp, addr, ticks);
3683 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3684 port, fw_sb_id, sb_index, ticks);
3687 static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3688 u16 fw_sb_id, u8 sb_index,
3691 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3692 u32 addr = BAR_CSTRORM_INTMEM +
3693 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3694 u16 flags = REG_RD16(bp, addr);
3696 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3697 flags |= enable_flag;
3698 REG_WR16(bp, addr, flags);
3699 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3700 port, fw_sb_id, sb_index, disable);
3703 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3704 u8 sb_index, u8 disable, u16 usec)
3706 int port = BP_PORT(bp);
3707 u8 ticks = usec / BNX2X_BTR;
3709 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3711 disable = disable ? 1 : (usec ? 0 : 1);
3712 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
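/* Worked example (editor's illustration; the BNX2X_BTR tick resolution is
 * assumed to be 4 usec): usec = 100 programs ticks = 25. Note the disable
 * logic above: an explicit disable request or usec = 0 both clear
 * HC_INDEX_DATA_HC_ENABLED, so a zero coalescing value switches the index
 * off rather than interrupting on every update.
 */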