1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2012 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
23 #include <linux/ip.h>
24 #include <net/ipv6.h>
25 #include <net/ip6_checksum.h>
26 #include <linux/prefetch.h>
27 #include "bnx2x_cmn.h"
28 #include "bnx2x_init.h"
29 #include "bnx2x_sp.h"
30
31
32
33 /**
34  * bnx2x_move_fp - move content of the fastpath structure.
35  *
36  * @bp:         driver handle
37  * @from:       source FP index
38  * @to:         destination FP index
39  *
40  * Makes sure the contents of bp->fp[to].napi are kept
41  * intact. This is done by first copying the napi struct from
42  * the target to the source, and then memcpy'ing the entire
43  * source onto the target. Update txdata pointers and related
44  * content.
45  */
46 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
47 {
48         struct bnx2x_fastpath *from_fp = &bp->fp[from];
49         struct bnx2x_fastpath *to_fp = &bp->fp[to];
50         struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51         struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52         struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53         struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54         int old_max_eth_txqs, new_max_eth_txqs;
55         int old_txdata_index = 0, new_txdata_index = 0;
56
57         /* Copy the NAPI object as it has already been initialized */
58         from_fp->napi = to_fp->napi;
59
60         /* Move bnx2x_fastpath contents */
61         memcpy(to_fp, from_fp, sizeof(*to_fp));
62         to_fp->index = to;
63
64         /* move sp_objs contents as well, as their indices match fp ones */
65         memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
66
67         /* move fp_stats contents as well, as their indices match fp ones */
68         memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
69
70         /* Update txdata pointers in fp and move txdata content accordingly:
71          * Each fp consumes 'max_cos' txdata structures, so the index should be
72          * decremented by max_cos x delta.
73          */
74
75         old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
76         new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
77                                 (bp)->max_cos;
78         if (from == FCOE_IDX(bp)) {
79                 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80                 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
81         }
82
83         memcpy(&bp->bnx2x_txq[old_txdata_index],
84                &bp->bnx2x_txq[new_txdata_index],
85                sizeof(struct bnx2x_fp_txdata));
86         to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
87 }
88
89 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
90
91 /* free skb in the packet ring at pos idx
92  * return idx of last bd freed
93  */
94 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
95                              u16 idx, unsigned int *pkts_compl,
96                              unsigned int *bytes_compl)
97 {
98         struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
99         struct eth_tx_start_bd *tx_start_bd;
100         struct eth_tx_bd *tx_data_bd;
101         struct sk_buff *skb = tx_buf->skb;
102         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
103         int nbd;
104
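        /* The packet's BD chain consists of the start BD (unmapped below with
         * dma_unmap_single), a parse BD and - for TSO packets - a split-header
         * BD, neither of which carries a DMA mapping, followed by one data BD
         * per fragment that is unmapped with dma_unmap_page.
         */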
105         /* prefetch skb end pointer to speed up dev_kfree_skb() */
106         prefetch(&skb->end);
107
108         DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
109            txdata->txq_index, idx, tx_buf, skb);
110
111         /* unmap first bd */
112         tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
113         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
114                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
115
116
117         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
118 #ifdef BNX2X_STOP_ON_ERROR
119         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
120                 BNX2X_ERR("BAD nbd!\n");
121                 bnx2x_panic();
122         }
123 #endif
124         new_cons = nbd + tx_buf->first_bd;
125
126         /* Get the next bd */
127         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
128
129         /* Skip a parse bd... */
130         --nbd;
131         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
132
133         /* ...and the TSO split header bd since they have no mapping */
134         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
135                 --nbd;
136                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
137         }
138
139         /* now free frags */
140         while (nbd > 0) {
141
142                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
143                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
144                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
145                 if (--nbd)
146                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
147         }
148
149         /* release skb */
150         WARN_ON(!skb);
151         if (likely(skb)) {
152                 (*pkts_compl)++;
153                 (*bytes_compl) += skb->len;
154         }
155
156         dev_kfree_skb_any(skb);
157         tx_buf->first_bd = 0;
158         tx_buf->skb = NULL;
159
160         return new_cons;
161 }
162
163 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
164 {
165         struct netdev_queue *txq;
166         u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
167         unsigned int pkts_compl = 0, bytes_compl = 0;
168
169 #ifdef BNX2X_STOP_ON_ERROR
170         if (unlikely(bp->panic))
171                 return -1;
172 #endif
173
174         txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
175         hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
176         sw_cons = txdata->tx_pkt_cons;
177
178         while (sw_cons != hw_cons) {
179                 u16 pkt_cons;
180
181                 pkt_cons = TX_BD(sw_cons);
182
183                 DP(NETIF_MSG_TX_DONE,
184                    "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
185                    txdata->txq_index, hw_cons, sw_cons, pkt_cons);
186
187                 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
188                     &pkts_compl, &bytes_compl);
189
190                 sw_cons++;
191         }
192
193         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
194
195         txdata->tx_pkt_cons = sw_cons;
196         txdata->tx_bd_cons = bd_cons;
197
198         /* Need to make the tx_bd_cons update visible to start_xmit()
199          * before checking for netif_tx_queue_stopped().  Without the
200          * memory barrier, there is a small possibility that
201          * start_xmit() will miss it and cause the queue to be stopped
202          * forever.
203          * On the other hand we need an rmb() here to ensure the proper
204          * ordering of bit testing in the following
205          * netif_tx_queue_stopped(txq) call.
206          */
207         smp_mb();
208
209         if (unlikely(netif_tx_queue_stopped(txq))) {
210                 /* Taking tx_lock() is needed to prevent reenabling the queue
211                  * while it's empty. This could happen if rx_action() gets
212                  * suspended in bnx2x_tx_int() after the condition before
213                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
214                  *
215                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
216                  * sends some packets consuming the whole queue again->
217                  * stops the queue
218                  */
219
220                 __netif_tx_lock(txq, smp_processor_id());
221
222                 if ((netif_tx_queue_stopped(txq)) &&
223                     (bp->state == BNX2X_STATE_OPEN) &&
224                     (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
225                         netif_tx_wake_queue(txq);
226
227                 __netif_tx_unlock(txq);
228         }
229         return 0;
230 }
231
232 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
233                                              u16 idx)
234 {
235         u16 last_max = fp->last_max_sge;
236
237         if (SUB_S16(idx, last_max) > 0)
238                 fp->last_max_sge = idx;
239 }
240
241 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
242                                          u16 sge_len,
243                                          struct eth_end_agg_rx_cqe *cqe)
244 {
245         struct bnx2x *bp = fp->bp;
246         u16 last_max, last_elem, first_elem;
247         u16 delta = 0;
248         u16 i;
249
250         if (!sge_len)
251                 return;
252
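        /* fp->sge_mask is a bit vector over the SGE ring, handled in 64-bit
         * elements: bits are cleared below for pages the FW has consumed, and
         * rx_sge_prod is only advanced across elements that are fully cleared,
         * each such element being re-armed to all ones.
         */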
253         /* First mark all used pages */
254         for (i = 0; i < sge_len; i++)
255                 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
256                         RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
257
258         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
259            sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
260
261         /* Here we assume that the last SGE index is the biggest */
262         prefetch((void *)(fp->sge_mask));
263         bnx2x_update_last_max_sge(fp,
264                 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
265
266         last_max = RX_SGE(fp->last_max_sge);
267         last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
268         first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
269
270         /* If ring is not full */
271         if (last_elem + 1 != first_elem)
272                 last_elem++;
273
274         /* Now update the prod */
275         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
276                 if (likely(fp->sge_mask[i]))
277                         break;
278
279                 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
280                 delta += BIT_VEC64_ELEM_SZ;
281         }
282
283         if (delta > 0) {
284                 fp->rx_sge_prod += delta;
285                 /* clear page-end entries */
286                 bnx2x_clear_sge_mask_next_elems(fp);
287         }
288
289         DP(NETIF_MSG_RX_STATUS,
290            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
291            fp->last_max_sge, fp->rx_sge_prod);
292 }
293
294 /* Set Toeplitz hash value in the skb using the value from the
295  * CQE (calculated by HW).
296  */
297 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
298                             const struct eth_fast_path_rx_cqe *cqe,
299                             bool *l4_rxhash)
300 {
301         /* Set Toeplitz hash from CQE */
302         if ((bp->dev->features & NETIF_F_RXHASH) &&
303             (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
304                 enum eth_rss_hash_type htype;
305
306                 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
307                 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
308                              (htype == TCP_IPV6_HASH_TYPE);
309                 return le32_to_cpu(cqe->rss_hash_result);
310         }
311         *l4_rxhash = false;
312         return 0;
313 }
314
315 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
316                             u16 cons, u16 prod,
317                             struct eth_fast_path_rx_cqe *cqe)
318 {
319         struct bnx2x *bp = fp->bp;
320         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
321         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
322         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
323         dma_addr_t mapping;
324         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
325         struct sw_rx_bd *first_buf = &tpa_info->first_buf;
326
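        /* TPA start swaps buffers: the bin's spare buffer (first_buf) is
         * mapped and placed at the producer slot, while the just-received
         * buffer at the consumer slot is parked in first_buf - still mapped -
         * until TPA_STOP completes the aggregation.
         */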
327         /* print error if current state != stop */
328         if (tpa_info->tpa_state != BNX2X_TPA_STOP)
329                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
330
331         /* Try to map an empty data buffer from the aggregation info  */
332         mapping = dma_map_single(&bp->pdev->dev,
333                                  first_buf->data + NET_SKB_PAD,
334                                  fp->rx_buf_size, DMA_FROM_DEVICE);
335         /*
336          *  ...if it fails - move the skb from the consumer to the producer
337          *  and set the current aggregation state as ERROR to drop it
338          *  when TPA_STOP arrives.
339          */
340
341         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
342                 /* Move the BD from the consumer to the producer */
343                 bnx2x_reuse_rx_data(fp, cons, prod);
344                 tpa_info->tpa_state = BNX2X_TPA_ERROR;
345                 return;
346         }
347
348         /* move empty data from pool to prod */
349         prod_rx_buf->data = first_buf->data;
350         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
351         /* point prod_bd to new data */
352         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
353         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
354
355         /* move partial skb from cons to pool (don't unmap yet) */
356         *first_buf = *cons_rx_buf;
357
358         /* mark bin state as START */
359         tpa_info->parsing_flags =
360                 le16_to_cpu(cqe->pars_flags.flags);
361         tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
362         tpa_info->tpa_state = BNX2X_TPA_START;
363         tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
364         tpa_info->placement_offset = cqe->placement_offset;
365         tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
366         if (fp->mode == TPA_MODE_GRO) {
367                 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
368                 tpa_info->full_page =
369                         SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
370                 tpa_info->gro_size = gro_size;
371         }
372
373 #ifdef BNX2X_STOP_ON_ERROR
374         fp->tpa_queue_used |= (1 << queue);
375 #ifdef _ASM_GENERIC_INT_L64_H
376         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
377 #else
378         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
379 #endif
380            fp->tpa_queue_used);
381 #endif
382 }
383
384 /* Timestamp option length allowed for TPA aggregation:
385  *
386  *              nop nop kind length echo val
387  */
388 #define TPA_TSTAMP_OPT_LEN      12
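/* The 12 bytes break down as: NOP (1) + NOP (1) + option kind (1) +
 * option length (1) + TS value (4) + TS echo reply (4).
 */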
389 /**
390  * bnx2x_set_lro_mss - calculate the approximate value of the MSS
391  *
392  * @bp:                 driver handle
393  * @parsing_flags:      parsing flags from the START CQE
394  * @len_on_bd:          total length of the first packet for the
395  *                      aggregation.
396  *
397  * Returns the approximate value of the MSS for this aggregation,
398  * calculated using its first packet.
399  */
400 static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
401                              u16 len_on_bd)
402 {
403         /*
404          * A TPA aggregation won't have IP options, TCP options other
405          * than timestamp, or IPv6 extension headers.
406          */
407         u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
408
409         if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
410             PRS_FLAG_OVERETH_IPV6)
411                 hdrs_len += sizeof(struct ipv6hdr);
412         else /* IPv4 */
413                 hdrs_len += sizeof(struct iphdr);
414
415
416         /* Check if there was a TCP timestamp option; if there is one, it
417          * will always be 12 bytes long: nop nop kind length echo val.
418          *
419          * Otherwise FW would close the aggregation.
420          */
421         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
422                 hdrs_len += TPA_TSTAMP_OPT_LEN;
423
424         return len_on_bd - hdrs_len;
425 }
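/* Worked example (illustrative only): for an IPv4 aggregation carrying TCP
 * timestamps, hdrs_len = 14 (ETH_HLEN) + 20 (struct iphdr) + 20 (struct
 * tcphdr) + 12 (timestamp option) = 66, so the reported MSS is
 * len_on_bd - 66.
 */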
426
427 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
428                               struct bnx2x_fastpath *fp, u16 index)
429 {
430         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
431         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
432         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
433         dma_addr_t mapping;
434
435         if (unlikely(page == NULL)) {
436                 BNX2X_ERR("Can't alloc sge\n");
437                 return -ENOMEM;
438         }
439
440         mapping = dma_map_page(&bp->pdev->dev, page, 0,
441                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
442         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
443                 __free_pages(page, PAGES_PER_SGE_SHIFT);
444                 BNX2X_ERR("Can't map sge\n");
445                 return -ENOMEM;
446         }
447
448         sw_buf->page = page;
449         dma_unmap_addr_set(sw_buf, mapping, mapping);
450
451         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
452         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
453
454         return 0;
455 }
456
457 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
458                                struct bnx2x_agg_info *tpa_info,
459                                u16 pages,
460                                struct sk_buff *skb,
461                                struct eth_end_agg_rx_cqe *cqe,
462                                u16 cqe_idx)
463 {
464         struct sw_rx_page *rx_pg, old_rx_pg;
465         u32 i, frag_len, frag_size;
466         int err, j, frag_id = 0;
467         u16 len_on_bd = tpa_info->len_on_bd;
468         u16 full_page = 0, gro_size = 0;
469
470         frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
471
472         if (fp->mode == TPA_MODE_GRO) {
473                 gro_size = tpa_info->gro_size;
474                 full_page = tpa_info->full_page;
475         }
476
477         /* This is needed in order to enable forwarding support */
478         if (frag_size) {
479                 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
480                                         tpa_info->parsing_flags, len_on_bd);
481
482                 /* set for GRO */
483                 if (fp->mode == TPA_MODE_GRO)
484                         skb_shinfo(skb)->gso_type =
485                             (GET_FLAG(tpa_info->parsing_flags,
486                                       PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
487                                                 PRS_FLAG_OVERETH_IPV6) ?
488                                 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
489         }
490
491
492 #ifdef BNX2X_STOP_ON_ERROR
493         if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
494                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
495                           pages, cqe_idx);
496                 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
497                 bnx2x_panic();
498                 return -EINVAL;
499         }
500 #endif
501
502         /* Run through the SGL and compose the fragmented skb */
503         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
504                 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
505
506                 /* FW gives the indices of the SGE as if the ring is an array
507                    (meaning that "next" element will consume 2 indices) */
508                 if (fp->mode == TPA_MODE_GRO)
509                         frag_len = min_t(u32, frag_size, (u32)full_page);
510                 else /* LRO */
511                         frag_len = min_t(u32, frag_size,
512                                          (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
513
514                 rx_pg = &fp->rx_page_ring[sge_idx];
515                 old_rx_pg = *rx_pg;
516
517                 /* If we fail to allocate a substitute page, we simply stop
518                    where we are and drop the whole packet */
519                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
520                 if (unlikely(err)) {
521                         bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
522                         return err;
523                 }
524
525                 /* Unmap the page as we are going to pass it to the stack */
526                 dma_unmap_page(&bp->pdev->dev,
527                                dma_unmap_addr(&old_rx_pg, mapping),
528                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
529                 /* Add one frag and update the appropriate fields in the skb */
530                 if (fp->mode == TPA_MODE_LRO)
531                         skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
532                 else { /* GRO */
533                         int rem;
534                         int offset = 0;
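                                /* In GRO mode a single SGE page backs several
                                 * gro_size-sized frags; every frag after the
                                 * first takes its own reference on the page
                                 * via get_page() so the page is not freed
                                 * until all frags have been released.
                                 */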
535                         for (rem = frag_len; rem > 0; rem -= gro_size) {
536                                 int len = rem > gro_size ? gro_size : rem;
537                                 skb_fill_page_desc(skb, frag_id++,
538                                                    old_rx_pg.page, offset, len);
539                                 if (offset)
540                                         get_page(old_rx_pg.page);
541                                 offset += len;
542                         }
543                 }
544
545                 skb->data_len += frag_len;
546                 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
547                 skb->len += frag_len;
548
549                 frag_size -= frag_len;
550         }
551
552         return 0;
553 }
554
555 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
556                            struct bnx2x_agg_info *tpa_info,
557                            u16 pages,
558                            struct eth_end_agg_rx_cqe *cqe,
559                            u16 cqe_idx)
560 {
561         struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
562         u8 pad = tpa_info->placement_offset;
563         u16 len = tpa_info->len_on_bd;
564         struct sk_buff *skb = NULL;
565         u8 *new_data, *data = rx_buf->data;
566         u8 old_tpa_state = tpa_info->tpa_state;
567
568         tpa_info->tpa_state = BNX2X_TPA_STOP;
569
570         /* If there was an error during the handling of the TPA_START -
571          * drop this aggregation.
572          */
573         if (old_tpa_state == BNX2X_TPA_ERROR)
574                 goto drop;
575
576         /* Try to allocate the new data */
577         new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
578
579         /* Unmap skb in the pool anyway, as we are going to change
580            pool entry status to BNX2X_TPA_STOP even if new skb allocation
581            fails. */
582         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
583                          fp->rx_buf_size, DMA_FROM_DEVICE);
584         if (likely(new_data))
585                 skb = build_skb(data, 0);
586
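        /* On success the aggregated buffer ("data") becomes the skb head via
         * build_skb(), and the freshly allocated "new_data" replaces it in the
         * TPA bin further down; if either allocation fails, the aggregation is
         * dropped and the old buffer stays in the bin.
         */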
587         if (likely(skb)) {
588 #ifdef BNX2X_STOP_ON_ERROR
589                 if (pad + len > fp->rx_buf_size) {
590                         BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
591                                   pad, len, fp->rx_buf_size);
592                         bnx2x_panic();
593                         return;
594                 }
595 #endif
596
597                 skb_reserve(skb, pad + NET_SKB_PAD);
598                 skb_put(skb, len);
599                 skb->rxhash = tpa_info->rxhash;
600                 skb->l4_rxhash = tpa_info->l4_rxhash;
601
602                 skb->protocol = eth_type_trans(skb, bp->dev);
603                 skb->ip_summed = CHECKSUM_UNNECESSARY;
604
605                 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
606                                          skb, cqe, cqe_idx)) {
607                         if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
608                                 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
609                         napi_gro_receive(&fp->napi, skb);
610                 } else {
611                         DP(NETIF_MSG_RX_STATUS,
612                            "Failed to allocate new pages - dropping packet!\n");
613                         dev_kfree_skb_any(skb);
614                 }
615
616
617                 /* put new data in bin */
618                 rx_buf->data = new_data;
619
620                 return;
621         }
622         kfree(new_data);
623 drop:
624         /* drop the packet and keep the buffer in the bin */
625         DP(NETIF_MSG_RX_STATUS,
626            "Failed to allocate or map a new skb - dropping packet!\n");
627         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
628 }
629
630 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
631                                struct bnx2x_fastpath *fp, u16 index)
632 {
633         u8 *data;
634         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
635         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
636         dma_addr_t mapping;
637
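        /* The buffer is over-allocated by NET_SKB_PAD bytes of headroom that
         * build_skb()/skb_reserve() use later in the RX path; only the area
         * after the pad is mapped for DMA.
         */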
638         data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
639         if (unlikely(data == NULL))
640                 return -ENOMEM;
641
642         mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
643                                  fp->rx_buf_size,
644                                  DMA_FROM_DEVICE);
645         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
646                 kfree(data);
647                 BNX2X_ERR("Can't map rx data\n");
648                 return -ENOMEM;
649         }
650
651         rx_buf->data = data;
652         dma_unmap_addr_set(rx_buf, mapping, mapping);
653
654         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
655         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
656
657         return 0;
658 }
659
660 static
661 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
662                                  struct bnx2x_fastpath *fp,
663                                  struct bnx2x_eth_q_stats *qstats)
664 {
665         /* Do nothing if no L4 csum validation was done.
666          * We do not check whether IP csum was validated. For IPv4 we assume
667          * that if the card got as far as validating the L4 csum, it also
668          * validated the IP csum. IPv6 has no IP csum.
669          */
670         if (cqe->fast_path_cqe.status_flags &
671             ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
672                 return;
673
674         /* If L4 validation was done, check if an error was found. */
675
676         if (cqe->fast_path_cqe.type_error_flags &
677             (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
678              ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
679                 qstats->hw_csum_err++;
680         else
681                 skb->ip_summed = CHECKSUM_UNNECESSARY;
682 }
683
684 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
685 {
686         struct bnx2x *bp = fp->bp;
687         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
688         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
689         int rx_pkt = 0;
690
691 #ifdef BNX2X_STOP_ON_ERROR
692         if (unlikely(bp->panic))
693                 return 0;
694 #endif
695
696         /* CQ "next element" is of the same size as a regular element,
697            that's why it's ok here */
698         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
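        /* The status block consumer may point at the last entry of an RCQ
         * page, which is the "next page" element rather than a real CQE;
         * step over it so it lines up with sw_comp_cons, which is advanced
         * with NEXT_RCQ_IDX() and skips those entries.
         */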
699         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
700                 hw_comp_cons++;
701
702         bd_cons = fp->rx_bd_cons;
703         bd_prod = fp->rx_bd_prod;
704         bd_prod_fw = bd_prod;
705         sw_comp_cons = fp->rx_comp_cons;
706         sw_comp_prod = fp->rx_comp_prod;
707
708         /* Memory barrier necessary as speculative reads of the rx
709          * buffer can be ahead of the index in the status block
710          */
711         rmb();
712
713         DP(NETIF_MSG_RX_STATUS,
714            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
715            fp->index, hw_comp_cons, sw_comp_cons);
716
717         while (sw_comp_cons != hw_comp_cons) {
718                 struct sw_rx_bd *rx_buf = NULL;
719                 struct sk_buff *skb;
720                 union eth_rx_cqe *cqe;
721                 struct eth_fast_path_rx_cqe *cqe_fp;
722                 u8 cqe_fp_flags;
723                 enum eth_rx_cqe_type cqe_fp_type;
724                 u16 len, pad, queue;
725                 u8 *data;
726                 bool l4_rxhash;
727
728 #ifdef BNX2X_STOP_ON_ERROR
729                 if (unlikely(bp->panic))
730                         return 0;
731 #endif
732
733                 comp_ring_cons = RCQ_BD(sw_comp_cons);
734                 bd_prod = RX_BD(bd_prod);
735                 bd_cons = RX_BD(bd_cons);
736
737                 cqe = &fp->rx_comp_ring[comp_ring_cons];
738                 cqe_fp = &cqe->fast_path_cqe;
739                 cqe_fp_flags = cqe_fp->type_error_flags;
740                 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
741
742                 DP(NETIF_MSG_RX_STATUS,
743                    "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
744                    CQE_TYPE(cqe_fp_flags),
745                    cqe_fp_flags, cqe_fp->status_flags,
746                    le32_to_cpu(cqe_fp->rss_hash_result),
747                    le16_to_cpu(cqe_fp->vlan_tag),
748                    le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
749
750                 /* is this a slowpath msg? */
751                 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
752                         bnx2x_sp_event(fp, cqe);
753                         goto next_cqe;
754                 }
755
756                 rx_buf = &fp->rx_buf_ring[bd_cons];
757                 data = rx_buf->data;
758
759                 if (!CQE_TYPE_FAST(cqe_fp_type)) {
760                         struct bnx2x_agg_info *tpa_info;
761                         u16 frag_size, pages;
762 #ifdef BNX2X_STOP_ON_ERROR
763                         /* sanity check */
764                         if (fp->disable_tpa &&
765                             (CQE_TYPE_START(cqe_fp_type) ||
766                              CQE_TYPE_STOP(cqe_fp_type)))
767                                 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
768                                           CQE_TYPE(cqe_fp_type));
769 #endif
770
771                         if (CQE_TYPE_START(cqe_fp_type)) {
772                                 u16 queue = cqe_fp->queue_index;
773                                 DP(NETIF_MSG_RX_STATUS,
774                                    "calling tpa_start on queue %d\n",
775                                    queue);
776
777                                 bnx2x_tpa_start(fp, queue,
778                                                 bd_cons, bd_prod,
779                                                 cqe_fp);
780
781                                 goto next_rx;
782
783                         }
784                         queue = cqe->end_agg_cqe.queue_index;
785                         tpa_info = &fp->tpa_info[queue];
786                         DP(NETIF_MSG_RX_STATUS,
787                            "calling tpa_stop on queue %d\n",
788                            queue);
789
790                         frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
791                                     tpa_info->len_on_bd;
792
793                         if (fp->mode == TPA_MODE_GRO)
794                                 pages = (frag_size + tpa_info->full_page - 1) /
795                                          tpa_info->full_page;
796                         else
797                                 pages = SGE_PAGE_ALIGN(frag_size) >>
798                                         SGE_PAGE_SHIFT;
799
800                         bnx2x_tpa_stop(bp, fp, tpa_info, pages,
801                                        &cqe->end_agg_cqe, comp_ring_cons);
802 #ifdef BNX2X_STOP_ON_ERROR
803                         if (bp->panic)
804                                 return 0;
805 #endif
806
807                         bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
808                         goto next_cqe;
809                 }
810                 /* non TPA */
811                 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
812                 pad = cqe_fp->placement_offset;
813                 dma_sync_single_for_cpu(&bp->pdev->dev,
814                                         dma_unmap_addr(rx_buf, mapping),
815                                         pad + RX_COPY_THRESH,
816                                         DMA_FROM_DEVICE);
817                 pad += NET_SKB_PAD;
818                 prefetch(data + pad); /* speedup eth_type_trans() */
819                 /* is this an error packet? */
820                 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
821                         DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
822                            "ERROR  flags %x  rx packet %u\n",
823                            cqe_fp_flags, sw_comp_cons);
824                         bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
825                         goto reuse_rx;
826                 }
827
828                 /* Since we don't have a jumbo ring
829                  * copy small packets if mtu > 1500
830                  */
831                 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
832                     (len <= RX_COPY_THRESH)) {
833                         skb = netdev_alloc_skb_ip_align(bp->dev, len);
834                         if (skb == NULL) {
835                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
836                                    "ERROR  packet dropped because of alloc failure\n");
837                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
838                                 goto reuse_rx;
839                         }
840                         memcpy(skb->data, data + pad, len);
841                         bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
842                 } else {
843                         if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
844                                 dma_unmap_single(&bp->pdev->dev,
845                                                  dma_unmap_addr(rx_buf, mapping),
846                                                  fp->rx_buf_size,
847                                                  DMA_FROM_DEVICE);
848                                 skb = build_skb(data, 0);
849                                 if (unlikely(!skb)) {
850                                         kfree(data);
851                                         bnx2x_fp_qstats(bp, fp)->
852                                                         rx_skb_alloc_failed++;
853                                         goto next_rx;
854                                 }
855                                 skb_reserve(skb, pad);
856                         } else {
857                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
858                                    "ERROR  packet dropped because of alloc failure\n");
859                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
860 reuse_rx:
861                                 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
862                                 goto next_rx;
863                         }
864                 }
865
866                 skb_put(skb, len);
867                 skb->protocol = eth_type_trans(skb, bp->dev);
868
869                 /* Set Toeplitz hash for a non-LRO skb */
870                 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
871                 skb->l4_rxhash = l4_rxhash;
872
873                 skb_checksum_none_assert(skb);
874
875                 if (bp->dev->features & NETIF_F_RXCSUM)
876                         bnx2x_csum_validate(skb, cqe, fp,
877                                             bnx2x_fp_qstats(bp, fp));
878
879                 skb_record_rx_queue(skb, fp->rx_queue);
880
881                 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
882                     PARSING_FLAGS_VLAN)
883                         __vlan_hwaccel_put_tag(skb,
884                                                le16_to_cpu(cqe_fp->vlan_tag));
885                 napi_gro_receive(&fp->napi, skb);
886
887
888 next_rx:
889                 rx_buf->data = NULL;
890
891                 bd_cons = NEXT_RX_IDX(bd_cons);
892                 bd_prod = NEXT_RX_IDX(bd_prod);
893                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
894                 rx_pkt++;
895 next_cqe:
896                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
897                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
898
899                 if (rx_pkt == budget)
900                         break;
901         } /* while */
902
903         fp->rx_bd_cons = bd_cons;
904         fp->rx_bd_prod = bd_prod_fw;
905         fp->rx_comp_cons = sw_comp_cons;
906         fp->rx_comp_prod = sw_comp_prod;
907
908         /* Update producers */
909         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
910                              fp->rx_sge_prod);
911
912         fp->rx_pkt += rx_pkt;
913         fp->rx_calls++;
914
915         return rx_pkt;
916 }
917
918 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
919 {
920         struct bnx2x_fastpath *fp = fp_cookie;
921         struct bnx2x *bp = fp->bp;
922         u8 cos;
923
924         DP(NETIF_MSG_INTR,
925            "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
926            fp->index, fp->fw_sb_id, fp->igu_sb_id);
927         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
928
929 #ifdef BNX2X_STOP_ON_ERROR
930         if (unlikely(bp->panic))
931                 return IRQ_HANDLED;
932 #endif
933
934         /* Handle Rx and Tx according to MSI-X vector */
935         prefetch(fp->rx_cons_sb);
936
937         for_each_cos_in_tx_queue(fp, cos)
938                 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
939
940         prefetch(&fp->sb_running_index[SM_RX_ID]);
941         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
942
943         return IRQ_HANDLED;
944 }
945
946 /* HW Lock for shared dual port PHYs */
947 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
948 {
949         mutex_lock(&bp->port.phy_mutex);
950
951         if (bp->port.need_hw_lock)
952                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
953 }
954
955 void bnx2x_release_phy_lock(struct bnx2x *bp)
956 {
957         if (bp->port.need_hw_lock)
958                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
959
960         mutex_unlock(&bp->port.phy_mutex);
961 }
962
963 /* calculates MF speed according to current line speed and MF configuration */
964 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
965 {
966         u16 line_speed = bp->link_vars.line_speed;
967         if (IS_MF(bp)) {
968                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
969                                                    bp->mf_config[BP_VN(bp)]);
970
971                 /* Calculate the current MAX line speed limit for the MF
972                  * devices
973                  */
974                 if (IS_MF_SI(bp))
975                         line_speed = (line_speed * maxCfg) / 100;
976                 else { /* SD mode */
977                         u16 vn_max_rate = maxCfg * 100;
978
979                         if (vn_max_rate < line_speed)
980                                 line_speed = vn_max_rate;
981                 }
982         }
983
984         return line_speed;
985 }
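/* Illustrative arithmetic: with a 10000 Mbps link and maxCfg == 30, SI mode
 * reports 10000 * 30 / 100 = 3000 Mbps, while SD mode caps the speed at
 * vn_max_rate = 30 * 100 = 3000 Mbps (i.e. min(line_speed, 3000)).
 */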
986
987 /**
988  * bnx2x_fill_report_data - fill link report data to report
989  *
990  * @bp:         driver handle
991  * @data:       link state to update
992  *
993  * It uses non-atomic bit operations because it is called under the mutex.
994  */
995 static void bnx2x_fill_report_data(struct bnx2x *bp,
996                                    struct bnx2x_link_report_data *data)
997 {
998         u16 line_speed = bnx2x_get_mf_speed(bp);
999
1000         memset(data, 0, sizeof(*data));
1001
1002         /* Fill the report data: effective line speed */
1003         data->line_speed = line_speed;
1004
1005         /* Link is down */
1006         if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1007                 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1008                           &data->link_report_flags);
1009
1010         /* Full DUPLEX */
1011         if (bp->link_vars.duplex == DUPLEX_FULL)
1012                 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1013
1014         /* Rx Flow Control is ON */
1015         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1016                 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1017
1018         /* Tx Flow Control is ON */
1019         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1020                 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1021 }
1022
1023 /**
1024  * bnx2x_link_report - report link status to OS.
1025  *
1026  * @bp:         driver handle
1027  *
1028  * Calls the __bnx2x_link_report() under the same locking scheme
1029  * as a link/PHY state managing code to ensure a consistent link
1030  * as the link/PHY state managing code to ensure consistent link
1031  * reporting.
1032
1033 void bnx2x_link_report(struct bnx2x *bp)
1034 {
1035         bnx2x_acquire_phy_lock(bp);
1036         __bnx2x_link_report(bp);
1037         bnx2x_release_phy_lock(bp);
1038 }
1039
1040 /**
1041  * __bnx2x_link_report - report link status to OS.
1042  *
1043  * @bp:         driver handle
1044  *
1045  * Non-atomic implementation.
1046  * Should be called under the phy_lock.
1047  */
1048 void __bnx2x_link_report(struct bnx2x *bp)
1049 {
1050         struct bnx2x_link_report_data cur_data;
1051
1052         /* reread mf_cfg */
1053         if (!CHIP_IS_E1(bp))
1054                 bnx2x_read_mf_cfg(bp);
1055
1056         /* Read the current link report info */
1057         bnx2x_fill_report_data(bp, &cur_data);
1058
1059         /* Don't report link down or exactly the same link status twice */
1060         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1061             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1062                       &bp->last_reported_link.link_report_flags) &&
1063              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1064                       &cur_data.link_report_flags)))
1065                 return;
1066
1067         bp->link_cnt++;
1068
1069         /* We are going to report new link parameters now -
1070          * remember the current data for next time.
1071          */
1072         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1073
1074         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1075                      &cur_data.link_report_flags)) {
1076                 netif_carrier_off(bp->dev);
1077                 netdev_err(bp->dev, "NIC Link is Down\n");
1078                 return;
1079         } else {
1080                 const char *duplex;
1081                 const char *flow;
1082
1083                 netif_carrier_on(bp->dev);
1084
1085                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1086                                        &cur_data.link_report_flags))
1087                         duplex = "full";
1088                 else
1089                         duplex = "half";
1090
1091                 /* Handle the FC at the end so that only these flags could
1092                  * possibly be set. This way we can easily check whether FC is
1093                  * enabled at all.
1094                  */
1095                 if (cur_data.link_report_flags) {
1096                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1097                                      &cur_data.link_report_flags)) {
1098                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1099                                      &cur_data.link_report_flags))
1100                                         flow = "ON - receive & transmit";
1101                                 else
1102                                         flow = "ON - receive";
1103                         } else {
1104                                 flow = "ON - transmit";
1105                         }
1106                 } else {
1107                         flow = "none";
1108                 }
1109                 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1110                             cur_data.line_speed, duplex, flow);
1111         }
1112 }
1113
1114 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1115 {
1116         int i;
1117
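        /* The entries at the end of each SGE page are not used for data; they
         * hold the "next page" address (wrapping from the last page back to
         * the first) so the HW can walk the SGE ring across page boundaries.
         */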
1118         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1119                 struct eth_rx_sge *sge;
1120
1121                 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1122                 sge->addr_hi =
1123                         cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1124                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1125
1126                 sge->addr_lo =
1127                         cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1128                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1129         }
1130 }
1131
1132 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1133                                 struct bnx2x_fastpath *fp, int last)
1134 {
1135         int i;
1136
1137         for (i = 0; i < last; i++) {
1138                 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1139                 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1140                 u8 *data = first_buf->data;
1141
1142                 if (data == NULL) {
1143                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1144                         continue;
1145                 }
1146                 if (tpa_info->tpa_state == BNX2X_TPA_START)
1147                         dma_unmap_single(&bp->pdev->dev,
1148                                          dma_unmap_addr(first_buf, mapping),
1149                                          fp->rx_buf_size, DMA_FROM_DEVICE);
1150                 kfree(data);
1151                 first_buf->data = NULL;
1152         }
1153 }
1154
1155 void bnx2x_init_rx_rings(struct bnx2x *bp)
1156 {
1157         int func = BP_FUNC(bp);
1158         u16 ring_prod;
1159         int i, j;
1160
1161         /* Allocate TPA resources */
1162         for_each_rx_queue(bp, j) {
1163                 struct bnx2x_fastpath *fp = &bp->fp[j];
1164
1165                 DP(NETIF_MSG_IFUP,
1166                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1167
1168                 if (!fp->disable_tpa) {
1169                         /* Fill the per-aggregation pool */
1170                         for (i = 0; i < MAX_AGG_QS(bp); i++) {
1171                                 struct bnx2x_agg_info *tpa_info =
1172                                         &fp->tpa_info[i];
1173                                 struct sw_rx_bd *first_buf =
1174                                         &tpa_info->first_buf;
1175
1176                                 first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
1177                                                           GFP_ATOMIC);
1178                                 if (!first_buf->data) {
1179                                         BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1180                                                   j);
1181                                         bnx2x_free_tpa_pool(bp, fp, i);
1182                                         fp->disable_tpa = 1;
1183                                         break;
1184                                 }
1185                                 dma_unmap_addr_set(first_buf, mapping, 0);
1186                                 tpa_info->tpa_state = BNX2X_TPA_STOP;
1187                         }
1188
1189                         /* "next page" elements initialization */
1190                         bnx2x_set_next_page_sgl(fp);
1191
1192                         /* set SGEs bit mask */
1193                         bnx2x_init_sge_ring_bit_mask(fp);
1194
1195                         /* Allocate SGEs and initialize the ring elements */
1196                         for (i = 0, ring_prod = 0;
1197                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1198
1199                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1200                                         BNX2X_ERR("was only able to allocate %d rx sges\n",
1201                                                   i);
1202                                         BNX2X_ERR("disabling TPA for queue[%d]\n",
1203                                                   j);
1204                                         /* Cleanup already allocated elements */
1205                                         bnx2x_free_rx_sge_range(bp, fp,
1206                                                                 ring_prod);
1207                                         bnx2x_free_tpa_pool(bp, fp,
1208                                                             MAX_AGG_QS(bp));
1209                                         fp->disable_tpa = 1;
1210                                         ring_prod = 0;
1211                                         break;
1212                                 }
1213                                 ring_prod = NEXT_SGE_IDX(ring_prod);
1214                         }
1215
1216                         fp->rx_sge_prod = ring_prod;
1217                 }
1218         }
1219
1220         for_each_rx_queue(bp, j) {
1221                 struct bnx2x_fastpath *fp = &bp->fp[j];
1222
1223                 fp->rx_bd_cons = 0;
1224
1225                 /* Activate BD ring */
1226                 /* Warning!
1227                  * this will generate an interrupt (to the TSTORM);
1228                  * it must only be done after the chip is initialized
1229                  */
1230                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1231                                      fp->rx_sge_prod);
1232
1233                 if (j != 0)
1234                         continue;
1235
1236                 if (CHIP_IS_E1(bp)) {
1237                         REG_WR(bp, BAR_USTRORM_INTMEM +
1238                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1239                                U64_LO(fp->rx_comp_mapping));
1240                         REG_WR(bp, BAR_USTRORM_INTMEM +
1241                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1242                                U64_HI(fp->rx_comp_mapping));
1243                 }
1244         }
1245 }
1246
1247 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1248 {
1249         int i;
1250         u8 cos;
1251
1252         for_each_tx_queue(bp, i) {
1253                 struct bnx2x_fastpath *fp = &bp->fp[i];
1254                 for_each_cos_in_tx_queue(fp, cos) {
1255                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1256                         unsigned pkts_compl = 0, bytes_compl = 0;
1257
1258                         u16 sw_prod = txdata->tx_pkt_prod;
1259                         u16 sw_cons = txdata->tx_pkt_cons;
1260
1261                         while (sw_cons != sw_prod) {
1262                                 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1263                                     &pkts_compl, &bytes_compl);
1264                                 sw_cons++;
1265                         }
1266                         netdev_tx_reset_queue(
1267                                 netdev_get_tx_queue(bp->dev,
1268                                                     txdata->txq_index));
1269                 }
1270         }
1271 }
1272
1273 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1274 {
1275         struct bnx2x *bp = fp->bp;
1276         int i;
1277
1278         /* ring wasn't allocated */
1279         if (fp->rx_buf_ring == NULL)
1280                 return;
1281
1282         for (i = 0; i < NUM_RX_BD; i++) {
1283                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1284                 u8 *data = rx_buf->data;
1285
1286                 if (data == NULL)
1287                         continue;
1288                 dma_unmap_single(&bp->pdev->dev,
1289                                  dma_unmap_addr(rx_buf, mapping),
1290                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1291
1292                 rx_buf->data = NULL;
1293                 kfree(data);
1294         }
1295 }
1296
1297 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1298 {
1299         int j;
1300
1301         for_each_rx_queue(bp, j) {
1302                 struct bnx2x_fastpath *fp = &bp->fp[j];
1303
1304                 bnx2x_free_rx_bds(fp);
1305
1306                 if (!fp->disable_tpa)
1307                         bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1308         }
1309 }
1310
1311 void bnx2x_free_skbs(struct bnx2x *bp)
1312 {
1313         bnx2x_free_tx_skbs(bp);
1314         bnx2x_free_rx_skbs(bp);
1315 }
1316
1317 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1318 {
1319         /* load old values */
1320         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1321
1322         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1323                 /* leave all but MAX value */
1324                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1325
1326                 /* set new MAX value */
1327                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1328                                 & FUNC_MF_CFG_MAX_BW_MASK;
1329
1330                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1331         }
1332 }
1333
1334 /**
1335  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1336  *
1337  * @bp:         driver handle
1338  * @nvecs:      number of vectors to be released
1339  */
1340 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1341 {
1342         int i, offset = 0;
1343
1344         if (nvecs == offset)
1345                 return;
1346         free_irq(bp->msix_table[offset].vector, bp->dev);
1347         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1348            bp->msix_table[offset].vector);
1349         offset++;
1350 #ifdef BCM_CNIC
1351         if (nvecs == offset)
1352                 return;
1353         offset++;
1354 #endif
1355
1356         for_each_eth_queue(bp, i) {
1357                 if (nvecs == offset)
1358                         return;
1359                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1360                    i, bp->msix_table[offset].vector);
1361
1362                 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1363         }
1364 }
1365
1366 void bnx2x_free_irq(struct bnx2x *bp)
1367 {
1368         if (bp->flags & USING_MSIX_FLAG &&
1369             !(bp->flags & USING_SINGLE_MSIX_FLAG))
1370                 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1371                                      CNIC_PRESENT + 1);
1372         else
1373                 free_irq(bp->dev->irq, bp->dev);
1374 }
1375
1376 int bnx2x_enable_msix(struct bnx2x *bp)
1377 {
1378         int msix_vec = 0, i, rc, req_cnt;
1379
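        /* MSI-X vector layout: entry 0 is the slowpath interrupt, the next
         * entry (when BCM_CNIC is defined) belongs to CNIC, and the remaining
         * entries map one-to-one onto the ETH fastpath queues.
         */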
1380         bp->msix_table[msix_vec].entry = msix_vec;
1381         BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1382            bp->msix_table[0].entry);
1383         msix_vec++;
1384
1385 #ifdef BCM_CNIC
1386         bp->msix_table[msix_vec].entry = msix_vec;
1387         BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1388            bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1389         msix_vec++;
1390 #endif
1391         /* We need separate vectors for ETH queues only (not FCoE) */
1392         for_each_eth_queue(bp, i) {
1393                 bp->msix_table[msix_vec].entry = msix_vec;
1394                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1395                                msix_vec, msix_vec, i);
1396                 msix_vec++;
1397         }
1398
1399         req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
1400
1401         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1402
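             /* Note: with this (legacy) pci_enable_msix() API a return value
              * of 0 means success, a positive value is the number of vectors
              * that could actually be allocated (fewer than requested) and a
              * negative value is an error; the branches below retry
              * accordingly.
              */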
1403         /*
1404          * reconfigure number of tx/rx queues according to available
1405          * MSI-X vectors
1406          */
1407         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1408                 /* how many fewer vectors will we have? */
1409                 int diff = req_cnt - rc;
1410
1411                 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1412
1413                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1414
1415                 if (rc) {
1416                         BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1417                         goto no_msix;
1418                 }
1419                 /*
1420                  * decrease number of queues by number of unallocated entries
1421                  */
1422                 bp->num_queues -= diff;
1423
1424                 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1425                                bp->num_queues);
1426         } else if (rc > 0) {
1427                 /* Get by with single vector */
1428                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1429                 if (rc) {
1430                         BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1431                                        rc);
1432                         goto no_msix;
1433                 }
1434
1435                 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1436                 bp->flags |= USING_SINGLE_MSIX_FLAG;
1437
1438         } else if (rc < 0) {
1439                 BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
1440                 goto no_msix;
1441         }
1442
1443         bp->flags |= USING_MSIX_FLAG;
1444
1445         return 0;
1446
1447 no_msix:
1448         /* fall back to INTx if there is not enough memory for MSI-X */
1449         if (rc == -ENOMEM)
1450                 bp->flags |= DISABLE_MSI_FLAG;
1451
1452         return rc;
1453 }
1454
1455 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1456 {
1457         int i, rc, offset = 0;
1458
1459         rc = request_irq(bp->msix_table[offset++].vector,
1460                          bnx2x_msix_sp_int, 0,
1461                          bp->dev->name, bp->dev);
1462         if (rc) {
1463                 BNX2X_ERR("request sp irq failed\n");
1464                 return -EBUSY;
1465         }
1466
1467 #ifdef BCM_CNIC
1468         offset++;
1469 #endif
1470         for_each_eth_queue(bp, i) {
1471                 struct bnx2x_fastpath *fp = &bp->fp[i];
1472                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1473                          bp->dev->name, i);
1474
1475                 rc = request_irq(bp->msix_table[offset].vector,
1476                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1477                 if (rc) {
1478                         BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1479                               bp->msix_table[offset].vector, rc);
1480                         bnx2x_free_msix_irqs(bp, offset);
1481                         return -EBUSY;
1482                 }
1483
1484                 offset++;
1485         }
1486
1487         i = BNX2X_NUM_ETH_QUEUES(bp);
1488         offset = 1 + CNIC_PRESENT;
1489         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1490                bp->msix_table[0].vector,
1491                0, bp->msix_table[offset].vector,
1492                i - 1, bp->msix_table[offset + i - 1].vector);
1493
1494         return 0;
1495 }
1496
1497 int bnx2x_enable_msi(struct bnx2x *bp)
1498 {
1499         int rc;
1500
1501         rc = pci_enable_msi(bp->pdev);
1502         if (rc) {
1503                 BNX2X_DEV_INFO("MSI is not attainable\n");
1504                 return -1;
1505         }
1506         bp->flags |= USING_MSI_FLAG;
1507
1508         return 0;
1509 }
1510
1511 static int bnx2x_req_irq(struct bnx2x *bp)
1512 {
1513         unsigned long flags;
1514         unsigned int irq;
1515
1516         if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1517                 flags = 0;
1518         else
1519                 flags = IRQF_SHARED;
1520
1521         if (bp->flags & USING_MSIX_FLAG)
1522                 irq = bp->msix_table[0].vector;
1523         else
1524                 irq = bp->pdev->irq;
1525
1526         return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1527 }
1528
1529 static int bnx2x_setup_irqs(struct bnx2x *bp)
1530 {
1531         int rc = 0;
1532         if (bp->flags & USING_MSIX_FLAG &&
1533             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1534                 rc = bnx2x_req_msix_irqs(bp);
1535                 if (rc)
1536                         return rc;
1537         } else {
1538                 bnx2x_ack_int(bp);
1539                 rc = bnx2x_req_irq(bp);
1540                 if (rc) {
1541                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1542                         return rc;
1543                 }
1544                 if (bp->flags & USING_MSI_FLAG) {
1545                         bp->dev->irq = bp->pdev->irq;
1546                         netdev_info(bp->dev, "using MSI IRQ %d\n",
1547                                     bp->dev->irq);
1548                 }
1549                 if (bp->flags & USING_MSIX_FLAG) {
1550                         bp->dev->irq = bp->msix_table[0].vector;
1551                         netdev_info(bp->dev, "using MSIX IRQ %d\n",
1552                                     bp->dev->irq);
1553                 }
1554         }
1555
1556         return 0;
1557 }
1558
1559 static void bnx2x_napi_enable(struct bnx2x *bp)
1560 {
1561         int i;
1562
1563         for_each_rx_queue(bp, i)
1564                 napi_enable(&bnx2x_fp(bp, i, napi));
1565 }
1566
1567 static void bnx2x_napi_disable(struct bnx2x *bp)
1568 {
1569         int i;
1570
1571         for_each_rx_queue(bp, i)
1572                 napi_disable(&bnx2x_fp(bp, i, napi));
1573 }
1574
1575 void bnx2x_netif_start(struct bnx2x *bp)
1576 {
1577         if (netif_running(bp->dev)) {
1578                 bnx2x_napi_enable(bp);
1579                 bnx2x_int_enable(bp);
1580                 if (bp->state == BNX2X_STATE_OPEN)
1581                         netif_tx_wake_all_queues(bp->dev);
1582         }
1583 }
1584
1585 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1586 {
1587         bnx2x_int_disable_sync(bp, disable_hw);
1588         bnx2x_napi_disable(bp);
1589 }
1590
1591 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1592 {
1593         struct bnx2x *bp = netdev_priv(dev);
1594
1595 #ifdef BCM_CNIC
1596         if (!NO_FCOE(bp)) {
1597                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1598                 u16 ether_type = ntohs(hdr->h_proto);
1599
1600                 /* Skip VLAN tag if present */
1601                 if (ether_type == ETH_P_8021Q) {
1602                         struct vlan_ethhdr *vhdr =
1603                                 (struct vlan_ethhdr *)skb->data;
1604
1605                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1606                 }
1607
1608                 /* If ethertype is FCoE or FIP - use FCoE ring */
1609                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1610                         return bnx2x_fcoe_tx(bp, txq_index);
1611         }
1612 #endif
1613         /* select a non-FCoE queue */
1614         return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1615 }
1616
1617
1618 void bnx2x_set_num_queues(struct bnx2x *bp)
1619 {
1620         /* RSS queues */
1621         bp->num_queues = bnx2x_calc_num_queues(bp);
1622
1623 #ifdef BCM_CNIC
1624         /* override in STORAGE SD modes */
1625         if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1626                 bp->num_queues = 1;
1627 #endif
1628         /* Add special queues */
1629         bp->num_queues += NON_ETH_CONTEXT_USE;
1630
1631         BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1632 }
1633
1634 /**
1635  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1636  *
1637  * @bp:         Driver handle
1638  *
1639  * We currently support at most 16 Tx queues for each CoS, thus we will
1640  * allocate a multiple of 16 for ETH L2 rings according to the value of the
1641  * bp->max_cos.
1642  *
1643  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1644  * index after all ETH L2 indices.
1645  *
1646  * If the actual number of Tx queues (for each CoS) is less than 16 then there
1647  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1648  * 16..31, ...) with indices that are not coupled with any real Tx queue.
1649  *
1650  * The proper configuration of skb->queue_mapping is handled by
1651  * bnx2x_select_queue() and __skb_tx_hash().
1652  *
1653  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1654  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
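      *
      * As a rough worked example (with hypothetical numbers): for 8 ETH L2
      * queues and bp->max_cos = 3, netif_set_real_num_tx_queues() below is
      * called with 8 * 3 = 24 and netif_set_real_num_rx_queues() with 8; if
      * an FCoE L2 queue is present, both counts are incremented by one.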
1655  */
1656 static int bnx2x_set_real_num_queues(struct bnx2x *bp)
1657 {
1658         int rc, tx, rx;
1659
1660         tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1661         rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
1662
1663 /* account for fcoe queue */
1664 #ifdef BCM_CNIC
1665         if (!NO_FCOE(bp)) {
1666                 rx += FCOE_PRESENT;
1667                 tx += FCOE_PRESENT;
1668         }
1669 #endif
1670
1671         rc = netif_set_real_num_tx_queues(bp->dev, tx);
1672         if (rc) {
1673                 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1674                 return rc;
1675         }
1676         rc = netif_set_real_num_rx_queues(bp->dev, rx);
1677         if (rc) {
1678                 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1679                 return rc;
1680         }
1681
1682         DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1683                           tx, rx);
1684
1685         return rc;
1686 }
1687
1688 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1689 {
1690         int i;
1691
1692         for_each_queue(bp, i) {
1693                 struct bnx2x_fastpath *fp = &bp->fp[i];
1694                 u32 mtu;
1695
1696                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1697                 if (IS_FCOE_IDX(i))
1698                         /*
1699                          * Although there are no IP frames expected to arrive on
1700                          * this ring, we still want to add an
1701                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1702                          * overrun attack.
1703                          */
1704                         mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1705                 else
1706                         mtu = bp->dev->mtu;
1707                 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1708                                   IP_HEADER_ALIGNMENT_PADDING +
1709                                   ETH_OVREHEAD +
1710                                   mtu +
1711                                   BNX2X_FW_RX_ALIGN_END;
1712                 /* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
1713         }
1714 }
1715
1716 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1717 {
1718         int i;
1719         u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1720
1721         /* Prepare the initial contents of the indirection table if RSS is
1722          * enabled
1723          */
1724         for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1725                 bp->rss_conf_obj.ind_table[i] =
1726                         bp->fp->cl_id +
1727                         ethtool_rxfh_indir_default(i, num_eth_queues);
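             /* ethtool_rxfh_indir_default(i, n) is simply i % n, so the table
              * is filled round-robin across the ETH queues, offset by the
              * client id of fp[0].
              */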
1728
1729         /*
1730          * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1731          * per-port, so if explicit configuration is needed, do it only
1732          * for a PMF.
1733          *
1734          * For 57712 and newer, on the other hand, it is a per-function
1735          * configuration.
1736          */
1737         return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1738 }
1739
1740 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1741                         bool config_hash)
1742 {
1743         struct bnx2x_config_rss_params params = {NULL};
1744         int i;
1745
1746         /* Although RSS is meaningless when there is a single HW queue, we
1747          * still need it enabled in order to have HW Rx hash generated.
1748          *
1749          * if (!is_eth_multi(bp))
1750          *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
1751          */
1752
1753         params.rss_obj = rss_obj;
1754
1755         __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1756
1757         __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1758
1759         /* RSS configuration */
1760         __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1761         __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1762         __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1763         __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1764         if (rss_obj->udp_rss_v4)
1765                 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1766         if (rss_obj->udp_rss_v6)
1767                 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
1768
1769         /* Hash bits */
1770         params.rss_result_mask = MULTI_MASK;
1771
1772         memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
1773
1774         if (config_hash) {
1775                 /* RSS keys */
1776                 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1777                         params.rss_key[i] = random32();
1778
1779                 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1780         }
1781
1782         return bnx2x_config_rss(bp, &params);
1783 }
1784
1785 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1786 {
1787         struct bnx2x_func_state_params func_params = {NULL};
1788
1789         /* Prepare parameters for function state transitions */
1790         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1791
1792         func_params.f_obj = &bp->func_obj;
1793         func_params.cmd = BNX2X_F_CMD_HW_INIT;
1794
1795         func_params.params.hw_init.load_phase = load_code;
1796
1797         return bnx2x_func_state_change(bp, &func_params);
1798 }
1799
1800 /*
1801  * Cleans the objects that have internal lists without sending
1802  * ramrods. Should be run when interrupts are disabled.
1803  */
1804 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1805 {
1806         int rc;
1807         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1808         struct bnx2x_mcast_ramrod_params rparam = {NULL};
1809         struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
1810
1811         /***************** Cleanup MACs' object first *************************/
1812
1813         /* Wait for completion of the requested commands */
1814         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1815         /* Perform a dry cleanup */
1816         __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
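             /* RAMROD_DRV_CLR_ONLY turns delete_all() into a "dry" cleanup:
              * the driver-side lists are purged without posting ramrods to the
              * FW, which is what we want here since interrupts are disabled
              * and completions would never be processed.
              */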
1817
1818         /* Clean ETH primary MAC */
1819         __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1820         rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
1821                                  &ramrod_flags);
1822         if (rc != 0)
1823                 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1824
1825         /* Cleanup UC list */
1826         vlan_mac_flags = 0;
1827         __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1828         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1829                                  &ramrod_flags);
1830         if (rc != 0)
1831                 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1832
1833         /***************** Now clean mcast object *****************************/
1834         rparam.mcast_obj = &bp->mcast_obj;
1835         __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1836
1837         /* Add a DEL command... */
1838         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1839         if (rc < 0)
1840                 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1841                           rc);
1842
1843         /* ...and wait until all pending commands are cleared */
1844         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1845         while (rc != 0) {
1846                 if (rc < 0) {
1847                         BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1848                                   rc);
1849                         return;
1850                 }
1851
1852                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1853         }
1854 }
1855
1856 #ifndef BNX2X_STOP_ON_ERROR
1857 #define LOAD_ERROR_EXIT(bp, label) \
1858         do { \
1859                 (bp)->state = BNX2X_STATE_ERROR; \
1860                 goto label; \
1861         } while (0)
1862 #else
1863 #define LOAD_ERROR_EXIT(bp, label) \
1864         do { \
1865                 (bp)->state = BNX2X_STATE_ERROR; \
1866                 (bp)->panic = 1; \
1867                 return -EBUSY; \
1868         } while (0)
1869 #endif
1870
1871 bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1872 {
1873         /* build FW version dword */
1874         u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1875                     (BCM_5710_FW_MINOR_VERSION << 8) +
1876                     (BCM_5710_FW_REVISION_VERSION << 16) +
1877                     (BCM_5710_FW_ENGINEERING_VERSION << 24);
1878
1879         /* read loaded FW from chip */
1880         u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
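             /* The dword at XSEM_REG_PRAM is expected to hold the version of
              * the FW image already loaded in the chip (if any), in the same
              * layout as my_fw above.
              */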
1881
1882         DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1883
1884         if (loaded_fw != my_fw) {
1885                 if (is_err)
1886                         BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1887                                   loaded_fw, my_fw);
1888                 return false;
1889         }
1890
1891         return true;
1892 }
1893
1894 /**
1895  * bnx2x_bz_fp - zero content of the fastpath structure.
1896  *
1897  * @bp:         driver handle
1898  * @index:      fastpath index to be zeroed
1899  *
1900  * Makes sure the contents of bp->fp[index].napi are kept
1901  * intact.
1902  */
1903 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1904 {
1905         struct bnx2x_fastpath *fp = &bp->fp[index];
1906         struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
1907
1908         int cos;
1909         struct napi_struct orig_napi = fp->napi;
1910         struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
1911         /* bzero bnx2x_fastpath contents */
1912         if (bp->stats_init) {
1913                 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1914                 memset(fp, 0, sizeof(*fp));
1915         } else {
1916                 /* Keep Queue statistics */
1917                 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
1918                 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
1919
1920                 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
1921                                           GFP_KERNEL);
1922                 if (tmp_eth_q_stats)
1923                         memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
1924                                sizeof(struct bnx2x_eth_q_stats));
1925
1926                 tmp_eth_q_stats_old =
1927                         kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
1928                                 GFP_KERNEL);
1929                 if (tmp_eth_q_stats_old)
1930                         memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
1931                                sizeof(struct bnx2x_eth_q_stats_old));
1932
1933                 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1934                 memset(fp, 0, sizeof(*fp));
1935
1936                 if (tmp_eth_q_stats) {
1937                         memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
1938                                sizeof(struct bnx2x_eth_q_stats));
1939                         kfree(tmp_eth_q_stats);
1940                 }
1941
1942                 if (tmp_eth_q_stats_old) {
1943                         memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
1944                                sizeof(struct bnx2x_eth_q_stats_old));
1945                         kfree(tmp_eth_q_stats_old);
1946                 }
1947
1948         }
1949
1950         /* Restore the NAPI object as it has been already initialized */
1951         fp->napi = orig_napi;
1952         fp->tpa_info = orig_tpa_info;
1953         fp->bp = bp;
1954         fp->index = index;
1955         if (IS_ETH_FP(fp))
1956                 fp->max_cos = bp->max_cos;
1957         else
1958                 /* Special queues support only one CoS */
1959                 fp->max_cos = 1;
1960
1961         /* Init txdata pointers */
1962 #ifdef BCM_CNIC
1963         if (IS_FCOE_FP(fp))
1964                 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
1965 #endif
1966         if (IS_ETH_FP(fp))
1967                 for_each_cos_in_tx_queue(fp, cos)
1968                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
1969                                 BNX2X_NUM_ETH_QUEUES(bp) + index];
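             /* bp->bnx2x_txq layout: ETH txdata entries are grouped by CoS
              * (index = cos * BNX2X_NUM_ETH_QUEUES(bp) + queue index), with
              * the FCoE txdata entry placed after all the ETH ones.
              */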
1970
1971         /*
1972          * Set the tpa flag for each queue. The tpa flag determines the queue's
1973          * minimal size, so it must be set prior to queue memory allocation.
1974          */
1975         fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
1976                                   (bp->flags & GRO_ENABLE_FLAG &&
1977                                    bnx2x_mtu_allows_gro(bp->dev->mtu)));
1978         if (bp->flags & TPA_ENABLE_FLAG)
1979                 fp->mode = TPA_MODE_LRO;
1980         else if (bp->flags & GRO_ENABLE_FLAG)
1981                 fp->mode = TPA_MODE_GRO;
1982
1983 #ifdef BCM_CNIC
1984         /* We don't want TPA on an FCoE L2 ring */
1985         if (IS_FCOE_FP(fp))
1986                 fp->disable_tpa = 1;
1987 #endif
1988 }
1989
1990
1991 /* must be called with rtnl_lock */
1992 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1993 {
1994         int port = BP_PORT(bp);
1995         u32 load_code;
1996         int i, rc;
1997
1998 #ifdef BNX2X_STOP_ON_ERROR
1999         if (unlikely(bp->panic)) {
2000                 BNX2X_ERR("Can't load NIC when there is panic\n");
2001                 return -EPERM;
2002         }
2003 #endif
2004
2005         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2006
2007         /* Set the initial link reported state to link down */
2008         bnx2x_acquire_phy_lock(bp);
2009         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2010         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2011                 &bp->last_reported_link.link_report_flags);
2012         bnx2x_release_phy_lock(bp);
2013
2014         /* must be called before memory allocation and HW init */
2015         bnx2x_ilt_set_info(bp);
2016
2017         /*
2018          * Zero the fastpath structures while preserving invariants that are
2019          * allocated only once (napi), as well as fp index, max_cos, bp pointer.
2020          * Also set fp->disable_tpa and txdata_ptr.
2021          */
2022         DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2023         for_each_queue(bp, i)
2024                 bnx2x_bz_fp(bp, i);
2025         memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
2026                sizeof(struct bnx2x_fp_txdata));
2027
2028
2029         /* Set the receive queues buffer size */
2030         bnx2x_set_rx_buf_size(bp);
2031
2032         if (bnx2x_alloc_mem(bp))
2033                 return -ENOMEM;
2034
2035         /* As long as bnx2x_alloc_mem() may possibly update
2036          * bp->num_queues, bnx2x_set_real_num_queues() should always
2037          * come after it.
2038          */
2039         rc = bnx2x_set_real_num_queues(bp);
2040         if (rc) {
2041                 BNX2X_ERR("Unable to set real_num_queues\n");
2042                 LOAD_ERROR_EXIT(bp, load_error0);
2043         }
2044
2045         /* Configure multi-CoS mappings in the kernel.
2046          * This configuration may be overridden by a multi-class queue
2047          * discipline or by a DCBX negotiation result.
2048          */
2049         bnx2x_setup_tc(bp->dev, bp->max_cos);
2050
2051         /* Add all NAPI objects */
2052         bnx2x_add_all_napi(bp);
2053         bnx2x_napi_enable(bp);
2054
2055         /* set pf load just before approaching the MCP */
2056         bnx2x_set_pf_load(bp);
2057
2058         /* Send the LOAD_REQUEST command to the MCP.
2059          * It returns the type of LOAD command: if this is the first port to be
2060          * initialized, the common blocks should be initialized as well,
2061          * otherwise not.
2062          */
2063         if (!BP_NOMCP(bp)) {
2064                 /* init fw_seq */
2065                 bp->fw_seq =
2066                         (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2067                          DRV_MSG_SEQ_NUMBER_MASK);
2068                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2069
2070                 /* Get current FW pulse sequence */
2071                 bp->fw_drv_pulse_wr_seq =
2072                         (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2073                          DRV_PULSE_SEQ_MASK);
2074                 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2075
2076                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
2077                 if (!load_code) {
2078                         BNX2X_ERR("MCP response failure, aborting\n");
2079                         rc = -EBUSY;
2080                         LOAD_ERROR_EXIT(bp, load_error1);
2081                 }
2082                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2083                         BNX2X_ERR("Driver load refused\n");
2084                         rc = -EBUSY; /* other port in diagnostic mode */
2085                         LOAD_ERROR_EXIT(bp, load_error1);
2086                 }
2087                 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2088                     load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2089                         /* abort nic load if version mismatch */
2090                         if (!bnx2x_test_firmware_version(bp, true)) {
2091                                 rc = -EBUSY;
2092                                 LOAD_ERROR_EXIT(bp, load_error2);
2093                         }
2094                 }
2095
2096         } else {
2097                 int path = BP_PATH(bp);
2098
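                     /* Without an MCP the driver keeps the load accounting
                      * itself: load_count[path][0] counts all functions on
                      * this path while [1] and [2] count them per port, so the
                      * first function on the path performs COMMON init, the
                      * first one on a port performs PORT init and everybody
                      * else performs FUNCTION-only init.
                      */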
2099                 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2100                    path, load_count[path][0], load_count[path][1],
2101                    load_count[path][2]);
2102                 load_count[path][0]++;
2103                 load_count[path][1 + port]++;
2104                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2105                    path, load_count[path][0], load_count[path][1],
2106                    load_count[path][2]);
2107                 if (load_count[path][0] == 1)
2108                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
2109                 else if (load_count[path][1 + port] == 1)
2110                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2111                 else
2112                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2113         }
2114
2115         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2116             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2117             (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2118                 bp->port.pmf = 1;
2119                 /*
2120                  * We need the barrier to ensure the ordering between the
2121                  * writing to bp->port.pmf here and reading it from the
2122                  * bnx2x_periodic_task().
2123                  */
2124                 smp_mb();
2125         } else
2126                 bp->port.pmf = 0;
2127
2128         DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
2129
2130         /* Init Function state controlling object */
2131         bnx2x__init_func_obj(bp);
2132
2133         /* Initialize HW */
2134         rc = bnx2x_init_hw(bp, load_code);
2135         if (rc) {
2136                 BNX2X_ERR("HW init failed, aborting\n");
2137                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2138                 LOAD_ERROR_EXIT(bp, load_error2);
2139         }
2140
2141         /* Connect to IRQs */
2142         rc = bnx2x_setup_irqs(bp);
2143         if (rc) {
2144                 BNX2X_ERR("IRQs setup failed\n");
2145                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2146                 LOAD_ERROR_EXIT(bp, load_error2);
2147         }
2148
2149         /* Setup NIC internals and enable interrupts */
2150         bnx2x_nic_init(bp, load_code);
2151
2152         /* Init per-function objects */
2153         bnx2x_init_bp_objs(bp);
2154
2155         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2156             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2157             (bp->common.shmem2_base)) {
2158                 if (SHMEM2_HAS(bp, dcc_support))
2159                         SHMEM2_WR(bp, dcc_support,
2160                                   (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2161                                    SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2162                 if (SHMEM2_HAS(bp, afex_driver_support))
2163                         SHMEM2_WR(bp, afex_driver_support,
2164                                   SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2165         }
2166
2167         /* Set AFEX default VLAN tag to an invalid value */
2168         bp->afex_def_vlan_tag = -1;
2169
2170         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2171         rc = bnx2x_func_start(bp);
2172         if (rc) {
2173                 BNX2X_ERR("Function start failed!\n");
2174                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2175                 LOAD_ERROR_EXIT(bp, load_error3);
2176         }
2177
2178         /* Send LOAD_DONE command to MCP */
2179         if (!BP_NOMCP(bp)) {
2180                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2181                 if (!load_code) {
2182                         BNX2X_ERR("MCP response failure, aborting\n");
2183                         rc = -EBUSY;
2184                         LOAD_ERROR_EXIT(bp, load_error3);
2185                 }
2186         }
2187
2188         rc = bnx2x_setup_leading(bp);
2189         if (rc) {
2190                 BNX2X_ERR("Setup leading failed!\n");
2191                 LOAD_ERROR_EXIT(bp, load_error3);
2192         }
2193
2194 #ifdef BCM_CNIC
2195         /* Enable Timer scan */
2196         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2197 #endif
2198
2199         for_each_nondefault_queue(bp, i) {
2200                 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2201                 if (rc) {
2202                         BNX2X_ERR("Queue setup failed\n");
2203                         LOAD_ERROR_EXIT(bp, load_error4);
2204                 }
2205         }
2206
2207         rc = bnx2x_init_rss_pf(bp);
2208         if (rc) {
2209                 BNX2X_ERR("PF RSS init failed\n");
2210                 LOAD_ERROR_EXIT(bp, load_error4);
2211         }
2212
2213         /* Now that the clients are configured we are ready to work */
2214         bp->state = BNX2X_STATE_OPEN;
2215
2216         /* Configure a ucast MAC */
2217         rc = bnx2x_set_eth_mac(bp, true);
2218         if (rc) {
2219                 BNX2X_ERR("Setting Ethernet MAC failed\n");
2220                 LOAD_ERROR_EXIT(bp, load_error4);
2221         }
2222
2223         if (bp->pending_max) {
2224                 bnx2x_update_max_mf_config(bp, bp->pending_max);
2225                 bp->pending_max = 0;
2226         }
2227
2228         if (bp->port.pmf)
2229                 bnx2x_initial_phy_init(bp, load_mode);
2230
2231         /* Start fast path */
2232
2233         /* Initialize Rx filter. */
2234         netif_addr_lock_bh(bp->dev);
2235         bnx2x_set_rx_mode(bp->dev);
2236         netif_addr_unlock_bh(bp->dev);
2237
2238         /* Start the Tx */
2239         switch (load_mode) {
2240         case LOAD_NORMAL:
2241                 /* Tx queues should only be re-enabled */
2242                 netif_tx_wake_all_queues(bp->dev);
2243                 break;
2244
2245         case LOAD_OPEN:
2246                 netif_tx_start_all_queues(bp->dev);
2247                 smp_mb__after_clear_bit();
2248                 break;
2249
2250         case LOAD_DIAG:
2251         case LOAD_LOOPBACK_EXT:
2252                 bp->state = BNX2X_STATE_DIAG;
2253                 break;
2254
2255         default:
2256                 break;
2257         }
2258
2259         if (bp->port.pmf)
2260                 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
2261         else
2262                 bnx2x__link_status_update(bp);
2263
2264         /* start the timer */
2265         mod_timer(&bp->timer, jiffies + bp->current_interval);
2266
2267 #ifdef BCM_CNIC
2268         /* re-read iscsi info */
2269         bnx2x_get_iscsi_info(bp);
2270         bnx2x_setup_cnic_irq_info(bp);
2271         bnx2x_setup_cnic_info(bp);
2272         if (bp->state == BNX2X_STATE_OPEN)
2273                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2274 #endif
2275
2276         /* mark driver is loaded in shmem2 */
2277         if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2278                 u32 val;
2279                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2280                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2281                           val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2282                           DRV_FLAGS_CAPABILITIES_LOADED_L2);
2283         }
2284
2285         /* Wait for all pending SP commands to complete */
2286         if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2287                 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2288                 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2289                 return -EBUSY;
2290         }
2291
2292         /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2293         if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2294                 bnx2x_dcbx_init(bp, false);
2295
2296         return 0;
2297
2298 #ifndef BNX2X_STOP_ON_ERROR
2299 load_error4:
2300 #ifdef BCM_CNIC
2301         /* Disable Timer scan */
2302         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2303 #endif
2304 load_error3:
2305         bnx2x_int_disable_sync(bp, 1);
2306
2307         /* Clean queueable objects */
2308         bnx2x_squeeze_objects(bp);
2309
2310         /* Free SKBs, SGEs, TPA pool and driver internals */
2311         bnx2x_free_skbs(bp);
2312         for_each_rx_queue(bp, i)
2313                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2314
2315         /* Release IRQs */
2316         bnx2x_free_irq(bp);
2317 load_error2:
2318         if (!BP_NOMCP(bp)) {
2319                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2320                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2321         }
2322
2323         bp->port.pmf = 0;
2324 load_error1:
2325         bnx2x_napi_disable(bp);
2326         /* clear pf_load status, as it was already set */
2327         bnx2x_clear_pf_load(bp);
2328 load_error0:
2329         bnx2x_free_mem(bp);
2330
2331         return rc;
2332 #endif /* ! BNX2X_STOP_ON_ERROR */
2333 }
2334
2335 /* must be called with rtnl_lock */
2336 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2337 {
2338         int i;
2339         bool global = false;
2340
2341         /* mark driver is unloaded in shmem2 */
2342         if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2343                 u32 val;
2344                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2345                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2346                           val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2347         }
2348
2349         if ((bp->state == BNX2X_STATE_CLOSED) ||
2350             (bp->state == BNX2X_STATE_ERROR)) {
2351                 /* We can get here if the driver has been unloaded
2352                  * during parity error recovery and is either waiting for a
2353                  * leader to complete or for other functions to unload and
2354                  * then ifdown has been issued. In this case we want to
2355                  * unload and let the other functions complete the recovery
2356                  * process.
2357                  */
2358                 bp->recovery_state = BNX2X_RECOVERY_DONE;
2359                 bp->is_leader = 0;
2360                 bnx2x_release_leader_lock(bp);
2361                 smp_mb();
2362
2363                 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2364                 BNX2X_ERR("Can't unload in closed or error state\n");
2365                 return -EINVAL;
2366         }
2367
2368         /*
2369          * It's important to set bp->state to a value different from
2370          * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2371          * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2372          */
2373         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2374         smp_mb();
2375
2376         /* Stop Tx */
2377         bnx2x_tx_disable(bp);
2378         netdev_reset_tc(bp->dev);
2379
2380 #ifdef BCM_CNIC
2381         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2382 #endif
2383
2384         bp->rx_mode = BNX2X_RX_MODE_NONE;
2385
2386         del_timer_sync(&bp->timer);
2387
2388         /* Set ALWAYS_ALIVE bit in shmem */
2389         bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2390
2391         bnx2x_drv_pulse(bp);
2392
2393         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2394         bnx2x_save_statistics(bp);
2395
2396         /* Cleanup the chip if needed */
2397         if (unload_mode != UNLOAD_RECOVERY)
2398                 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2399         else {
2400                 /* Send the UNLOAD_REQUEST to the MCP */
2401                 bnx2x_send_unload_req(bp, unload_mode);
2402
2403                 /*
2404                  * Prevent transactions to the host from the functions on the
2405                  * engine that doesn't reset global blocks in case of a global
2406                  * attention, once the global blocks are reset and the gates
2407                  * are opened (the engine whose leader will perform the
2408                  * recovery last).
2409                  */
2410                 if (!CHIP_IS_E1x(bp))
2411                         bnx2x_pf_disable(bp);
2412
2413                 /* Disable HW interrupts, NAPI */
2414                 bnx2x_netif_stop(bp, 1);
2415                 /* Delete all NAPI objects */
2416                 bnx2x_del_all_napi(bp);
2417
2418                 /* Release IRQs */
2419                 bnx2x_free_irq(bp);
2420
2421                 /* Report UNLOAD_DONE to MCP */
2422                 bnx2x_send_unload_done(bp, false);
2423         }
2424
2425         /*
2426          * At this stage no more interrupts will arrive, so we may safely clean
2427          * the queueable objects here in case they have failed to get cleaned so far.
2428          */
2429         bnx2x_squeeze_objects(bp);
2430
2431         /* There should be no more pending SP commands at this stage */
2432         bp->sp_state = 0;
2433
2434         bp->port.pmf = 0;
2435
2436         /* Free SKBs, SGEs, TPA pool and driver internals */
2437         bnx2x_free_skbs(bp);
2438         for_each_rx_queue(bp, i)
2439                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2440
2441         bnx2x_free_mem(bp);
2442
2443         bp->state = BNX2X_STATE_CLOSED;
2444
2445         /* Check if there are pending parity attentions. If there are, set
2446          * RECOVERY_IN_PROGRESS.
2447          */
2448         if (bnx2x_chk_parity_attn(bp, &global, false)) {
2449                 bnx2x_set_reset_in_progress(bp);
2450
2451                 /* Set RESET_IS_GLOBAL if needed */
2452                 if (global)
2453                         bnx2x_set_reset_global(bp);
2454         }
2455
2456
2457         /* The last driver must disable the "close the gate" functionality if
2458          * there is no parity attention or "process kill" pending.
2459          */
2460         if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2461                 bnx2x_disable_close_the_gate(bp);
2462
2463         return 0;
2464 }
2465
2466 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2467 {
2468         u16 pmcsr;
2469
2470         /* If there is no power capability, silently succeed */
2471         if (!bp->pm_cap) {
2472                 BNX2X_DEV_INFO("No power capability. Breaking.\n");
2473                 return 0;
2474         }
2475
2476         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2477
2478         switch (state) {
2479         case PCI_D0:
2480                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2481                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2482                                        PCI_PM_CTRL_PME_STATUS));
2483
2484                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2485                         /* delay required during transition out of D3hot */
2486                         msleep(20);
2487                 break;
2488
2489         case PCI_D3hot:
2490                 /* If there are other clients above, don't
2491                    shut down the power */
2492                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2493                         return 0;
2494                 /* Don't shut down the power for emulation and FPGA */
2495                 if (CHIP_REV_IS_SLOW(bp))
2496                         return 0;
2497
2498                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2499                 pmcsr |= 3;
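                     /* 3 is the D3hot encoding of the PCI_PM_CTRL state field */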
2500
2501                 if (bp->wol)
2502                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2503
2504                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2505                                       pmcsr);
2506
2507                 /* No more memory access after this point until
2508                  * device is brought back to D0.
2509                  */
2510                 break;
2511
2512         default:
2513                 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
2514                 return -EINVAL;
2515         }
2516         return 0;
2517 }
2518
2519 /*
2520  * net_device service functions
2521  */
2522 int bnx2x_poll(struct napi_struct *napi, int budget)
2523 {
2524         int work_done = 0;
2525         u8 cos;
2526         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2527                                                  napi);
2528         struct bnx2x *bp = fp->bp;
2529
2530         while (1) {
2531 #ifdef BNX2X_STOP_ON_ERROR
2532                 if (unlikely(bp->panic)) {
2533                         napi_complete(napi);
2534                         return 0;
2535                 }
2536 #endif
2537
2538                 for_each_cos_in_tx_queue(fp, cos)
2539                         if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2540                                 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
2541
2542
2543                 if (bnx2x_has_rx_work(fp)) {
2544                         work_done += bnx2x_rx_int(fp, budget - work_done);
2545
2546                         /* must not complete if we consumed full budget */
2547                         if (work_done >= budget)
2548                                 break;
2549                 }
2550
2551                 /* Fall out from the NAPI loop if needed */
2552                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2553 #ifdef BCM_CNIC
2554                         /* No need to update the SB for the FCoE L2 ring since
2555                          * it's connected to the default SB and that SB has
2556                          * already been updated when NAPI was scheduled.
2557                          */
2558                         if (IS_FCOE_FP(fp)) {
2559                                 napi_complete(napi);
2560                                 break;
2561                         }
2562 #endif
2563
2564                         bnx2x_update_fpsb_idx(fp);
2565                         /* bnx2x_has_rx_work() reads the status block,
2566                          * thus we need to ensure that status block indices
2567                          * have been actually read (bnx2x_update_fpsb_idx)
2568                          * prior to this check (bnx2x_has_rx_work) so that
2569                          * we won't write the "newer" value of the status block
2570                          * to IGU (if there was a DMA right after
2571                          * bnx2x_has_rx_work and if there is no rmb, the memory
2572                          * reading (bnx2x_update_fpsb_idx) may be postponed
2573                          * to right before bnx2x_ack_sb). In this case there
2574                          * will never be another interrupt until there is
2575                          * another update of the status block, while there
2576                          * is still unhandled work.
2577                          */
2578                         rmb();
2579
2580                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2581                                 napi_complete(napi);
2582                                 /* Re-enable interrupts */
2583                                 DP(NETIF_MSG_RX_STATUS,
2584                                    "Update index to %d\n", fp->fp_hc_idx);
2585                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2586                                              le16_to_cpu(fp->fp_hc_idx),
2587                                              IGU_INT_ENABLE, 1);
2588                                 break;
2589                         }
2590                 }
2591         }
2592
2593         return work_done;
2594 }
2595
2596 /* We split the first BD into a headers BD and a data BD
2597  * to ease the pain of our fellow microcode engineers;
2598  * we use one mapping for both BDs.
2599  */
2600 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2601                                    struct bnx2x_fp_txdata *txdata,
2602                                    struct sw_tx_bd *tx_buf,
2603                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
2604                                    u16 bd_prod, int nbd)
2605 {
2606         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2607         struct eth_tx_bd *d_tx_bd;
2608         dma_addr_t mapping;
2609         int old_len = le16_to_cpu(h_tx_bd->nbytes);
2610
2611         /* first fix first BD */
2612         h_tx_bd->nbd = cpu_to_le16(nbd);
2613         h_tx_bd->nbytes = cpu_to_le16(hlen);
2614
2615         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2616            h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
2617
2618         /* now get a new data BD
2619          * (after the pbd) and fill it */
2620         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2621         d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2622
2623         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2624                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2625
2626         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2627         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2628         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
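             /* The data BD reuses the header BD's DMA mapping, simply starting
              * hlen bytes further into the same buffer (see 'mapping' above).
              */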
2629
2630         /* this marks the BD as one that has no individual mapping */
2631         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2632
2633         DP(NETIF_MSG_TX_QUEUED,
2634            "TSO split data size is %d (%x:%x)\n",
2635            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2636
2637         /* update tx_bd */
2638         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2639
2640         return bd_prod;
2641 }
2642
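     /* Adjust a partial checksum by 'fix' bytes: if fix > 0, subtract the
      * checksum of the fix bytes preceding the transport header; if fix < 0,
      * add the checksum of the -fix bytes following it. The folded result is
      * returned byte-swapped (swab16).
      */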
2643 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2644 {
2645         if (fix > 0)
2646                 csum = (u16) ~csum_fold(csum_sub(csum,
2647                                 csum_partial(t_header - fix, fix, 0)));
2648
2649         else if (fix < 0)
2650                 csum = (u16) ~csum_fold(csum_add(csum,
2651                                 csum_partial(t_header, -fix, 0)));
2652
2653         return swab16(csum);
2654 }
2655
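     /* Classify an skb for the Tx path: returns a bitmask of XMIT_* flags
      * describing the requested checksum offload (IPv4/IPv6, TCP/UDP) and,
      * when applicable, the GSO type.
      */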
2656 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2657 {
2658         u32 rc;
2659
2660         if (skb->ip_summed != CHECKSUM_PARTIAL)
2661                 rc = XMIT_PLAIN;
2662
2663         else {
2664                 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2665                         rc = XMIT_CSUM_V6;
2666                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2667                                 rc |= XMIT_CSUM_TCP;
2668
2669                 } else {
2670                         rc = XMIT_CSUM_V4;
2671                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2672                                 rc |= XMIT_CSUM_TCP;
2673                 }
2674         }
2675
2676         if (skb_is_gso_v6(skb))
2677                 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2678         else if (skb_is_gso(skb))
2679                 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2680
2681         return rc;
2682 }
2683
2684 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2685 /* Check if the packet requires linearization (the packet is too fragmented).
2686    There is no need to check fragmentation if the page size > 8K (there will
2687    be no violation of FW restrictions) */
2688 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2689                              u32 xmit_type)
2690 {
2691         int to_copy = 0;
2692         int hlen = 0;
2693         int first_bd_sz = 0;
2694
2695         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2696         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2697
2698                 if (xmit_type & XMIT_GSO) {
2699                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2700                         /* Check if LSO packet needs to be copied:
2701                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2702                         int wnd_size = MAX_FETCH_BD - 3;
2703                         /* Number of windows to check */
2704                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2705                         int wnd_idx = 0;
2706                         int frag_idx = 0;
2707                         u32 wnd_sum = 0;
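                             /* The checks below enforce what appears to be the
                              * FW restriction: every window of wnd_size
                              * consecutive BDs must carry at least lso_mss
                              * bytes, otherwise the packet has to be
                              * linearized.
                              */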
2708
2709                         /* Headers length */
2710                         hlen = (int)(skb_transport_header(skb) - skb->data) +
2711                                 tcp_hdrlen(skb);
2712
2713                         /* Amount of data (w/o headers) on the linear part of the SKB */
2714                         first_bd_sz = skb_headlen(skb) - hlen;
2715
2716                         wnd_sum  = first_bd_sz;
2717
2718                         /* Calculate the first sum - it's special */
2719                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2720                                 wnd_sum +=
2721                                         skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2722
2723                         /* If there was data on the linear part of the skb - check it */
2724                         if (first_bd_sz > 0) {
2725                                 if (unlikely(wnd_sum < lso_mss)) {
2726                                         to_copy = 1;
2727                                         goto exit_lbl;
2728                                 }
2729
2730                                 wnd_sum -= first_bd_sz;
2731                         }
2732
2733                         /* Others are easier: run through the frag list and
2734                            check all windows */
2735                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2736                                 wnd_sum +=
2737                           skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2738
2739                                 if (unlikely(wnd_sum < lso_mss)) {
2740                                         to_copy = 1;
2741                                         break;
2742                                 }
2743                                 wnd_sum -=
2744                                         skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2745                         }
2746                 } else {
2747                         /* in the non-LSO case a too fragmented packet
2748                            should always be linearized */
2749                         to_copy = 1;
2750                 }
2751         }
2752
2753 exit_lbl:
2754         if (unlikely(to_copy))
2755                 DP(NETIF_MSG_TX_QUEUED,
2756                    "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
2757                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2758                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2759
2760         return to_copy;
2761 }
2762 #endif
2763
2764 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2765                                         u32 xmit_type)
2766 {
2767         *parsing_data |= (skb_shinfo(skb)->gso_size <<
2768                               ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2769                               ETH_TX_PARSE_BD_E2_LSO_MSS;
2770         if ((xmit_type & XMIT_GSO_V6) &&
2771             (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2772                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2773 }
2774
2775 /**
2776  * bnx2x_set_pbd_gso - update PBD in GSO case.
2777  *
2778  * @skb:        packet skb
2779  * @pbd:        parse BD
2780  * @xmit_type:  xmit flags
2781  */
2782 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2783                                      struct eth_tx_parse_bd_e1x *pbd,
2784                                      u32 xmit_type)
2785 {
2786         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2787         pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2788         pbd->tcp_flags = pbd_tcp_flags(skb);
2789
2790         if (xmit_type & XMIT_GSO_V4) {
2791                 pbd->ip_id = swab16(ip_hdr(skb)->id);
2792                 pbd->tcp_pseudo_csum =
2793                         swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2794                                                   ip_hdr(skb)->daddr,
2795                                                   0, IPPROTO_TCP, 0));
2796
2797         } else
2798                 pbd->tcp_pseudo_csum =
2799                         swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2800                                                 &ipv6_hdr(skb)->daddr,
2801                                                 0, IPPROTO_TCP, 0));
2802
2803         pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2804 }
2805
2806 /**
2807  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2808  *
2809  * @bp:                 driver handle
2810  * @skb:                packet skb
2811  * @parsing_data:       data to be updated
2812  * @xmit_type:          xmit flags
2813  *
2814  * 57712 related
2815  */
2816 static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2817         u32 *parsing_data, u32 xmit_type)
2818 {
2819         *parsing_data |=
2820                         ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2821                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2822                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2823
2824         if (xmit_type & XMIT_CSUM_TCP) {
2825                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2826                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2827                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2828
2829                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2830         } else
2831                 /* We support checksum offload for TCP and UDP only.
2832                  * No need to pass the UDP header length - it's a constant.
2833                  */
2834                 return skb_transport_header(skb) +
2835                                 sizeof(struct udphdr) - skb->data;
2836 }
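
/* Illustrative worked example (comment only), assuming an untagged IPv4/TCP
 * frame with a 14-byte Ethernet header and no IP or TCP options:
 *   L4 header offset = 14 + 20 = 34 bytes -> 17 words placed in
 *   TCP_HDR_START_OFFSET_W, tcp_hdrlen(skb) = 20 bytes -> 5 dwords placed in
 *   TCP_HDR_LENGTH_DW, and the function returns hlen = 34 + 20 = 54 bytes.
 */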
2837
2838 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2839         struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2840 {
2841         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2842
2843         if (xmit_type & XMIT_CSUM_V4)
2844                 tx_start_bd->bd_flags.as_bitfield |=
2845                                         ETH_TX_BD_FLAGS_IP_CSUM;
2846         else
2847                 tx_start_bd->bd_flags.as_bitfield |=
2848                                         ETH_TX_BD_FLAGS_IPV6;
2849
2850         if (!(xmit_type & XMIT_CSUM_TCP))
2851                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2852 }
2853
2854 /**
2855  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2856  *
2857  * @bp:         driver handle
2858  * @skb:        packet skb
2859  * @pbd:        parse BD to be updated
2860  * @xmit_type:  xmit flags
2861  */
2862 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2863         struct eth_tx_parse_bd_e1x *pbd,
2864         u32 xmit_type)
2865 {
2866         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2867
2868         /* for now NS flag is not used in Linux */
2869         pbd->global_data =
2870                 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2871                          ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2872
2873         pbd->ip_hlen_w = (skb_transport_header(skb) -
2874                         skb_network_header(skb)) >> 1;
2875
2876         hlen += pbd->ip_hlen_w;
2877
2878         /* We support checksum offload for TCP and UDP only */
2879         if (xmit_type & XMIT_CSUM_TCP)
2880                 hlen += tcp_hdrlen(skb) / 2;
2881         else
2882                 hlen += sizeof(struct udphdr) / 2;
2883
2884         pbd->total_hlen_w = cpu_to_le16(hlen);
2885         hlen = hlen * 2;
2886
2887         if (xmit_type & XMIT_CSUM_TCP) {
2888                 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2889
2890         } else {
2891                 s8 fix = SKB_CS_OFF(skb); /* signed! */
2892
2893                 DP(NETIF_MSG_TX_QUEUED,
2894                    "hlen %d  fix %d  csum before fix %x\n",
2895                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2896
2897                 /* HW bug: fixup the CSUM */
2898                 pbd->tcp_pseudo_csum =
2899                         bnx2x_csum_fix(skb_transport_header(skb),
2900                                        SKB_CS(skb), fix);
2901
2902                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2903                    pbd->tcp_pseudo_csum);
2904         }
2905
2906         return hlen;
2907 }
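
/* Illustrative worked example (comment only), for the same untagged IPv4/TCP
 * frame as above: hlen starts at 14 / 2 = 7 words (MAC header),
 * ip_hlen_w = 20 / 2 = 10 words, the TCP header adds another 10 words, so
 * total_hlen_w = 27 words and the function returns 27 * 2 = 54 bytes.
 */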
2908
2909 /* called with netif_tx_lock
2910  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2911  * netif_wake_queue()
2912  */
2913 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2914 {
2915         struct bnx2x *bp = netdev_priv(dev);
2916
2917         struct netdev_queue *txq;
2918         struct bnx2x_fp_txdata *txdata;
2919         struct sw_tx_bd *tx_buf;
2920         struct eth_tx_start_bd *tx_start_bd, *first_bd;
2921         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2922         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2923         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2924         u32 pbd_e2_parsing_data = 0;
2925         u16 pkt_prod, bd_prod;
2926         int nbd, txq_index;
2927         dma_addr_t mapping;
2928         u32 xmit_type = bnx2x_xmit_type(bp, skb);
2929         int i;
2930         u8 hlen = 0;
2931         __le16 pkt_size = 0;
2932         struct ethhdr *eth;
2933         u8 mac_type = UNICAST_ADDRESS;
2934
2935 #ifdef BNX2X_STOP_ON_ERROR
2936         if (unlikely(bp->panic))
2937                 return NETDEV_TX_BUSY;
2938 #endif
2939
2940         txq_index = skb_get_queue_mapping(skb);
2941         txq = netdev_get_tx_queue(dev, txq_index);
2942
2943         BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2944
2945         txdata = &bp->bnx2x_txq[txq_index];
2946
2947         /* enable this debug print to view the transmission queue being used
2948         DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
2949            txq_index, fp_index, txdata_index); */
2950
2951         /* enable this debug print to view the transmission details
2952         DP(NETIF_MSG_TX_QUEUED,
2953            "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
2954            txdata->cid, fp_index, txdata_index, txdata, fp); */
2955
2956         if (unlikely(bnx2x_tx_avail(bp, txdata) <
2957                         skb_shinfo(skb)->nr_frags +
2958                         BDS_PER_TX_PKT +
2959                         NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2960                 /* Handle special storage cases separately */
2961                 if (txdata->tx_ring_size != 0) {
2962                         BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2963                         bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
2964                         netif_tx_stop_queue(txq);
2965                 }
2966
2967                 return NETDEV_TX_BUSY;
2968         }
2969
2970         DP(NETIF_MSG_TX_QUEUED,
2971            "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x\n",
2972            txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2973            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2974
2975         eth = (struct ethhdr *)skb->data;
2976
2977         /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2978         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2979                 if (is_broadcast_ether_addr(eth->h_dest))
2980                         mac_type = BROADCAST_ADDRESS;
2981                 else
2982                         mac_type = MULTICAST_ADDRESS;
2983         }
2984
2985 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2986         /* First, check if we need to linearize the skb (due to FW
2987            restrictions). No need to check fragmentation if page size > 8K
2988            (there will be no violation of the FW restrictions) */
2989         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2990                 /* Statistics of linearization */
2991                 bp->lin_cnt++;
2992                 if (skb_linearize(skb) != 0) {
2993                         DP(NETIF_MSG_TX_QUEUED,
2994                            "SKB linearization failed - silently dropping this SKB\n");
2995                         dev_kfree_skb_any(skb);
2996                         return NETDEV_TX_OK;
2997                 }
2998         }
2999 #endif
3000         /* Map skb linear data for DMA */
3001         mapping = dma_map_single(&bp->pdev->dev, skb->data,
3002                                  skb_headlen(skb), DMA_TO_DEVICE);
3003         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3004                 DP(NETIF_MSG_TX_QUEUED,
3005                    "SKB mapping failed - silently dropping this SKB\n");
3006                 dev_kfree_skb_any(skb);
3007                 return NETDEV_TX_OK;
3008         }
3009         /*
3010         Please read carefully. First we use one BD which we mark as start,
3011         then we have a parsing info BD (used for TSO or xsum),
3012         and only then we have the rest of the TSO BDs.
3013         (don't forget to mark the last one as last,
3014         and to unmap only AFTER you write to the BD ...)
3015         And above all, all PBD sizes are in words - NOT DWORDS!
3016         */
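
        /* Illustrative sketch (not an additional requirement): for a
         * checksum-offloaded packet with two page fragments the chain is
         * [start BD][parse BD][frag BD 0][frag BD 1], i.e. nbd starts at 2
         * (start + parse) and grows by one per mapped fragment.
         */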
3017
3018         /* get the current pkt producer now - advance it only just before
3019          * sending the packet, since page mapping may fail and drop the packet
3020          */
3021         pkt_prod = txdata->tx_pkt_prod;
3022         bd_prod = TX_BD(txdata->tx_bd_prod);
3023
3024         /* get a tx_buf and first BD
3025          * tx_start_bd may be changed during SPLIT,
3026          * but first_bd will always stay first
3027          */
3028         tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3029         tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3030         first_bd = tx_start_bd;
3031
3032         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3033         SET_FLAG(tx_start_bd->general_data,
3034                  ETH_TX_START_BD_PARSE_NBDS,
3035                  0);
3036
3037         /* header nbd */
3038         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
3039
3040         /* remember the first BD of the packet */
3041         tx_buf->first_bd = txdata->tx_bd_prod;
3042         tx_buf->skb = skb;
3043         tx_buf->flags = 0;
3044
3045         DP(NETIF_MSG_TX_QUEUED,
3046            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3047            pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3048
3049         if (vlan_tx_tag_present(skb)) {
3050                 tx_start_bd->vlan_or_ethertype =
3051                     cpu_to_le16(vlan_tx_tag_get(skb));
3052                 tx_start_bd->bd_flags.as_bitfield |=
3053                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3054         } else
3055                 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3056
3057         /* turn on parsing and get a BD */
3058         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3059
3060         if (xmit_type & XMIT_CSUM)
3061                 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3062
3063         if (!CHIP_IS_E1x(bp)) {
3064                 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3065                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3066                 /* Set PBD in checksum offload case */
3067                 if (xmit_type & XMIT_CSUM)
3068                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3069                                                      &pbd_e2_parsing_data,
3070                                                      xmit_type);
3071                 if (IS_MF_SI(bp)) {
3072                         /*
3073                          * fill in the MAC addresses in the PBD - for local
3074                          * switching
3075                          */
3076                         bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3077                                               &pbd_e2->src_mac_addr_mid,
3078                                               &pbd_e2->src_mac_addr_lo,
3079                                               eth->h_source);
3080                         bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3081                                               &pbd_e2->dst_mac_addr_mid,
3082                                               &pbd_e2->dst_mac_addr_lo,
3083                                               eth->h_dest);
3084                 }
3085
3086                 SET_FLAG(pbd_e2_parsing_data,
3087                          ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3088         } else {
3089                 u16 global_data = 0;
3090                 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3091                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3092                 /* Set PBD in checksum offload case */
3093                 if (xmit_type & XMIT_CSUM)
3094                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3095
3096                 SET_FLAG(global_data,
3097                          ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3098                 pbd_e1x->global_data |= cpu_to_le16(global_data);
3099         }
3100
3101         /* Setup the data pointer of the first BD of the packet */
3102         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3103         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3104         nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3105         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3106         pkt_size = tx_start_bd->nbytes;
3107
3108         DP(NETIF_MSG_TX_QUEUED,
3109            "first bd @%p  addr (%x:%x)  nbd %d  nbytes %d  flags %x  vlan %x\n",
3110            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3111            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
3112            tx_start_bd->bd_flags.as_bitfield,
3113            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3114
3115         if (xmit_type & XMIT_GSO) {
3116
3117                 DP(NETIF_MSG_TX_QUEUED,
3118                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
3119                    skb->len, hlen, skb_headlen(skb),
3120                    skb_shinfo(skb)->gso_size);
3121
3122                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3123
3124                 if (unlikely(skb_headlen(skb) > hlen))
3125                         bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3126                                                  &tx_start_bd, hlen,
3127                                                  bd_prod, ++nbd);
3128                 if (!CHIP_IS_E1x(bp))
3129                         bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3130                                              xmit_type);
3131                 else
3132                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3133         }
3134
3135         /* Set the PBD's parsing_data field if not zero
3136          * (for the chips newer than 57711).
3137          */
3138         if (pbd_e2_parsing_data)
3139                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3140
3141         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3142
3143         /* Handle fragmented skb */
3144         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3145                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3146
3147                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3148                                            skb_frag_size(frag), DMA_TO_DEVICE);
3149                 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3150                         unsigned int pkts_compl = 0, bytes_compl = 0;
3151
3152                         DP(NETIF_MSG_TX_QUEUED,
3153                            "Unable to map page - dropping packet...\n");
3154
3155                         /* we need to unmap all buffers already mapped
3156                          * for this SKB;
3157                          * first_bd->nbd needs to be properly updated
3158                          * before the call to bnx2x_free_tx_pkt
3159                          */
3160                         first_bd->nbd = cpu_to_le16(nbd);
3161                         bnx2x_free_tx_pkt(bp, txdata,
3162                                           TX_BD(txdata->tx_pkt_prod),
3163                                           &pkts_compl, &bytes_compl);
3164                         return NETDEV_TX_OK;
3165                 }
3166
3167                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3168                 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3169                 if (total_pkt_bd == NULL)
3170                         total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3171
3172                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3173                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3174                 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3175                 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3176                 nbd++;
3177
3178                 DP(NETIF_MSG_TX_QUEUED,
3179                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
3180                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3181                    le16_to_cpu(tx_data_bd->nbytes));
3182         }
3183
3184         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3185
3186         /* update with actual num BDs */
3187         first_bd->nbd = cpu_to_le16(nbd);
3188
3189         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3190
3191         /* now send a tx doorbell, counting the next BD
3192          * if the packet contains or ends with it
3193          */
3194         if (TX_BD_POFF(bd_prod) < nbd)
3195                 nbd++;
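
        /* One way to read the check above (illustrative): if the page offset
         * of bd_prod is smaller than nbd, the BD chain wrapped onto the next
         * ring page and consumed its next-page pointer BD, so that extra BD
         * is charged to the producers as well.
         */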
3196
3197         /* total_pkt_bytes should be set on the first data BD if
3198          * it's not an LSO packet and there is more than one
3199          * data BD. In this case pkt_size is limited by the MTU value.
3200          * However, we prefer to set it for an LSO packet as well (even
3201          * though we don't have to) in order to save some CPU cycles in
3202          * the non-LSO case, where we care about them much more.
3203          */
3204         if (total_pkt_bd != NULL)
3205                 total_pkt_bd->total_pkt_bytes = pkt_size;
3206
3207         if (pbd_e1x)
3208                 DP(NETIF_MSG_TX_QUEUED,
3209                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
3210                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3211                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3212                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3213                     le16_to_cpu(pbd_e1x->total_hlen_w));
3214         if (pbd_e2)
3215                 DP(NETIF_MSG_TX_QUEUED,
3216                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
3217                    pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3218                    pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3219                    pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3220                    pbd_e2->parsing_data);
3221         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
3222
3223         netdev_tx_sent_queue(txq, skb->len);
3224
3225         skb_tx_timestamp(skb);
3226
3227         txdata->tx_pkt_prod++;
3228         /*
3229          * Make sure that the BD data is updated before updating the producer
3230          * since FW might read the BD right after the producer is updated.
3231          * This is only applicable for weak-ordered memory model archs such
3232          * as IA-64. The following barrier is also mandatory since the FW
3233          * assumes packets always have BDs.
3234          */
3235         wmb();
3236
3237         txdata->tx_db.data.prod += nbd;
3238         barrier();
3239
3240         DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3241
3242         mmiowb();
3243
3244         txdata->tx_bd_prod += nbd;
3245
3246         if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3247                 netif_tx_stop_queue(txq);
3248
3249                 /* the paired memory barrier is in bnx2x_tx_int(); we must keep
3250                  * the ordering between the set_bit() in netif_tx_stop_queue()
3251                  * and the read of fp->bd_tx_cons */
3252                 smp_mb();
3253
3254                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3255                 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
3256                         netif_tx_wake_queue(txq);
3257         }
3258         txdata->tx_pkt++;
3259
3260         return NETDEV_TX_OK;
3261 }
3262
3263 /**
3264  * bnx2x_setup_tc - routine to configure the net_device for multiple TCs
3265  *
3266  * @dev: net device to configure
3267  * @num_tc: number of traffic classes to enable
3268  *
3269  * callback connected to the ndo_setup_tc function pointer
3270  */
3271 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3272 {
3273         int cos, prio, count, offset;
3274         struct bnx2x *bp = netdev_priv(dev);
3275
3276         /* setup tc must be called under rtnl lock */
3277         ASSERT_RTNL();
3278
3279         /* no traffic classes requested: reset the tc mapping and return */
3280         if (!num_tc) {
3281                 netdev_reset_tc(dev);
3282                 return 0;
3283         }
3284
3285         /* requested to support too many traffic classes */
3286         if (num_tc > bp->max_cos) {
3287                 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3288                           num_tc, bp->max_cos);
3289                 return -EINVAL;
3290         }
3291
3292         /* declare amount of supported traffic classes */
3293         if (netdev_set_num_tc(dev, num_tc)) {
3294                 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3295                 return -EINVAL;
3296         }
3297
3298         /* configure priority to traffic class mapping */
3299         for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3300                 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3301                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3302                    "mapping priority %d to tc %d\n",
3303                    prio, bp->prio_to_cos[prio]);
3304         }
3305
3306
3307         /* Use this configuration to differentiate tc0 from the other COSes.
3308            This can be used for ETS or PFC, and saves the effort of setting
3309            up a multi-class queueing discipline or negotiating DCBX with a switch
3310         netdev_set_prio_tc_map(dev, 0, 0);
3311         DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3312         for (prio = 1; prio < 16; prio++) {
3313                 netdev_set_prio_tc_map(dev, prio, 1);
3314                 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3315         } */
3316
3317         /* configure traffic class to transmission queue mapping */
3318         for (cos = 0; cos < bp->max_cos; cos++) {
3319                 count = BNX2X_NUM_ETH_QUEUES(bp);
3320                 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
3321                 netdev_set_tc_queue(dev, cos, count, offset);
3322                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3323                    "mapping tc %d to offset %d count %d\n",
3324                    cos, offset, count);
3325         }
3326
3327         return 0;
3328 }
3329
3330 /* called with rtnl_lock */
3331 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3332 {
3333         struct sockaddr *addr = p;
3334         struct bnx2x *bp = netdev_priv(dev);
3335         int rc = 0;
3336
3337         if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3338                 BNX2X_ERR("Requested MAC address is not valid\n");
3339                 return -EINVAL;
3340         }
3341
3342 #ifdef BCM_CNIC
3343         if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3344             !is_zero_ether_addr(addr->sa_data)) {
3345                 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3346                 return -EINVAL;
3347         }
3348 #endif
3349
3350         if (netif_running(dev))  {
3351                 rc = bnx2x_set_eth_mac(bp, false);
3352                 if (rc)
3353                         return rc;
3354         }
3355
3356         dev->addr_assign_type &= ~NET_ADDR_RANDOM;
3357         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3358
3359         if (netif_running(dev))
3360                 rc = bnx2x_set_eth_mac(bp, true);
3361
3362         return rc;
3363 }
3364
3365 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3366 {
3367         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3368         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3369         u8 cos;
3370
3371         /* Common */
3372 #ifdef BCM_CNIC
3373         if (IS_FCOE_IDX(fp_index)) {
3374                 memset(sb, 0, sizeof(union host_hc_status_block));
3375                 fp->status_blk_mapping = 0;
3376
3377         } else {
3378 #endif
3379                 /* status blocks */
3380                 if (!CHIP_IS_E1x(bp))
3381                         BNX2X_PCI_FREE(sb->e2_sb,
3382                                        bnx2x_fp(bp, fp_index,
3383                                                 status_blk_mapping),
3384                                        sizeof(struct host_hc_status_block_e2));
3385                 else
3386                         BNX2X_PCI_FREE(sb->e1x_sb,
3387                                        bnx2x_fp(bp, fp_index,
3388                                                 status_blk_mapping),
3389                                        sizeof(struct host_hc_status_block_e1x));
3390 #ifdef BCM_CNIC
3391         }
3392 #endif
3393         /* Rx */
3394         if (!skip_rx_queue(bp, fp_index)) {
3395                 bnx2x_free_rx_bds(fp);
3396
3397                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3398                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3399                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3400                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
3401                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
3402
3403                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3404                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
3405                                sizeof(struct eth_fast_path_rx_cqe) *
3406                                NUM_RCQ_BD);
3407
3408                 /* SGE ring */
3409                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3410                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3411                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
3412                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3413         }
3414
3415         /* Tx */
3416         if (!skip_tx_queue(bp, fp_index)) {
3417                 /* fastpath tx rings: tx_buf tx_desc */
3418                 for_each_cos_in_tx_queue(fp, cos) {
3419                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3420
3421                         DP(NETIF_MSG_IFDOWN,
3422                            "freeing tx memory of fp %d cos %d cid %d\n",
3423                            fp_index, cos, txdata->cid);
3424
3425                         BNX2X_FREE(txdata->tx_buf_ring);
3426                         BNX2X_PCI_FREE(txdata->tx_desc_ring,
3427                                 txdata->tx_desc_mapping,
3428                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3429                 }
3430         }
3431         /* end of fastpath */
3432 }
3433
3434 void bnx2x_free_fp_mem(struct bnx2x *bp)
3435 {
3436         int i;
3437         for_each_queue(bp, i)
3438                 bnx2x_free_fp_mem_at(bp, i);
3439 }
3440
3441 static void set_sb_shortcuts(struct bnx2x *bp, int index)
3442 {
3443         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3444         if (!CHIP_IS_E1x(bp)) {
3445                 bnx2x_fp(bp, index, sb_index_values) =
3446                         (__le16 *)status_blk.e2_sb->sb.index_values;
3447                 bnx2x_fp(bp, index, sb_running_index) =
3448                         (__le16 *)status_blk.e2_sb->sb.running_index;
3449         } else {
3450                 bnx2x_fp(bp, index, sb_index_values) =
3451                         (__le16 *)status_blk.e1x_sb->sb.index_values;
3452                 bnx2x_fp(bp, index, sb_running_index) =
3453                         (__le16 *)status_blk.e1x_sb->sb.running_index;
3454         }
3455 }
3456
3457 /* Returns the number of actually allocated BDs */
3458 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3459                               int rx_ring_size)
3460 {
3461         struct bnx2x *bp = fp->bp;
3462         u16 ring_prod, cqe_ring_prod;
3463         int i, failure_cnt = 0;
3464
3465         fp->rx_comp_cons = 0;
3466         cqe_ring_prod = ring_prod = 0;
3467
3468         /* This routine is called only during init, so
3469          * fp->eth_q_stats.rx_skb_alloc_failed = 0
3470          */
3471         for (i = 0; i < rx_ring_size; i++) {
3472                 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3473                         failure_cnt++;
3474                         continue;
3475                 }
3476                 ring_prod = NEXT_RX_IDX(ring_prod);
3477                 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3478                 WARN_ON(ring_prod <= (i - failure_cnt));
3479         }
3480
3481         if (failure_cnt)
3482                 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3483                           i - failure_cnt, fp->index);
3484
3485         fp->rx_bd_prod = ring_prod;
3486         /* Limit the CQE producer by the CQE ring size */
3487         fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3488                                cqe_ring_prod);
3489         fp->rx_pkt = fp->rx_calls = 0;
3490
3491         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3492
3493         return i - failure_cnt;
3494 }
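
/* Illustrative example (comment only): with rx_ring_size = 1000 and 3 failed
 * buffer allocations the function returns 997; the BD and CQE producers only
 * advance for successful allocations, and the 3 failures are accumulated in
 * rx_skb_alloc_failed.
 */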
3495
3496 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3497 {
3498         int i;
3499
3500         for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3501                 struct eth_rx_cqe_next_page *nextpg;
3502
3503                 nextpg = (struct eth_rx_cqe_next_page *)
3504                         &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3505                 nextpg->addr_hi =
3506                         cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3507                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3508                 nextpg->addr_lo =
3509                         cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3510                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3511         }
3512 }
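
/* Illustrative note (comment only): the loop above rewrites the last CQE of
 * RCQ page (i - 1) as a next-page pointer to page (i % NUM_RCQ_RINGS), so
 * the final page links back to page 0 and the completion queue forms a ring
 * of pages.
 */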
3513
3514 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3515 {
3516         union host_hc_status_block *sb;
3517         struct bnx2x_fastpath *fp = &bp->fp[index];
3518         int ring_size = 0;
3519         u8 cos;
3520         int rx_ring_size = 0;
3521
3522 #ifdef BCM_CNIC
3523         if (!bp->rx_ring_size &&
3524             (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3525                 rx_ring_size = MIN_RX_SIZE_NONTPA;
3526                 bp->rx_ring_size = rx_ring_size;
3527         } else
3528 #endif
3529         if (!bp->rx_ring_size) {
3530                 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3531
3532                 if (CHIP_IS_E3(bp)) {
3533                         u32 cfg = SHMEM_RD(bp,
3534                                            dev_info.port_hw_config[BP_PORT(bp)].
3535                                            default_cfg);
3536
3537                         /* Decrease ring size for 1G functions */
3538                         if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3539                             PORT_HW_CFG_NET_SERDES_IF_SGMII)
3540                                 rx_ring_size /= 10;
3541                 }
3542
3543                 /* allocate at least the number of buffers required by the FW */
3544                 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3545                                      MIN_RX_SIZE_TPA, rx_ring_size);
3546
3547                 bp->rx_ring_size = rx_ring_size;
3548         } else /* if rx_ring_size specified - use it */
3549                 rx_ring_size = bp->rx_ring_size;
3550
3551         /* Common */
3552         sb = &bnx2x_fp(bp, index, status_blk);
3553 #ifdef BCM_CNIC
3554         if (!IS_FCOE_IDX(index)) {
3555 #endif
3556                 /* status blocks */
3557                 if (!CHIP_IS_E1x(bp))
3558                         BNX2X_PCI_ALLOC(sb->e2_sb,
3559                                 &bnx2x_fp(bp, index, status_blk_mapping),
3560                                 sizeof(struct host_hc_status_block_e2));
3561                 else
3562                         BNX2X_PCI_ALLOC(sb->e1x_sb,
3563                                 &bnx2x_fp(bp, index, status_blk_mapping),
3564                             sizeof(struct host_hc_status_block_e1x));
3565 #ifdef BCM_CNIC
3566         }
3567 #endif
3568
3569         /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3570          * set shortcuts for it.
3571          */
3572         if (!IS_FCOE_IDX(index))
3573                 set_sb_shortcuts(bp, index);
3574
3575         /* Tx */
3576         if (!skip_tx_queue(bp, index)) {
3577                 /* fastpath tx rings: tx_buf tx_desc */
3578                 for_each_cos_in_tx_queue(fp, cos) {
3579                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3580
3581                         DP(NETIF_MSG_IFUP,
3582                            "allocating tx memory of fp %d cos %d\n",
3583                            index, cos);
3584
3585                         BNX2X_ALLOC(txdata->tx_buf_ring,
3586                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3587                         BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3588                                 &txdata->tx_desc_mapping,
3589                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3590                 }
3591         }
3592
3593         /* Rx */
3594         if (!skip_rx_queue(bp, index)) {
3595                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3596                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3597                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3598                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3599                                 &bnx2x_fp(bp, index, rx_desc_mapping),
3600                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3601
3602                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3603                                 &bnx2x_fp(bp, index, rx_comp_mapping),
3604                                 sizeof(struct eth_fast_path_rx_cqe) *
3605                                 NUM_RCQ_BD);
3606
3607                 /* SGE ring */
3608                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3609                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3610                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3611                                 &bnx2x_fp(bp, index, rx_sge_mapping),
3612                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3613                 /* RX BD ring */
3614                 bnx2x_set_next_page_rx_bd(fp);
3615
3616                 /* CQ ring */
3617                 bnx2x_set_next_page_rx_cq(fp);
3618
3619                 /* BDs */
3620                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3621                 if (ring_size < rx_ring_size)
3622                         goto alloc_mem_err;
3623         }
3624
3625         return 0;
3626
3627 /* handles low memory cases */
3628 alloc_mem_err:
3629         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3630                                                 index, ring_size);
3631         /* FW will drop all packets if the queue is not big enough;
3632          * in that case we disable the queue.
3633          * The minimum size differs for OOO, TPA and non-TPA queues.
3634          */
3635         if (ring_size < (fp->disable_tpa ?
3636                                 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3637                         /* release memory allocated for this queue */
3638                         bnx2x_free_fp_mem_at(bp, index);
3639                         return -ENOMEM;
3640         }
3641         return 0;
3642 }
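
/* Illustrative reading of the low-memory policy above: if the number of
 * allocated RX BDs falls below the queue's minimum (MIN_RX_SIZE_TPA, or
 * MIN_RX_SIZE_NONTPA when TPA is disabled), the queue's memory is released
 * and -ENOMEM is returned; a ring that is smaller than requested but still
 * above the minimum is kept and 0 is returned.
 */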
3643
3644 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3645 {
3646         int i;
3647
3648         /**
3649          * 1. Allocate FP for leading - fatal if error
3650          * 2. {CNIC} Allocate FCoE FP - fatal if error
3651          * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3652          * 4. Allocate RSS - fix number of queues if error
3653          */
3654
3655         /* leading */
3656         if (bnx2x_alloc_fp_mem_at(bp, 0))
3657                 return -ENOMEM;
3658
3659 #ifdef BCM_CNIC
3660         if (!NO_FCOE(bp))
3661                 /* FCoE */
3662                 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
3663                         /* we fail the load process instead of marking
3664                          * NO_FCOE_FLAG
3665                          */
3666                         return -ENOMEM;
3667 #endif
3668
3669         /* RSS */
3670         for_each_nondefault_eth_queue(bp, i)
3671                 if (bnx2x_alloc_fp_mem_at(bp, i))
3672                         break;
3673
3674         /* handle memory failures */
3675         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3676                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3677
3678                 WARN_ON(delta < 0);
3679 #ifdef BCM_CNIC
3680                 /*
3681                  * move the non-eth FPs so they follow the last eth FP;
3682                  * this must be done in the order
3683                  * FCOE_IDX < FWD_IDX < OOO_IDX
3684                  */
3685
3686                 /* move the FCoE fp even if NO_FCOE_FLAG is on */
3687                 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
3688 #endif
3689                 bp->num_queues -= delta;
3690                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3691                           bp->num_queues + delta, bp->num_queues);
3692         }
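
        /* Illustrative example, assuming FCOE_IDX sits right after the ETH
         * queues: with 8 ETH queues requested and allocation failing at
         * i = 5, delta = 3, so the FCoE fastpath moves from index 8 to
         * index 5 and bp->num_queues shrinks by 3.
         */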
3693
3694         return 0;
3695 }
3696
3697 void bnx2x_free_mem_bp(struct bnx2x *bp)
3698 {
3699         kfree(bp->fp->tpa_info);
3700         kfree(bp->fp);
3701         kfree(bp->sp_objs);
3702         kfree(bp->fp_stats);
3703         kfree(bp->bnx2x_txq);
3704         kfree(bp->msix_table);
3705         kfree(bp->ilt);
3706 }
3707
3708 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3709 {
3710         struct bnx2x_fastpath *fp;
3711         struct msix_entry *tbl;
3712         struct bnx2x_ilt *ilt;
3713         int msix_table_size = 0;
3714         int fp_array_size;
3715         int i;
3716
3717         /*
3718          * The biggest MSI-X table we might need is the maximum number of fast
3719          * path IGU SBs plus the default SB (for the PF).
3720          */
3721         msix_table_size = bp->igu_sb_cnt + 1;
3722
3723         /* fp array: RSS plus CNIC related L2 queues */
3724         fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
3725         BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3726
3727         fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
3728         if (!fp)
3729                 goto alloc_err;
3730         for (i = 0; i < fp_array_size; i++) {
3731                 fp[i].tpa_info =
3732                         kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
3733                                 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
3734                 if (!(fp[i].tpa_info))
3735                         goto alloc_err;
3736         }
3737
3738         bp->fp = fp;
3739
3740         /* allocate sp objs */
3741         bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
3742                               GFP_KERNEL);
3743         if (!bp->sp_objs)
3744                 goto alloc_err;
3745
3746         /* allocate fp_stats */
3747         bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
3748                                GFP_KERNEL);
3749         if (!bp->fp_stats)
3750                 goto alloc_err;
3751
3752         /* Allocate memory for the transmission queues array */
3753         bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
3754 #ifdef BCM_CNIC
3755         bp->bnx2x_txq_size++;
3756 #endif
3757         bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
3758                                 sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
3759         if (!bp->bnx2x_txq)
3760                 goto alloc_err;
3761
3762         /* msix table */
3763         tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3764         if (!tbl)
3765                 goto alloc_err;
3766         bp->msix_table = tbl;
3767
3768         /* ilt */
3769         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3770         if (!ilt)
3771                 goto alloc_err;
3772         bp->ilt = ilt;
3773
3774         return 0;
3775 alloc_err:
3776         bnx2x_free_mem_bp(bp);
3777         return -ENOMEM;
3778 }
3780
3781 int bnx2x_reload_if_running(struct net_device *dev)
3782 {
3783         struct bnx2x *bp = netdev_priv(dev);
3784
3785         if (unlikely(!netif_running(dev)))
3786                 return 0;
3787
3788         bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
3789         return bnx2x_nic_load(bp, LOAD_NORMAL);
3790 }
3791
3792 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3793 {
3794         u32 sel_phy_idx = 0;
3795         if (bp->link_params.num_phys <= 1)
3796                 return INT_PHY;
3797
3798         if (bp->link_vars.link_up) {
3799                 sel_phy_idx = EXT_PHY1;
3800                 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3801                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3802                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3803                         sel_phy_idx = EXT_PHY2;
3804         } else {
3805
3806                 switch (bnx2x_phy_selection(&bp->link_params)) {
3807                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3808                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3809                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3810                        sel_phy_idx = EXT_PHY1;
3811                        break;
3812                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3813                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3814                        sel_phy_idx = EXT_PHY2;
3815                        break;
3816                 }
3817         }
3818
3819         return sel_phy_idx;
3820 }
3821
3822 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3823 {
3824         u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3825         /*
3826          * The selected active PHY is always the one after swapping (in case PHY
3827          * swapping is enabled). So when swapping is enabled, we need to reverse
3828          * the configuration.
3829          */
3830
3831         if (bp->link_params.multi_phy_config &
3832             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3833                 if (sel_phy_idx == EXT_PHY1)
3834                         sel_phy_idx = EXT_PHY2;
3835                 else if (sel_phy_idx == EXT_PHY2)
3836                         sel_phy_idx = EXT_PHY1;
3837         }
3838         return LINK_CONFIG_IDX(sel_phy_idx);
3839 }
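
/* Illustrative example (comment only): with PHY swapping enabled and
 * EXT_PHY1 currently active, the index is flipped to EXT_PHY2 before being
 * folded into LINK_CONFIG_IDX(), so the returned value addresses EXT_PHY2's
 * configuration entry.
 */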
3840
3841 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3842 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3843 {
3844         struct bnx2x *bp = netdev_priv(dev);
3845         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3846
3847         switch (type) {
3848         case NETDEV_FCOE_WWNN:
3849                 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3850                                 cp->fcoe_wwn_node_name_lo);
3851                 break;
3852         case NETDEV_FCOE_WWPN:
3853                 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3854                                 cp->fcoe_wwn_port_name_lo);
3855                 break;
3856         default:
3857                 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
3858                 return -EINVAL;
3859         }
3860
3861         return 0;
3862 }
3863 #endif
3864
3865 /* called with rtnl_lock */
3866 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3867 {
3868         struct bnx2x *bp = netdev_priv(dev);
3869
3870         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3871                 BNX2X_ERR("Can't change the MTU during parity recovery\n");
3872                 return -EAGAIN;
3873         }
3874
3875         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3876             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
3877                 BNX2X_ERR("Can't support requested MTU size\n");
3878                 return -EINVAL;
3879         }
3880
3881         /* This does not race with packet allocation
3882          * because the actual alloc size is
3883          * only updated as part of load
3884          */
3885         dev->mtu = new_mtu;
3886
3887         return bnx2x_reload_if_running(dev);
3888 }
3889
3890 netdev_features_t bnx2x_fix_features(struct net_device *dev,
3891                                      netdev_features_t features)
3892 {
3893         struct bnx2x *bp = netdev_priv(dev);
3894
3895         /* TPA requires Rx CSUM offloading */
3896         if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
3897                 features &= ~NETIF_F_LRO;
3898                 features &= ~NETIF_F_GRO;
3899         }
3900
3901         return features;
3902 }
3903
3904 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
3905 {
3906         struct bnx2x *bp = netdev_priv(dev);
3907         u32 flags = bp->flags;
3908         bool bnx2x_reload = false;
3909
3910         if (features & NETIF_F_LRO)
3911                 flags |= TPA_ENABLE_FLAG;
3912         else
3913                 flags &= ~TPA_ENABLE_FLAG;
3914
3915         if (features & NETIF_F_GRO)
3916                 flags |= GRO_ENABLE_FLAG;
3917         else
3918                 flags &= ~GRO_ENABLE_FLAG;
3919
3920         if (features & NETIF_F_LOOPBACK) {
3921                 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3922                         bp->link_params.loopback_mode = LOOPBACK_BMAC;
3923                         bnx2x_reload = true;
3924                 }
3925         } else {
3926                 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3927                         bp->link_params.loopback_mode = LOOPBACK_NONE;
3928                         bnx2x_reload = true;
3929                 }
3930         }
3931
3932         if (flags ^ bp->flags) {
3933                 bp->flags = flags;
3934                 bnx2x_reload = true;
3935         }
3936
3937         if (bnx2x_reload) {
3938                 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3939                         return bnx2x_reload_if_running(dev);
3940                 /* else: bnx2x_nic_load() will be called at end of recovery */
3941         }
3942
3943         return 0;
3944 }
3945
3946 void bnx2x_tx_timeout(struct net_device *dev)
3947 {
3948         struct bnx2x *bp = netdev_priv(dev);
3949
3950 #ifdef BNX2X_STOP_ON_ERROR
3951         if (!bp->panic)
3952                 bnx2x_panic();
3953 #endif
3954
3955         smp_mb__before_clear_bit();
3956         set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3957         smp_mb__after_clear_bit();
3958
3959         /* This allows the netif to be shutdown gracefully before resetting */
3960         schedule_delayed_work(&bp->sp_rtnl_task, 0);
3961 }
3962
3963 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3964 {
3965         struct net_device *dev = pci_get_drvdata(pdev);
3966         struct bnx2x *bp;
3967
3968         if (!dev) {
3969                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3970                 return -ENODEV;
3971         }
3972         bp = netdev_priv(dev);
3973
3974         rtnl_lock();
3975
3976         pci_save_state(pdev);
3977
3978         if (!netif_running(dev)) {
3979                 rtnl_unlock();
3980                 return 0;
3981         }
3982
3983         netif_device_detach(dev);
3984
3985         bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
3986
3987         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3988
3989         rtnl_unlock();
3990
3991         return 0;
3992 }
3993
3994 int bnx2x_resume(struct pci_dev *pdev)
3995 {
3996         struct net_device *dev = pci_get_drvdata(pdev);
3997         struct bnx2x *bp;
3998         int rc;
3999
4000         if (!dev) {
4001                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4002                 return -ENODEV;
4003         }
4004         bp = netdev_priv(dev);
4005
4006         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4007                 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4008                 return -EAGAIN;
4009         }
4010
4011         rtnl_lock();
4012
4013         pci_restore_state(pdev);
4014
4015         if (!netif_running(dev)) {
4016                 rtnl_unlock();
4017                 return 0;
4018         }
4019
4020         bnx2x_set_power_state(bp, PCI_D0);
4021         netif_device_attach(dev);
4022
4023         rc = bnx2x_nic_load(bp, LOAD_OPEN);
4024
4025         rtnl_unlock();
4026
4027         return rc;
4028 }
4029
4030
4031 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4032                               u32 cid)
4033 {
4034         /* ustorm cxt validation */
4035         cxt->ustorm_ag_context.cdu_usage =
4036                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4037                         CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4038         /* xcontext validation */
4039         cxt->xstorm_ag_context.cdu_reserved =
4040                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4041                         CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4042 }
4043
4044 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4045                                     u8 fw_sb_id, u8 sb_index,
4046                                     u8 ticks)
4047 {
4048
4049         u32 addr = BAR_CSTRORM_INTMEM +
4050                    CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4051         REG_WR8(bp, addr, ticks);
4052         DP(NETIF_MSG_IFUP,
4053            "port %x fw_sb_id %d sb_index %d ticks %d\n",
4054            port, fw_sb_id, sb_index, ticks);
4055 }
4056
4057 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4058                                     u16 fw_sb_id, u8 sb_index,
4059                                     u8 disable)
4060 {
4061         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4062         u32 addr = BAR_CSTRORM_INTMEM +
4063                    CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4064         u16 flags = REG_RD16(bp, addr);
4065         /* clear and set */
4066         flags &= ~HC_INDEX_DATA_HC_ENABLED;
4067         flags |= enable_flag;
4068         REG_WR16(bp, addr, flags);
4069         DP(NETIF_MSG_IFUP,
4070            "port %x fw_sb_id %d sb_index %d disable %d\n",
4071            port, fw_sb_id, sb_index, disable);
4072 }
4073
4074 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4075                                     u8 sb_index, u8 disable, u16 usec)
4076 {
4077         int port = BP_PORT(bp);
4078         u8 ticks = usec / BNX2X_BTR;
4079
4080         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4081
4082         disable = disable ? 1 : (usec ? 0 : 1);
4083         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4084 }
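
/* Illustrative truth table (comment only) for the disable logic above:
 *   caller disable = 1             -> index disabled
 *   caller disable = 0, usec > 0   -> index enabled, timeout = usec / BNX2X_BTR
 *   caller disable = 0, usec == 0  -> index disabled (a zero timeout is
 *                                     treated as "disable")
 */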