/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
        int i;

        /* Add NAPI objects */
        for_each_rx_queue_cnic(bp, i) {
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, NAPI_POLL_WEIGHT);
                napi_hash_add(&bnx2x_fp(bp, i, napi));
        }
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
        int i;

        /* Add NAPI objects */
        for_each_eth_queue(bp, i) {
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, NAPI_POLL_WEIGHT);
                napi_hash_add(&bnx2x_fp(bp, i, napi));
        }
}

static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
        int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

        /* Reduce memory usage in kdump environment by using only one queue */
        if (reset_devices)
                nq = 1;

        nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
        return nq;
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:         driver handle
 * @from:       source FP index
 * @to:         destination FP index
 *
 * Makes sure the contents of bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
        struct bnx2x_fastpath *from_fp = &bp->fp[from];
        struct bnx2x_fastpath *to_fp = &bp->fp[to];
        struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
        struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
        struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
        struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
        int old_max_eth_txqs, new_max_eth_txqs;
        int old_txdata_index = 0, new_txdata_index = 0;
        struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

        /* Copy the NAPI object as it has been already initialized */
        from_fp->napi = to_fp->napi;

        /* Move bnx2x_fastpath contents */
        memcpy(to_fp, from_fp, sizeof(*to_fp));
        to_fp->index = to;

        /* Retain the tpa_info of the original `to' version as we don't want
         * 2 FPs to contain the same tpa_info pointer.
         */
        to_fp->tpa_info = old_tpa_info;

        /* move sp_objs contents as well, as their indices match fp ones */
        memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

        /* move fp_stats contents as well, as their indices match fp ones */
        memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

        /* Update txdata pointers in fp and move txdata content accordingly:
         * Each fp consumes 'max_cos' txdata structures, so the index should be
         * decremented by max_cos x delta.
         */

        old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
        new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
                                (bp)->max_cos;
        if (from == FCOE_IDX(bp)) {
                old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
                new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
        }

        memcpy(&bp->bnx2x_txq[new_txdata_index],
               &bp->bnx2x_txq[old_txdata_index],
               sizeof(struct bnx2x_fp_txdata));
        to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:        driver handle
 * @buf:       character buffer to fill with the fw name
 * @buf_len:   length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
        if (IS_PF(bp)) {
                u8 phy_fw_ver[PHY_FW_VER_LEN];

                phy_fw_ver[0] = '\0';
                bnx2x_get_ext_phy_fw_version(&bp->link_params,
                                             phy_fw_ver, PHY_FW_VER_LEN);
                strlcpy(buf, bp->fw_ver, buf_len);
                snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
                         "bc %d.%d.%d%s%s",
                         (bp->common.bc_ver & 0xff0000) >> 16,
                         (bp->common.bc_ver & 0xff00) >> 8,
                         (bp->common.bc_ver & 0xff),
                         ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
        } else {
                bnx2x_vf_fill_fw_str(bp, buf, buf_len);
        }
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp: driver handle
 * @delta:      number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
        int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

        /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
         * backward along the array could cause memory to be overwritten
         */
        for (cos = 1; cos < bp->max_cos; cos++) {
                for (i = 0; i < old_eth_num - delta; i++) {
                        struct bnx2x_fastpath *fp = &bp->fp[i];
                        int new_idx = cos * (old_eth_num - delta) + i;

                        memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
                               sizeof(struct bnx2x_fp_txdata));
                        fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
                }
        }
}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
                             u16 idx, unsigned int *pkts_compl,
                             unsigned int *bytes_compl)
{
        struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;
        u16 split_bd_len = 0;

        /* prefetch skb end pointer to speedup dev_kfree_skb() */
        prefetch(&skb->end);

        DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
           txdata->txq_index, idx, tx_buf, skb);

        tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
                /* Skip second parse bd... */
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
                split_bd_len = BD_UNMAP_LEN(tx_data_bd);
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* unmap first bd */
        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
                         DMA_TO_DEVICE);

        /* now free frags */
        while (nbd > 0) {

                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
                dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        if (likely(skb)) {
                (*pkts_compl)++;
                (*bytes_compl) += skb->len;
        }

        dev_kfree_skb_any(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

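/* bnx2x_tx_int() walks the Tx completion ring from the driver's software
 * consumer up to the hardware consumer reported in the status block, frees
 * the completed skbs and, if the netdev queue had been stopped, wakes it
 * once enough descriptors are available again.
 */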
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
        unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -1;
#endif

        txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
        sw_cons = txdata->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                DP(NETIF_MSG_TX_DONE,
                   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

                bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
                                            &pkts_compl, &bytes_compl);

                sw_cons++;
        }

        netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

        txdata->tx_pkt_cons = sw_cons;
        txdata->tx_bd_cons = bd_cons;

        /* Need to make the tx_bd_cons update visible to start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that
         * start_xmit() will miss it and cause the queue to be stopped
         * forever.
         * On the other hand we need an rmb() here to ensure the proper
         * ordering of bit testing in the following
         * netif_tx_queue_stopped(txq) call.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq))) {
                /* Taking tx_lock() is needed to prevent re-enabling the queue
                 * while it's empty. This could have happened if rx_action()
                 * gets suspended in bnx2x_tx_int() after the condition before
                 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
                 *
                 * stops the queue->sees fresh tx_bd_cons->releases the queue->
                 * sends some packets consuming the whole queue again->
                 * stops the queue
                 */

                __netif_tx_lock(txq, smp_processor_id());

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
        return 0;
}

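/* Track the highest SGE index seen so far; SUB_S16() yields a signed 16-bit
 * difference, which is expected to keep the comparison correct across index
 * wraparound.
 */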
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

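/* Mark the SGEs consumed by an aggregation as used in fp->sge_mask and
 * advance the SGE producer over any mask elements whose pages have all
 * been consumed.
 */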
static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                         u16 sge_len,
                                         struct eth_end_agg_rx_cqe *cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                BIT_VEC64_CLEAR_BIT(fp->sge_mask,
                        RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp,
                le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
                delta += BIT_VEC64_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
                            const struct eth_fast_path_rx_cqe *cqe,
                            enum pkt_hash_types *rxhash_type)
{
        /* Get Toeplitz hash from CQE */
        if ((bp->dev->features & NETIF_F_RXHASH) &&
            (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
                enum eth_rss_hash_type htype;

                htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
                *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
                                (htype == TCP_IPV6_HASH_TYPE)) ?
                               PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

                return le32_to_cpu(cqe->rss_hash_result);
        }
        *rxhash_type = PKT_HASH_TYPE_NONE;
        return 0;
}

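/* Begin a TPA aggregation: move the buffer holding the first segment from
 * the RX ring into the per-queue "bin" (tpa_info->first_buf), map the bin's
 * spare buffer into the producer slot, and save the parsing state that
 * bnx2x_tpa_stop() will need when the aggregation completes.
 */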
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            u16 cons, u16 prod,
                            struct eth_fast_path_rx_cqe *cqe)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;
        struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
        struct sw_rx_bd *first_buf = &tpa_info->first_buf;

        /* print error if current state != stop */
        if (tpa_info->tpa_state != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        /* Try to map an empty data buffer from the aggregation info */
        mapping = dma_map_single(&bp->pdev->dev,
                                 first_buf->data + NET_SKB_PAD,
                                 fp->rx_buf_size, DMA_FROM_DEVICE);
        /*
         *  ...if it fails - move the skb from the consumer to the producer
         *  and set the current aggregation state as ERROR to drop it
         *  when TPA_STOP arrives.
         */

        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                /* Move the BD from the consumer to the producer */
                bnx2x_reuse_rx_data(fp, cons, prod);
                tpa_info->tpa_state = BNX2X_TPA_ERROR;
                return;
        }

        /* move empty data from pool to prod */
        prod_rx_buf->data = first_buf->data;
        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
        /* point prod_bd to new data */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        /* move partial skb from cons to pool (don't unmap yet) */
        *first_buf = *cons_rx_buf;

        /* mark bin state as START */
        tpa_info->parsing_flags =
                le16_to_cpu(cqe->pars_flags.flags);
        tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
        tpa_info->tpa_state = BNX2X_TPA_START;
        tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
        tpa_info->placement_offset = cqe->placement_offset;
        tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
        if (fp->mode == TPA_MODE_GRO) {
                u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
                tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
                tpa_info->gro_size = gro_size;
        }

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *              nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN      12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:                packet skb
 * @parsing_flags:      parsing flags from the START CQE
 * @len_on_bd:          total length of the first packet for the
 *                      aggregation.
 * @pkt_len:            length of all segments
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
                                 u16 len_on_bd, unsigned int pkt_len,
                                 u16 num_of_coalesced_segs)
{
        /* TPA aggregation won't have either IP options or TCP options
         * other than timestamp or IPv6 extension headers.
         */
        u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

        if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
            PRS_FLAG_OVERETH_IPV6) {
                hdrs_len += sizeof(struct ipv6hdr);
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
        } else {
                hdrs_len += sizeof(struct iphdr);
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
        }

        /* Check if there was a TCP timestamp; if there is, it will always
         * be 12 bytes long: nop nop kind length echo val.
         *
         * Otherwise FW would close the aggregation.
         */
        if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
                hdrs_len += TPA_TSTAMP_OPT_LEN;

        skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

        /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
         * to skb_shinfo(skb)->gso_segs
         */
        NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}

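/* Allocate and DMA-map an SGE page (SGE_PAGES bytes) for the given ring
 * index and publish its address to the hardware SGE descriptor.
 */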
static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                              u16 index, gfp_t gfp_mask)
{
        struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL)) {
                BNX2X_ERR("Can't alloc sge\n");
                return -ENOMEM;
        }

        mapping = dma_map_page(&bp->pdev->dev, page, 0,
                               SGE_PAGES, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                BNX2X_ERR("Can't map sge\n");
                return -ENOMEM;
        }

        sw_buf->page = page;
        dma_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

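/* Attach the SGE pages of a completed aggregation to the skb as page
 * fragments, replacing each consumed page with a freshly allocated one.
 * Returns 0 on success or a negative errno if a replacement page cannot be
 * allocated, in which case the caller drops the packet.
 */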
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct bnx2x_agg_info *tpa_info,
                               u16 pages,
                               struct sk_buff *skb,
                               struct eth_end_agg_rx_cqe *cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u32 i, frag_len, frag_size;
        int err, j, frag_id = 0;
        u16 len_on_bd = tpa_info->len_on_bd;
        u16 full_page = 0, gro_size = 0;

        frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

        if (fp->mode == TPA_MODE_GRO) {
                gro_size = tpa_info->gro_size;
                full_page = tpa_info->full_page;
        }

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
                                     le16_to_cpu(cqe->pkt_len),
                                     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                if (fp->mode == TPA_MODE_GRO)
                        frag_len = min_t(u32, frag_size, (u32)full_page);
                else /* LRO */
                        frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
                if (unlikely(err)) {
                        bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we're going to pass it to the stack */
                dma_unmap_page(&bp->pdev->dev,
                               dma_unmap_addr(&old_rx_pg, mapping),
                               SGE_PAGES, DMA_FROM_DEVICE);
                /* Add one frag and update the appropriate fields in the skb */
                if (fp->mode == TPA_MODE_LRO)
                        skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
                else { /* GRO */
                        int rem;
                        int offset = 0;
                        for (rem = frag_len; rem > 0; rem -= gro_size) {
                                int len = rem > gro_size ? gro_size : rem;
                                skb_fill_page_desc(skb, frag_id++,
                                                   old_rx_pg.page, offset, len);
                                if (offset)
                                        get_page(old_rx_pg.page);
                                offset += len;
                        }
                }

                skb->data_len += frag_len;
                skb->truesize += SGE_PAGES;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

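/* RX buffers are either page-fragment allocations (when fp->rx_frag_size
 * is set) or plain kmalloc() buffers; the helpers below free and allocate
 * them accordingly.
 */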
static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
        if (fp->rx_frag_size)
                put_page(virt_to_head_page(data));
        else
                kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
        if (fp->rx_frag_size) {
                /* GFP_KERNEL allocations are used only during initialization */
                if (unlikely(gfp_mask & __GFP_WAIT))
                        return (void *)__get_free_page(gfp_mask);

                return netdev_alloc_frag(fp->rx_frag_size);
        }

        return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
        const struct iphdr *iph = ip_hdr(skb);
        struct tcphdr *th;

        skb_set_transport_header(skb, sizeof(struct iphdr));
        th = tcp_hdr(skb);

        th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
                                  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
        struct ipv6hdr *iph = ipv6_hdr(skb);
        struct tcphdr *th;

        skb_set_transport_header(skb, sizeof(struct ipv6hdr));
        th = tcp_hdr(skb);

        th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
                                  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
                            void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
        skb_set_network_header(skb, 0);
        gro_func(bp, skb);
        tcp_gro_complete(skb);
}
#endif

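/* Hand an aggregated packet to the stack: if FW GRO produced it (gso_size
 * is set), fix up the TCP pseudo-header checksum and complete the GRO
 * metadata before napi_gro_receive().
 */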
static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb)
{
#ifdef CONFIG_INET
        if (skb_shinfo(skb)->gso_size) {
                switch (be16_to_cpu(skb->protocol)) {
                case ETH_P_IP:
                        bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
                        break;
                case ETH_P_IPV6:
                        bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
                        break;
                default:
                        BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
                                  be16_to_cpu(skb->protocol));
                }
        }
#endif
        skb_record_rx_queue(skb, fp->rx_queue);
        napi_gro_receive(&fp->napi, skb);
}

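/* Finish a TPA aggregation: build an skb around the buffer parked in the
 * bin, attach the SGE pages, and pass it up; on any allocation failure the
 * aggregated packet is dropped and the buffer stays in the bin.
 */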
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           struct bnx2x_agg_info *tpa_info,
                           u16 pages,
                           struct eth_end_agg_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
        u8 pad = tpa_info->placement_offset;
        u16 len = tpa_info->len_on_bd;
        struct sk_buff *skb = NULL;
        u8 *new_data, *data = rx_buf->data;
        u8 old_tpa_state = tpa_info->tpa_state;

        tpa_info->tpa_state = BNX2X_TPA_STOP;

        /* If there was an error during the handling of the TPA_START -
         * drop this aggregation.
         */
        if (old_tpa_state == BNX2X_TPA_ERROR)
                goto drop;

        /* Try to allocate the new data */
        new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
                         fp->rx_buf_size, DMA_FROM_DEVICE);
        if (likely(new_data))
                skb = build_skb(data, fp->rx_frag_size);

        if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > fp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, fp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad + NET_SKB_PAD);
                skb_put(skb, len);
                skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
                                         skb, cqe, cqe_idx)) {
                        if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
                                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
                        bnx2x_gro_receive(bp, fp, skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS,
                           "Failed to allocate new pages - dropping packet!\n");
                        dev_kfree_skb_any(skb);
                }

                /* put new data in bin */
                rx_buf->data = new_data;

                return;
        }
        if (new_data)
                bnx2x_frag_free(fp, new_data);
drop:
        /* drop the packet and keep the buffer in the bin */
        DP(NETIF_MSG_RX_STATUS,
           "Failed to allocate or map a new skb - dropping packet!\n");
        bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

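/* Allocate and DMA-map a receive buffer for the given RX ring index and
 * publish its address in the corresponding RX BD.
 */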
static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               u16 index, gfp_t gfp_mask)
{
        u8 *data;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        data = bnx2x_frag_alloc(fp, gfp_mask);
        if (unlikely(data == NULL))
                return -ENOMEM;

        mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
                                 fp->rx_buf_size,
                                 DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                bnx2x_frag_free(fp, data);
                BNX2X_ERR("Can't map rx data\n");
                return -ENOMEM;
        }

        rx_buf->data = data;
        dma_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
                                 struct bnx2x_fastpath *fp,
                                 struct bnx2x_eth_q_stats *qstats)
{
        /* Do nothing if no L4 csum validation was done.
         * We do not check whether IP csum was validated. For IPv4 we assume
         * that if the card got as far as validating the L4 csum, it also
         * validated the IP csum. IPv6 has no IP csum.
         */
        if (cqe->fast_path_cqe.status_flags &
            ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
                return;

        /* If L4 validation was done, check if an error was found. */

        if (cqe->fast_path_cqe.type_error_flags &
            (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
             ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
                qstats->hw_csum_err++;
        else
                skb->ip_summed = CHECKSUM_UNNECESSARY;
}

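/* Main RX completion handler: walk the RCQ up to the budget, dispatching
 * slow-path events, TPA start/stop completions and regular packets, then
 * update the BD/CQE/SGE producers for the hardware.
 */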
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
        u16 sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;
        union eth_rx_cqe *cqe;
        struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif
        if (budget <= 0)
                return rx_pkt;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        bd_prod_fw = bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        comp_ring_cons = RCQ_BD(sw_comp_cons);
        cqe = &fp->rx_comp_ring[comp_ring_cons];
        cqe_fp = &cqe->fast_path_cqe;

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

        while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
                struct sw_rx_bd *rx_buf = NULL;
                struct sk_buff *skb;
                u8 cqe_fp_flags;
                enum eth_rx_cqe_type cqe_fp_type;
                u16 len, pad, queue;
                u8 *data;
                u32 rxhash;
                enum pkt_hash_types rxhash_type;

#ifdef BNX2X_STOP_ON_ERROR
                if (unlikely(bp->panic))
                        return 0;
#endif

                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                /* A rmb() is required to ensure that the CQE is not read
                 * before it is written by the adapter DMA.  PCI ordering
                 * rules will make sure the other fields are written before
                 * the marker at the end of struct eth_fast_path_rx_cqe
                 * but without rmb() a weakly ordered processor can process
                 * stale data.  Without the barrier TPA state-machine might
                 * enter inconsistent state and kernel stack might be
                 * provided with incorrect packet description - these lead
                 * to various kernel crashes.
                 */
                rmb();

                cqe_fp_flags = cqe_fp->type_error_flags;
                cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

                DP(NETIF_MSG_RX_STATUS,
                   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
                   CQE_TYPE(cqe_fp_flags),
                   cqe_fp_flags, cqe_fp->status_flags,
                   le32_to_cpu(cqe_fp->rss_hash_result),
                   le16_to_cpu(cqe_fp->vlan_tag),
                   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

                /* is this a slowpath msg? */
                if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;
                }

                rx_buf = &fp->rx_buf_ring[bd_cons];
                data = rx_buf->data;

                if (!CQE_TYPE_FAST(cqe_fp_type)) {
                        struct bnx2x_agg_info *tpa_info;
                        u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
                        /* sanity check */
                        if (fp->disable_tpa &&
                            (CQE_TYPE_START(cqe_fp_type) ||
                             CQE_TYPE_STOP(cqe_fp_type)))
                                BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
                                          CQE_TYPE(cqe_fp_type));
#endif

                        if (CQE_TYPE_START(cqe_fp_type)) {
                                u16 queue = cqe_fp->queue_index;
                                DP(NETIF_MSG_RX_STATUS,
                                   "calling tpa_start on queue %d\n",
                                   queue);

                                bnx2x_tpa_start(fp, queue,
                                                bd_cons, bd_prod,
                                                cqe_fp);

                                goto next_rx;
                        }
                        queue = cqe->end_agg_cqe.queue_index;
                        tpa_info = &fp->tpa_info[queue];
                        DP(NETIF_MSG_RX_STATUS,
                           "calling tpa_stop on queue %d\n",
                           queue);

                        frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
                                    tpa_info->len_on_bd;

                        if (fp->mode == TPA_MODE_GRO)
                                pages = (frag_size + tpa_info->full_page - 1) /
                                         tpa_info->full_page;
                        else
                                pages = SGE_PAGE_ALIGN(frag_size) >>
                                        SGE_PAGE_SHIFT;

                        bnx2x_tpa_stop(bp, fp, tpa_info, pages,
                                       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
                        if (bp->panic)
                                return 0;
#endif

                        bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
                        goto next_cqe;
                }
                /* non TPA */
                len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
                pad = cqe_fp->placement_offset;
                dma_sync_single_for_cpu(&bp->pdev->dev,
                                        dma_unmap_addr(rx_buf, mapping),
                                        pad + RX_COPY_THRESH,
                                        DMA_FROM_DEVICE);
                pad += NET_SKB_PAD;
                prefetch(data + pad); /* speedup eth_type_trans() */
                /* is this an error packet? */
                if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
                        DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
                           "ERROR  flags %x  rx packet %u\n",
                           cqe_fp_flags, sw_comp_cons);
                        bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
                        goto reuse_rx;
                }

                /* Since we don't have a jumbo ring
                 * copy small packets if mtu > 1500
                 */
                if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
                    (len <= RX_COPY_THRESH)) {
                        skb = netdev_alloc_skb_ip_align(bp->dev, len);
                        if (skb == NULL) {
                                DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
                                   "ERROR  packet dropped because of alloc failure\n");
                                bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
                                goto reuse_rx;
                        }
                        memcpy(skb->data, data + pad, len);
                        bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
                } else {
                        if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
                                                       GFP_ATOMIC) == 0)) {
                                dma_unmap_single(&bp->pdev->dev,
                                                 dma_unmap_addr(rx_buf, mapping),
                                                 fp->rx_buf_size,
                                                 DMA_FROM_DEVICE);
                                skb = build_skb(data, fp->rx_frag_size);
                                if (unlikely(!skb)) {
                                        bnx2x_frag_free(fp, data);
                                        bnx2x_fp_qstats(bp, fp)->
                                                        rx_skb_alloc_failed++;
                                        goto next_rx;
                                }
                                skb_reserve(skb, pad);
                        } else {
                                DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
                                   "ERROR  packet dropped because of alloc failure\n");
                                bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
                                bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
                                goto next_rx;
                        }
                }

                skb_put(skb, len);
                skb->protocol = eth_type_trans(skb, bp->dev);

                /* Set Toeplitz hash for a non-LRO skb */
                rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
                skb_set_hash(skb, rxhash, rxhash_type);

                skb_checksum_none_assert(skb);

                if (bp->dev->features & NETIF_F_RXCSUM)
                        bnx2x_csum_validate(skb, cqe, fp,
                                            bnx2x_fp_qstats(bp, fp));

                skb_record_rx_queue(skb, fp->rx_queue);

                if (le16_to_cpu(cqe_fp->pars_flags.flags) &
                    PARSING_FLAGS_VLAN)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                               le16_to_cpu(cqe_fp->vlan_tag));

                skb_mark_napi_id(skb, &fp->napi);

                if (bnx2x_fp_ll_polling(fp))
                        netif_receive_skb(skb);
                else
                        napi_gro_receive(&fp->napi, skb);
next_rx:
                rx_buf->data = NULL;

                bd_cons = NEXT_RX_IDX(bd_cons);
                bd_prod = NEXT_RX_IDX(bd_prod);
                bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
                rx_pkt++;
next_cqe:
                sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
                sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

                /* mark CQE as free */
                BNX2X_SEED_CQE(cqe_fp);

                if (rx_pkt == budget)
                        break;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                cqe = &fp->rx_comp_ring[comp_ring_cons];
                cqe_fp = &cqe->fast_path_cqe;
        } /* while */

        fp->rx_bd_cons = bd_cons;
        fp->rx_bd_prod = bd_prod_fw;
        fp->rx_comp_cons = sw_comp_cons;
        fp->rx_comp_prod = sw_comp_prod;

        /* Update producers */
        bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
                             fp->rx_sge_prod);

        fp->rx_pkt += rx_pkt;
        fp->rx_calls++;

        return rx_pkt;
}

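/* Per-queue MSI-X handler: acknowledge the status block with interrupts
 * disabled and defer all RX/TX work to the NAPI poll routine.
 */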
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
        struct bnx2x_fastpath *fp = fp_cookie;
        struct bnx2x *bp = fp->bp;
        u8 cos;

        DP(NETIF_MSG_INTR,
           "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
           fp->index, fp->fw_sb_id, fp->igu_sb_id);

        bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        /* Handle Rx and Tx according to MSI-X vector */
        for_each_cos_in_tx_queue(fp, cos)
                prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

        prefetch(&fp->sb_running_index[SM_RX_ID]);
        napi_schedule(&bnx2x_fp(bp, fp->index, napi));

        return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
        mutex_lock(&bp->port.phy_mutex);

        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

        mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
        u16 line_speed = bp->link_vars.line_speed;
        if (IS_MF(bp)) {
                u16 maxCfg = bnx2x_extract_max_cfg(bp,
                                                   bp->mf_config[BP_VN(bp)]);

                /* Calculate the current MAX line speed limit for the MF
                 * devices
                 */
                if (IS_MF_SI(bp))
                        line_speed = (line_speed * maxCfg) / 100;
                else { /* SD mode */
                        u16 vn_max_rate = maxCfg * 100;

                        if (vn_max_rate < line_speed)
                                line_speed = vn_max_rate;
                }
        }

        return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:         driver handle
 * @data:       link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
                                   struct bnx2x_link_report_data *data)
{
        u16 line_speed = bnx2x_get_mf_speed(bp);

        memset(data, 0, sizeof(*data));

        /* Fill the report data: effective line speed */
        data->line_speed = line_speed;

        /* Link is down */
        if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
                __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
                          &data->link_report_flags);

        /* Full DUPLEX */
        if (bp->link_vars.duplex == DUPLEX_FULL)
                __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

        /* Rx Flow Control is ON */
        if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
                __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

        /* Tx Flow Control is ON */
        if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
                __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:         driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
        bnx2x_acquire_phy_lock(bp);
        __bnx2x_link_report(bp);
        bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:         driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
        struct bnx2x_link_report_data cur_data;

        /* reread mf_cfg */
        if (IS_PF(bp) && !CHIP_IS_E1(bp))
                bnx2x_read_mf_cfg(bp);

        /* Read the current link report info */
        bnx2x_fill_report_data(bp, &cur_data);

        /* Don't report link down or exactly the same link status twice */
        if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
            (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
                      &bp->last_reported_link.link_report_flags) &&
             test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
                      &cur_data.link_report_flags)))
                return;

        bp->link_cnt++;

        /* We are going to report new link parameters now -
         * remember the current data for the next time.
         */
        memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

        if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
                     &cur_data.link_report_flags)) {
                netif_carrier_off(bp->dev);
                netdev_err(bp->dev, "NIC Link is Down\n");
                return;
        } else {
                const char *duplex;
                const char *flow;

                netif_carrier_on(bp->dev);

                if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
                                       &cur_data.link_report_flags))
                        duplex = "full";
                else
                        duplex = "half";

                /* Handle the FC at the end so that only these flags would be
                 * possibly set. This way we may easily check if there is no FC
                 * enabled.
                 */
                if (cur_data.link_report_flags) {
                        if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
                                     &cur_data.link_report_flags)) {
                                if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
                                     &cur_data.link_report_flags))
                                        flow = "ON - receive & transmit";
                                else
                                        flow = "ON - receive";
                        } else {
                                flow = "ON - transmit";
                        }
                } else {
                        flow = "none";
                }
                netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
                            cur_data.line_speed, duplex, flow);
        }
}

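/* Initialize the "next page" element at the end of each SGE page so the
 * hardware can follow the ring across page boundaries.
 */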
static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
        int i;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                struct eth_rx_sge *sge;

                sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
                sge->addr_hi =
                        cpu_to_le32(U64_HI(fp->rx_sge_mapping +
                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

                sge->addr_lo =
                        cpu_to_le32(U64_LO(fp->rx_sge_mapping +
                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
        }
}

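/* Release the per-queue TPA bins: unmap any aggregation that was still in
 * progress and free the parked buffers.
 */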
1329 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1330                                 struct bnx2x_fastpath *fp, int last)
1331 {
1332         int i;
1333
1334         for (i = 0; i < last; i++) {
1335                 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1336                 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1337                 u8 *data = first_buf->data;
1338
1339                 if (data == NULL) {
1340                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1341                         continue;
1342                 }
1343                 if (tpa_info->tpa_state == BNX2X_TPA_START)
1344                         dma_unmap_single(&bp->pdev->dev,
1345                                          dma_unmap_addr(first_buf, mapping),
1346                                          fp->rx_buf_size, DMA_FROM_DEVICE);
1347                 bnx2x_frag_free(fp, data);
1348                 first_buf->data = NULL;
1349         }
1350 }
1351
1352 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1353 {
1354         int j;
1355
1356         for_each_rx_queue_cnic(bp, j) {
1357                 struct bnx2x_fastpath *fp = &bp->fp[j];
1358
1359                 fp->rx_bd_cons = 0;
1360
1361                 /* Activate BD ring */
1362                 /* Warning!
1363                  * this will generate an interrupt (to the TSTORM)
1364                  * must only be done after chip is initialized
1365                  */
1366                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1367                                      fp->rx_sge_prod);
1368         }
1369 }
1370
1371 void bnx2x_init_rx_rings(struct bnx2x *bp)
1372 {
1373         int func = BP_FUNC(bp);
1374         u16 ring_prod;
1375         int i, j;
1376
1377         /* Allocate TPA resources */
1378         for_each_eth_queue(bp, j) {
1379                 struct bnx2x_fastpath *fp = &bp->fp[j];
1380
1381                 DP(NETIF_MSG_IFUP,
1382                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1383
1384                 if (!fp->disable_tpa) {
1385                         /* Fill the per-aggregation pool */
1386                         for (i = 0; i < MAX_AGG_QS(bp); i++) {
1387                                 struct bnx2x_agg_info *tpa_info =
1388                                         &fp->tpa_info[i];
1389                                 struct sw_rx_bd *first_buf =
1390                                         &tpa_info->first_buf;
1391
1392                                 first_buf->data =
1393                                         bnx2x_frag_alloc(fp, GFP_KERNEL);
1394                                 if (!first_buf->data) {
1395                                         BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1396                                                   j);
1397                                         bnx2x_free_tpa_pool(bp, fp, i);
1398                                         fp->disable_tpa = 1;
1399                                         break;
1400                                 }
1401                                 dma_unmap_addr_set(first_buf, mapping, 0);
1402                                 tpa_info->tpa_state = BNX2X_TPA_STOP;
1403                         }
1404
1405                         /* "next page" elements initialization */
1406                         bnx2x_set_next_page_sgl(fp);
1407
1408                         /* set SGEs bit mask */
1409                         bnx2x_init_sge_ring_bit_mask(fp);
1410
1411                         /* Allocate SGEs and initialize the ring elements */
1412                         for (i = 0, ring_prod = 0;
1413                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1414
1415                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1416                                                        GFP_KERNEL) < 0) {
1417                                         BNX2X_ERR("was only able to allocate %d rx sges\n",
1418                                                   i);
1419                                         BNX2X_ERR("disabling TPA for queue[%d]\n",
1420                                                   j);
1421                                         /* Cleanup already allocated elements */
1422                                         bnx2x_free_rx_sge_range(bp, fp,
1423                                                                 ring_prod);
1424                                         bnx2x_free_tpa_pool(bp, fp,
1425                                                             MAX_AGG_QS(bp));
1426                                         fp->disable_tpa = 1;
1427                                         ring_prod = 0;
1428                                         break;
1429                                 }
1430                                 ring_prod = NEXT_SGE_IDX(ring_prod);
1431                         }
1432
1433                         fp->rx_sge_prod = ring_prod;
1434                 }
1435         }
1436
1437         for_each_eth_queue(bp, j) {
1438                 struct bnx2x_fastpath *fp = &bp->fp[j];
1439
1440                 fp->rx_bd_cons = 0;
1441
1442                 /* Activate BD ring */
1443                 /* Warning!
1444                  * this will generate an interrupt (to the TSTORM);
1445                  * it must only be done after the chip is initialized
1446                  */
1447                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1448                                      fp->rx_sge_prod);
1449
1450                 if (j != 0)
1451                         continue;
1452
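                /* For the leading queue only, and only on E1 chips, also
                 * publish the RCQ base address to USTORM internal memory;
                 * judging by the register name this serves a memory
                 * workaround on that chip.
                 */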
1453                 if (CHIP_IS_E1(bp)) {
1454                         REG_WR(bp, BAR_USTRORM_INTMEM +
1455                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1456                                U64_LO(fp->rx_comp_mapping));
1457                         REG_WR(bp, BAR_USTRORM_INTMEM +
1458                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1459                                U64_HI(fp->rx_comp_mapping));
1460                 }
1461         }
1462 }
1463
1464 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1465 {
1466         u8 cos;
1467         struct bnx2x *bp = fp->bp;
1468
1469         for_each_cos_in_tx_queue(fp, cos) {
1470                 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1471                 unsigned pkts_compl = 0, bytes_compl = 0;
1472
1473                 u16 sw_prod = txdata->tx_pkt_prod;
1474                 u16 sw_cons = txdata->tx_pkt_cons;
1475
1476                 while (sw_cons != sw_prod) {
1477                         bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1478                                           &pkts_compl, &bytes_compl);
1479                         sw_cons++;
1480                 }
1481
1482                 netdev_tx_reset_queue(
1483                         netdev_get_tx_queue(bp->dev,
1484                                             txdata->txq_index));
1485         }
1486 }
1487
1488 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1489 {
1490         int i;
1491
1492         for_each_tx_queue_cnic(bp, i) {
1493                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1494         }
1495 }
1496
1497 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1498 {
1499         int i;
1500
1501         for_each_eth_queue(bp, i) {
1502                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1503         }
1504 }
1505
1506 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1507 {
1508         struct bnx2x *bp = fp->bp;
1509         int i;
1510
1511         /* ring wasn't allocated */
1512         if (fp->rx_buf_ring == NULL)
1513                 return;
1514
1515         for (i = 0; i < NUM_RX_BD; i++) {
1516                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1517                 u8 *data = rx_buf->data;
1518
1519                 if (data == NULL)
1520                         continue;
1521                 dma_unmap_single(&bp->pdev->dev,
1522                                  dma_unmap_addr(rx_buf, mapping),
1523                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1524
1525                 rx_buf->data = NULL;
1526                 bnx2x_frag_free(fp, data);
1527         }
1528 }
1529
1530 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1531 {
1532         int j;
1533
1534         for_each_rx_queue_cnic(bp, j) {
1535                 bnx2x_free_rx_bds(&bp->fp[j]);
1536         }
1537 }
1538
1539 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1540 {
1541         int j;
1542
1543         for_each_eth_queue(bp, j) {
1544                 struct bnx2x_fastpath *fp = &bp->fp[j];
1545
1546                 bnx2x_free_rx_bds(fp);
1547
1548                 if (!fp->disable_tpa)
1549                         bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1550         }
1551 }
1552
1553 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1554 {
1555         bnx2x_free_tx_skbs_cnic(bp);
1556         bnx2x_free_rx_skbs_cnic(bp);
1557 }
1558
1559 void bnx2x_free_skbs(struct bnx2x *bp)
1560 {
1561         bnx2x_free_tx_skbs(bp);
1562         bnx2x_free_rx_skbs(bp);
1563 }
1564
1565 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1566 {
1567         /* load old values */
1568         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1569
1570         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1571                 /* leave all but MAX value */
1572                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1573
1574                 /* set new MAX value */
1575                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1576                                 & FUNC_MF_CFG_MAX_BW_MASK;
1577
1578                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1579         }
1580 }
1581
1582 /**
1583  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1584  *
1585  * @bp:         driver handle
1586  * @nvecs:      number of vectors to be released
1587  */
1588 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1589 {
1590         int i, offset = 0;
1591
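        /* The msix_table layout mirrors bnx2x_enable_msix(): entry 0 is the
         * slowpath/default status block (PF only), the next entry is reserved
         * for CNIC when supported, and the remaining entries map 1:1 onto the
         * ETH fastpath queues.  'offset' walks that layout while 'nvecs'
         * bounds how many vectors were actually requested.
         */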
1592         if (nvecs == offset)
1593                 return;
1594
1595         /* VFs don't have a default SB */
1596         if (IS_PF(bp)) {
1597                 free_irq(bp->msix_table[offset].vector, bp->dev);
1598                 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1599                    bp->msix_table[offset].vector);
1600                 offset++;
1601         }
1602
1603         if (CNIC_SUPPORT(bp)) {
1604                 if (nvecs == offset)
1605                         return;
1606                 offset++;
1607         }
1608
1609         for_each_eth_queue(bp, i) {
1610                 if (nvecs == offset)
1611                         return;
1612                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1613                    i, bp->msix_table[offset].vector);
1614
1615                 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1616         }
1617 }
1618
1619 void bnx2x_free_irq(struct bnx2x *bp)
1620 {
1621         if (bp->flags & USING_MSIX_FLAG &&
1622             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1623                 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1624
1625                 /* vfs don't have a default status block */
1626                 if (IS_PF(bp))
1627                         nvecs++;
1628
1629                 bnx2x_free_msix_irqs(bp, nvecs);
1630         } else {
1631                 free_irq(bp->dev->irq, bp->dev);
1632         }
1633 }
1634
1635 int bnx2x_enable_msix(struct bnx2x *bp)
1636 {
1637         int msix_vec = 0, i, rc;
1638
1639         /* VFs don't have a default status block */
1640         if (IS_PF(bp)) {
1641                 bp->msix_table[msix_vec].entry = msix_vec;
1642                 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1643                                bp->msix_table[0].entry);
1644                 msix_vec++;
1645         }
1646
1647         /* Cnic requires an msix vector for itself */
1648         if (CNIC_SUPPORT(bp)) {
1649                 bp->msix_table[msix_vec].entry = msix_vec;
1650                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1651                                msix_vec, bp->msix_table[msix_vec].entry);
1652                 msix_vec++;
1653         }
1654
1655         /* We need separate vectors for ETH queues only (not FCoE) */
1656         for_each_eth_queue(bp, i) {
1657                 bp->msix_table[msix_vec].entry = msix_vec;
1658                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1659                                msix_vec, msix_vec, i);
1660                 msix_vec++;
1661         }
1662
1663         DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1664            msix_vec);
1665
1666         rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1667                                    BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1668         /*
1669          * reconfigure number of tx/rx queues according to available
1670          * MSI-X vectors
1671          */
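        /* pci_enable_msix_range() returns the number of vectors actually
         * allocated (between the requested minimum and msix_vec) or a
         * negative errno; -ENOSPC means even the minimum could not be met,
         * which is why the driver retries below with a single vector.
         */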
1672         if (rc == -ENOSPC) {
1673                 /* Get by with single vector */
1674                 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1675                 if (rc < 0) {
1676                         BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1677                                        rc);
1678                         goto no_msix;
1679                 }
1680
1681                 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1682                 bp->flags |= USING_SINGLE_MSIX_FLAG;
1683
1684                 BNX2X_DEV_INFO("set number of queues to 1\n");
1685                 bp->num_ethernet_queues = 1;
1686                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1687         } else if (rc < 0) {
1688                 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1689                 goto no_msix;
1690         } else if (rc < msix_vec) {
1691                 /* how many fewer vectors do we have? */
1692                 int diff = msix_vec - rc;
1693
1694                 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1695
1696                 /*
1697                  * decrease number of queues by number of unallocated entries
1698                  */
1699                 bp->num_ethernet_queues -= diff;
1700                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1701
1702                 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1703                                bp->num_queues);
1704         }
1705
1706         bp->flags |= USING_MSIX_FLAG;
1707
1708         return 0;
1709
1710 no_msix:
1711         /* fall to INTx if not enough memory */
1712         if (rc == -ENOMEM)
1713                 bp->flags |= DISABLE_MSI_FLAG;
1714
1715         return rc;
1716 }
1717
1718 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1719 {
1720         int i, rc, offset = 0;
1721
1722         /* no default status block for vf */
1723         if (IS_PF(bp)) {
1724                 rc = request_irq(bp->msix_table[offset++].vector,
1725                                  bnx2x_msix_sp_int, 0,
1726                                  bp->dev->name, bp->dev);
1727                 if (rc) {
1728                         BNX2X_ERR("request sp irq failed\n");
1729                         return -EBUSY;
1730                 }
1731         }
1732
1733         if (CNIC_SUPPORT(bp))
1734                 offset++;
1735
1736         for_each_eth_queue(bp, i) {
1737                 struct bnx2x_fastpath *fp = &bp->fp[i];
1738                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1739                          bp->dev->name, i);
1740
1741                 rc = request_irq(bp->msix_table[offset].vector,
1742                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1743                 if (rc) {
1744                         BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1745                               bp->msix_table[offset].vector, rc);
1746                         bnx2x_free_msix_irqs(bp, offset);
1747                         return -EBUSY;
1748                 }
1749
1750                 offset++;
1751         }
1752
1753         i = BNX2X_NUM_ETH_QUEUES(bp);
1754         if (IS_PF(bp)) {
1755                 offset = 1 + CNIC_SUPPORT(bp);
1756                 netdev_info(bp->dev,
1757                             "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1758                             bp->msix_table[0].vector,
1759                             0, bp->msix_table[offset].vector,
1760                             i - 1, bp->msix_table[offset + i - 1].vector);
1761         } else {
1762                 offset = CNIC_SUPPORT(bp);
1763                 netdev_info(bp->dev,
1764                             "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1765                             0, bp->msix_table[offset].vector,
1766                             i - 1, bp->msix_table[offset + i - 1].vector);
1767         }
1768         return 0;
1769 }
1770
1771 int bnx2x_enable_msi(struct bnx2x *bp)
1772 {
1773         int rc;
1774
1775         rc = pci_enable_msi(bp->pdev);
1776         if (rc) {
1777                 BNX2X_DEV_INFO("MSI is not attainable\n");
1778                 return -1;
1779         }
1780         bp->flags |= USING_MSI_FLAG;
1781
1782         return 0;
1783 }
1784
1785 static int bnx2x_req_irq(struct bnx2x *bp)
1786 {
1787         unsigned long flags;
1788         unsigned int irq;
1789
1790         if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1791                 flags = 0;
1792         else
1793                 flags = IRQF_SHARED;
1794
1795         if (bp->flags & USING_MSIX_FLAG)
1796                 irq = bp->msix_table[0].vector;
1797         else
1798                 irq = bp->pdev->irq;
1799
1800         return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1801 }
1802
1803 static int bnx2x_setup_irqs(struct bnx2x *bp)
1804 {
1805         int rc = 0;
1806         if (bp->flags & USING_MSIX_FLAG &&
1807             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1808                 rc = bnx2x_req_msix_irqs(bp);
1809                 if (rc)
1810                         return rc;
1811         } else {
1812                 rc = bnx2x_req_irq(bp);
1813                 if (rc) {
1814                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1815                         return rc;
1816                 }
1817                 if (bp->flags & USING_MSI_FLAG) {
1818                         bp->dev->irq = bp->pdev->irq;
1819                         netdev_info(bp->dev, "using MSI IRQ %d\n",
1820                                     bp->dev->irq);
1821                 }
1822                 if (bp->flags & USING_MSIX_FLAG) {
1823                         bp->dev->irq = bp->msix_table[0].vector;
1824                         netdev_info(bp->dev, "using MSIX IRQ %d\n",
1825                                     bp->dev->irq);
1826                 }
1827         }
1828
1829         return 0;
1830 }
1831
1832 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1833 {
1834         int i;
1835
1836         for_each_rx_queue_cnic(bp, i) {
1837                 bnx2x_fp_init_lock(&bp->fp[i]);
1838                 napi_enable(&bnx2x_fp(bp, i, napi));
1839         }
1840 }
1841
1842 static void bnx2x_napi_enable(struct bnx2x *bp)
1843 {
1844         int i;
1845
1846         for_each_eth_queue(bp, i) {
1847                 bnx2x_fp_init_lock(&bp->fp[i]);
1848                 napi_enable(&bnx2x_fp(bp, i, napi));
1849         }
1850 }
1851
1852 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1853 {
1854         int i;
1855
1856         for_each_rx_queue_cnic(bp, i) {
1857                 napi_disable(&bnx2x_fp(bp, i, napi));
1858                 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1859                         usleep_range(1000, 2000);
1860         }
1861 }
1862
1863 static void bnx2x_napi_disable(struct bnx2x *bp)
1864 {
1865         int i;
1866
1867         for_each_eth_queue(bp, i) {
1868                 napi_disable(&bnx2x_fp(bp, i, napi));
1869                 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1870                         usleep_range(1000, 2000);
1871         }
1872 }
1873
1874 void bnx2x_netif_start(struct bnx2x *bp)
1875 {
1876         if (netif_running(bp->dev)) {
1877                 bnx2x_napi_enable(bp);
1878                 if (CNIC_LOADED(bp))
1879                         bnx2x_napi_enable_cnic(bp);
1880                 bnx2x_int_enable(bp);
1881                 if (bp->state == BNX2X_STATE_OPEN)
1882                         netif_tx_wake_all_queues(bp->dev);
1883         }
1884 }
1885
1886 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1887 {
1888         bnx2x_int_disable_sync(bp, disable_hw);
1889         bnx2x_napi_disable(bp);
1890         if (CNIC_LOADED(bp))
1891                 bnx2x_napi_disable_cnic(bp);
1892 }
1893
1894 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1895                        void *accel_priv, select_queue_fallback_t fallback)
1896 {
1897         struct bnx2x *bp = netdev_priv(dev);
1898
1899         if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1900                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1901                 u16 ether_type = ntohs(hdr->h_proto);
1902
1903                 /* Skip VLAN tag if present */
1904                 if (ether_type == ETH_P_8021Q) {
1905                         struct vlan_ethhdr *vhdr =
1906                                 (struct vlan_ethhdr *)skb->data;
1907
1908                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1909                 }
1910
1911                 /* If ethertype is FCoE or FIP - use FCoE ring */
1912                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1913                         return bnx2x_fcoe_tx(bp, txq_index);
1914         }
1915
1916         /* select a non-FCoE queue */
1917         return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1918 }
1919
1920 void bnx2x_set_num_queues(struct bnx2x *bp)
1921 {
1922         /* RSS queues */
1923         bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1924
1925         /* override in STORAGE SD modes */
1926         if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1927                 bp->num_ethernet_queues = 1;
1928
1929         /* Add special queues */
1930         bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1931         bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1932
1933         BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1934 }
1935
1936 /**
1937  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1938  *
1939  * @bp:         Driver handle
1940  *
1941  * We currently support at most 16 Tx queues for each CoS, thus we will
1942  * allocate a multiple of 16 for ETH L2 rings according to the value of the
1943  * bp->max_cos.
1944  *
1945  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1946  * index after all ETH L2 indices.
1947  *
1948  * If the actual number of Tx queues (for each CoS) is less than 16, then there
1949  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1950  * 16..31,...) with indices that are not coupled with any real Tx queue.
1951  *
1952  * The proper configuration of skb->queue_mapping is handled by
1953  * bnx2x_select_queue() and __skb_tx_hash().
1954  *
1955  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1956  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1957  */
1958 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1959 {
1960         int rc, tx, rx;
1961
1962         tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1963         rx = BNX2X_NUM_ETH_QUEUES(bp);
1964
1965         /* account for fcoe queue */
1966         if (include_cnic && !NO_FCOE(bp)) {
1967                 rx++;
1968                 tx++;
1969         }
1970
1971         rc = netif_set_real_num_tx_queues(bp->dev, tx);
1972         if (rc) {
1973                 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1974                 return rc;
1975         }
1976         rc = netif_set_real_num_rx_queues(bp->dev, rx);
1977         if (rc) {
1978                 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1979                 return rc;
1980         }
1981
1982         DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1983                           tx, rx);
1984
1985         return rc;
1986 }
1987
1988 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1989 {
1990         int i;
1991
1992         for_each_queue(bp, i) {
1993                 struct bnx2x_fastpath *fp = &bp->fp[i];
1994                 u32 mtu;
1995
1996                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1997                 if (IS_FCOE_IDX(i))
1998                         /*
1999                          * Although no IP frames are expected to arrive on
2000                          * this ring, we still want to add an
2001                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2002                          * overrun attack.
2003                          */
2004                         mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2005                 else
2006                         mtu = bp->dev->mtu;
2007                 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2008                                   IP_HEADER_ALIGNMENT_PADDING +
2009                                   ETH_OVREHEAD +
2010                                   mtu +
2011                                   BNX2X_FW_RX_ALIGN_END;
2012                 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
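                /* A zero rx_frag_size makes the buffer allocator
                 * (bnx2x_frag_alloc()) fall back to a plain kmalloc() of
                 * rx_buf_size instead of carving the buffer out of a page
                 * fragment.
                 */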
2013                 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2014                         fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2015                 else
2016                         fp->rx_frag_size = 0;
2017         }
2018 }
2019
2020 static int bnx2x_init_rss(struct bnx2x *bp)
2021 {
2022         int i;
2023         u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2024
2025         /* Prepare the initial contents for the indirection table if RSS is
2026          * enabled
2027          */
2028         for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2029                 bp->rss_conf_obj.ind_table[i] =
2030                         bp->fp->cl_id +
2031                         ethtool_rxfh_indir_default(i, num_eth_queues);
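        /* ethtool_rxfh_indir_default(i, n) is simply i % n, so the table
         * spreads flows round-robin across the ETH queues, expressed as
         * client IDs relative to bp->fp->cl_id.
         */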
2032
2033         /*
2034          * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2035          * per-port, so if explicit configuration is needed, do it only
2036          * for a PMF.
2037          *
2038          * For 57712 and newer on the other hand it's a per-function
2039          * configuration.
2040          */
2041         return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2042 }
2043
2044 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2045               bool config_hash, bool enable)
2046 {
2047         struct bnx2x_config_rss_params params = {NULL};
2048
2049         /* Although RSS is meaningless when there is a single HW queue we
2050          * still need it enabled in order to have HW Rx hash generated.
2051          *
2052          * if (!is_eth_multi(bp))
2053          *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
2054          */
2055
2056         params.rss_obj = rss_obj;
2057
2058         __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2059
2060         if (enable) {
2061                 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2062
2063                 /* RSS configuration */
2064                 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2065                 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2066                 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2067                 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2068                 if (rss_obj->udp_rss_v4)
2069                         __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2070                 if (rss_obj->udp_rss_v6)
2071                         __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2072         } else {
2073                 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2074         }
2075
2076         /* Hash bits */
2077         params.rss_result_mask = MULTI_MASK;
2078
2079         memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2080
2081         if (config_hash) {
2082                 /* RSS keys */
2083                 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
2084                 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2085         }
2086
2087         if (IS_PF(bp))
2088                 return bnx2x_config_rss(bp, &params);
2089         else
2090                 return bnx2x_vfpf_config_rss(bp, &params);
2091 }
2092
2093 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2094 {
2095         struct bnx2x_func_state_params func_params = {NULL};
2096
2097         /* Prepare parameters for function state transitions */
2098         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2099
2100         func_params.f_obj = &bp->func_obj;
2101         func_params.cmd = BNX2X_F_CMD_HW_INIT;
2102
2103         func_params.params.hw_init.load_phase = load_code;
2104
2105         return bnx2x_func_state_change(bp, &func_params);
2106 }
2107
2108 /*
2109  * Cleans the objects that have internal lists without sending
2110  * ramrods. Should be run when interrupts are disabled.
2111  */
2112 void bnx2x_squeeze_objects(struct bnx2x *bp)
2113 {
2114         int rc;
2115         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2116         struct bnx2x_mcast_ramrod_params rparam = {NULL};
2117         struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2118
2119         /***************** Cleanup MACs' object first *************************/
2120
2121         /* Wait for completion of the requested commands */
2122         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2123         /* Perform a dry cleanup */
2124         __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2125
2126         /* Clean ETH primary MAC */
2127         __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2128         rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2129                                  &ramrod_flags);
2130         if (rc != 0)
2131                 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2132
2133         /* Cleanup UC list */
2134         vlan_mac_flags = 0;
2135         __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2136         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2137                                  &ramrod_flags);
2138         if (rc != 0)
2139                 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2140
2141         /***************** Now clean mcast object *****************************/
2142         rparam.mcast_obj = &bp->mcast_obj;
2143         __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2144
2145         /* Add a DEL command... - Since we're doing a driver cleanup only,
2146          * we take a lock surrounding both the initial send and the CONTs,
2147          * as we don't want a true completion to disrupt us in the middle.
2148          */
2149         netif_addr_lock_bh(bp->dev);
2150         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2151         if (rc < 0)
2152                 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2153                           rc);
2154
2155         /* ...and wait until all pending commands are cleared */
2156         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2157         while (rc != 0) {
2158                 if (rc < 0) {
2159                         BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2160                                   rc);
2161                         netif_addr_unlock_bh(bp->dev);
2162                         return;
2163                 }
2164
2165                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2166         }
2167         netif_addr_unlock_bh(bp->dev);
2168 }
2169
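/* Error unwinding during load: in regular builds the LOAD_ERROR_EXIT macros
 * jump to the matching load_error* label so that partially acquired resources
 * are released, while BNX2X_STOP_ON_ERROR debug builds mark the device as
 * panicked and bail out immediately so the failing state can be inspected.
 */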
2170 #ifndef BNX2X_STOP_ON_ERROR
2171 #define LOAD_ERROR_EXIT(bp, label) \
2172         do { \
2173                 (bp)->state = BNX2X_STATE_ERROR; \
2174                 goto label; \
2175         } while (0)
2176
2177 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2178         do { \
2179                 bp->cnic_loaded = false; \
2180                 goto label; \
2181         } while (0)
2182 #else /*BNX2X_STOP_ON_ERROR*/
2183 #define LOAD_ERROR_EXIT(bp, label) \
2184         do { \
2185                 (bp)->state = BNX2X_STATE_ERROR; \
2186                 (bp)->panic = 1; \
2187                 return -EBUSY; \
2188         } while (0)
2189 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2190         do { \
2191                 bp->cnic_loaded = false; \
2192                 (bp)->panic = 1; \
2193                 return -EBUSY; \
2194         } while (0)
2195 #endif /*BNX2X_STOP_ON_ERROR*/
2196
2197 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2198 {
2199         BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2200                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2201         return;
2202 }
2203
2204 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2205 {
2206         int num_groups, vf_headroom = 0;
2207         int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2208
2209         /* number of queues for statistics is number of eth queues + FCoE */
2210         u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2211
2212         /* Total number of FW statistics requests =
2213          * 1 for port stats + 1 for PF stats + potentially 2 for FCoE (fcoe proper
2214          * and fcoe l2 queue) stats + num of queues (which includes another 1
2215          * for fcoe l2 queue if applicable)
2216          */
2217         bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2218
2219         /* vf stats appear in the request list, but their data is allocated by
2220          * the VFs themselves. We don't include them in the bp->fw_stats_num as
2221          * it is used to determine where to place the vf stats queries in the
2222          * request struct
2223          */
2224         if (IS_SRIOV(bp))
2225                 vf_headroom = bnx2x_vf_headroom(bp);
2226
2227         /* Request is built from stats_query_header and an array of
2228          * stats_query_cmd_group each of which contains
2229          * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2230          * configured in the stats_query_header.
2231          */
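        /* The computation below is simply a round-up division:
         *   num_groups = DIV_ROUND_UP(fw_stats_num + vf_headroom,
         *                             STATS_QUERY_CMD_COUNT);
         */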
2232         num_groups =
2233                 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2234                  (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2235                  1 : 0));
2236
2237         DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2238            bp->fw_stats_num, vf_headroom, num_groups);
2239         bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2240                 num_groups * sizeof(struct stats_query_cmd_group);
2241
2242         /* Data for statistics requests + stats_counter
2243          * stats_counter holds per-STORM counters that are incremented
2244          * when STORM has finished with the current request.
2245          * memory for FCoE offloaded statistics is counted anyway,
2246          * even if they will not be sent.
2247          * VF stats are not accounted for here as the data of VF stats is stored
2248          * in memory allocated by the VF, not here.
2249          */
2250         bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2251                 sizeof(struct per_pf_stats) +
2252                 sizeof(struct fcoe_statistics_params) +
2253                 sizeof(struct per_queue_stats) * num_queue_stats +
2254                 sizeof(struct stats_counter);
2255
2256         bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2257                                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2258         if (!bp->fw_stats)
2259                 goto alloc_mem_err;
2260
2261         /* Set shortcuts */
2262         bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2263         bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2264         bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2265                 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2266         bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2267                 bp->fw_stats_req_sz;
2268
2269         DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2270            U64_HI(bp->fw_stats_req_mapping),
2271            U64_LO(bp->fw_stats_req_mapping));
2272         DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2273            U64_HI(bp->fw_stats_data_mapping),
2274            U64_LO(bp->fw_stats_data_mapping));
2275         return 0;
2276
2277 alloc_mem_err:
2278         bnx2x_free_fw_stats_mem(bp);
2279         BNX2X_ERR("Can't allocate FW stats memory\n");
2280         return -ENOMEM;
2281 }
2282
2283 /* send load request to mcp and analyze response */
2284 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2285 {
2286         u32 param;
2287
2288         /* init fw_seq */
2289         bp->fw_seq =
2290                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2291                  DRV_MSG_SEQ_NUMBER_MASK);
2292         BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2293
2294         /* Get current FW pulse sequence */
2295         bp->fw_drv_pulse_wr_seq =
2296                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2297                  DRV_PULSE_SEQ_MASK);
2298         BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2299
2300         param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2301
2302         if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2303                 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2304
2305         /* load request */
2306         (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2307
2308         /* if mcp fails to respond we must abort */
2309         if (!(*load_code)) {
2310                 BNX2X_ERR("MCP response failure, aborting\n");
2311                 return -EBUSY;
2312         }
2313
2314         /* If mcp refused (e.g. other port is in diagnostic mode) we
2315          * must abort
2316          */
2317         if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2318                 BNX2X_ERR("MCP refused load request, aborting\n");
2319                 return -EBUSY;
2320         }
2321         return 0;
2322 }
2323
2324 /* check whether another PF has already loaded FW to chip. In
2325  * virtualized environments a pf from another VM may have already
2326  * initialized the device including loading FW
2327  */
2328 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2329 {
2330         /* is another pf loaded on this engine? */
2331         if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2332             load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2333                 /* build my FW version dword */
2334                 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2335                         (BCM_5710_FW_MINOR_VERSION << 8) +
2336                         (BCM_5710_FW_REVISION_VERSION << 16) +
2337                         (BCM_5710_FW_ENGINEERING_VERSION << 24);
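                /* The version dword packs major in bits 0-7, minor in
                 * bits 8-15, revision in bits 16-23 and the engineering
                 * version in bits 24-31; the FW already loaded on the chip
                 * is read back in the same encoding from XSEM_REG_PRAM.
                 */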
2338
2339                 /* read loaded FW from chip */
2340                 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2341
2342                 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2343                    loaded_fw, my_fw);
2344
2345                 /* abort nic load if version mismatch */
2346                 if (my_fw != loaded_fw) {
2347                         if (print_err)
2348                                 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2349                                           loaded_fw, my_fw);
2350                         else
2351                                 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2352                                                loaded_fw, my_fw);
2353                         return -EBUSY;
2354                 }
2355         }
2356         return 0;
2357 }
2358
2359 /* returns the "mcp load_code" according to global load_count array */
2360 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2361 {
2362         int path = BP_PATH(bp);
2363
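        /* Without an MCP the driver keeps its own per-path load book-keeping:
         * slot [0] counts all loaded functions on the path and slot [1 + port]
         * counts those on this port.  The first function on the path gets
         * LOAD_COMMON, the first on the port gets LOAD_PORT and everyone else
         * gets LOAD_FUNCTION, mimicking the answer the MCP would give.
         */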
2364         DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2365            path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2366            bnx2x_load_count[path][2]);
2367         bnx2x_load_count[path][0]++;
2368         bnx2x_load_count[path][1 + port]++;
2369         DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2370            path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2371            bnx2x_load_count[path][2]);
2372         if (bnx2x_load_count[path][0] == 1)
2373                 return FW_MSG_CODE_DRV_LOAD_COMMON;
2374         else if (bnx2x_load_count[path][1 + port] == 1)
2375                 return FW_MSG_CODE_DRV_LOAD_PORT;
2376         else
2377                 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2378 }
2379
2380 /* mark PMF if applicable */
2381 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2382 {
2383         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2384             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2385             (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2386                 bp->port.pmf = 1;
2387                 /* We need the barrier to ensure the ordering between the
2388                  * writing to bp->port.pmf here and reading it from the
2389                  * bnx2x_periodic_task().
2390                  */
2391                 smp_mb();
2392         } else {
2393                 bp->port.pmf = 0;
2394         }
2395
2396         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2397 }
2398
2399 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2400 {
2401         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2402              (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2403             (bp->common.shmem2_base)) {
2404                 if (SHMEM2_HAS(bp, dcc_support))
2405                         SHMEM2_WR(bp, dcc_support,
2406                                   (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2407                                    SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2408                 if (SHMEM2_HAS(bp, afex_driver_support))
2409                         SHMEM2_WR(bp, afex_driver_support,
2410                                   SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2411         }
2412
2413         /* Set AFEX default VLAN tag to an invalid value */
2414         bp->afex_def_vlan_tag = -1;
2415 }
2416
2417 /**
2418  * bnx2x_bz_fp - zero content of the fastpath structure.
2419  *
2420  * @bp:         driver handle
2421  * @index:      fastpath index to be zeroed
2422  *
2423  * Makes sure the contents of the bp->fp[index].napi is kept
2424  * intact.
2425  */
2426 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2427 {
2428         struct bnx2x_fastpath *fp = &bp->fp[index];
2429         int cos;
2430         struct napi_struct orig_napi = fp->napi;
2431         struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2432
2433         /* bzero bnx2x_fastpath contents */
2434         if (fp->tpa_info)
2435                 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2436                        sizeof(struct bnx2x_agg_info));
2437         memset(fp, 0, sizeof(*fp));
2438
2439         /* Restore the NAPI object as it has been already initialized */
2440         fp->napi = orig_napi;
2441         fp->tpa_info = orig_tpa_info;
2442         fp->bp = bp;
2443         fp->index = index;
2444         if (IS_ETH_FP(fp))
2445                 fp->max_cos = bp->max_cos;
2446         else
2447                 /* Special queues support only one CoS */
2448                 fp->max_cos = 1;
2449
2450         /* Init txdata pointers */
2451         if (IS_FCOE_FP(fp))
2452                 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2453         if (IS_ETH_FP(fp))
2454                 for_each_cos_in_tx_queue(fp, cos)
2455                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2456                                 BNX2X_NUM_ETH_QUEUES(bp) + index];
2457
2458         /* set the tpa flag for each queue. The tpa flag determines the queue
2459          * minimal size so it must be set prior to queue memory allocation
2460          */
2461         fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2462                                   (bp->flags & GRO_ENABLE_FLAG &&
2463                                    bnx2x_mtu_allows_gro(bp->dev->mtu)));
2464         if (bp->flags & TPA_ENABLE_FLAG)
2465                 fp->mode = TPA_MODE_LRO;
2466         else if (bp->flags & GRO_ENABLE_FLAG)
2467                 fp->mode = TPA_MODE_GRO;
2468
2469         /* We don't want TPA on an FCoE L2 ring */
2470         if (IS_FCOE_FP(fp))
2471                 fp->disable_tpa = 1;
2472 }
2473
2474 int bnx2x_load_cnic(struct bnx2x *bp)
2475 {
2476         int i, rc, port = BP_PORT(bp);
2477
2478         DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2479
2480         mutex_init(&bp->cnic_mutex);
2481
2482         if (IS_PF(bp)) {
2483                 rc = bnx2x_alloc_mem_cnic(bp);
2484                 if (rc) {
2485                         BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2486                         LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2487                 }
2488         }
2489
2490         rc = bnx2x_alloc_fp_mem_cnic(bp);
2491         if (rc) {
2492                 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2493                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2494         }
2495
2496         /* Update the number of queues with the cnic queues */
2497         rc = bnx2x_set_real_num_queues(bp, 1);
2498         if (rc) {
2499                 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2500                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2501         }
2502
2503         /* Add all CNIC NAPI objects */
2504         bnx2x_add_all_napi_cnic(bp);
2505         DP(NETIF_MSG_IFUP, "cnic napi added\n");
2506         bnx2x_napi_enable_cnic(bp);
2507
2508         rc = bnx2x_init_hw_func_cnic(bp);
2509         if (rc)
2510                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2511
2512         bnx2x_nic_init_cnic(bp);
2513
2514         if (IS_PF(bp)) {
2515                 /* Enable Timer scan */
2516                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2517
2518                 /* setup cnic queues */
2519                 for_each_cnic_queue(bp, i) {
2520                         rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2521                         if (rc) {
2522                                 BNX2X_ERR("Queue setup failed\n");
2523                                 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2524                         }
2525                 }
2526         }
2527
2528         /* Initialize Rx filter. */
2529         bnx2x_set_rx_mode_inner(bp);
2530
2531         /* re-read iscsi info */
2532         bnx2x_get_iscsi_info(bp);
2533         bnx2x_setup_cnic_irq_info(bp);
2534         bnx2x_setup_cnic_info(bp);
2535         bp->cnic_loaded = true;
2536         if (bp->state == BNX2X_STATE_OPEN)
2537                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2538
2539         DP(NETIF_MSG_IFUP, "CNIC-related load finished successfully\n");
2540
2541         return 0;
2542
2543 #ifndef BNX2X_STOP_ON_ERROR
2544 load_error_cnic2:
2545         /* Disable Timer scan */
2546         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2547
2548 load_error_cnic1:
2549         bnx2x_napi_disable_cnic(bp);
2550         /* Update the number of queues without the cnic queues */
2551         if (bnx2x_set_real_num_queues(bp, 0))
2552                 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2553 load_error_cnic0:
2554         BNX2X_ERR("CNIC-related load failed\n");
2555         bnx2x_free_fp_mem_cnic(bp);
2556         bnx2x_free_mem_cnic(bp);
2557         return rc;
2558 #endif /* ! BNX2X_STOP_ON_ERROR */
2559 }
2560
2561 /* must be called with rtnl_lock */
2562 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2563 {
2564         int port = BP_PORT(bp);
2565         int i, rc = 0, load_code = 0;
2566
2567         DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2568         DP(NETIF_MSG_IFUP,
2569            "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2570
2571 #ifdef BNX2X_STOP_ON_ERROR
2572         if (unlikely(bp->panic)) {
2573                 BNX2X_ERR("Can't load NIC when there is panic\n");
2574                 return -EPERM;
2575         }
2576 #endif
2577
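        /* Rough load sequence from here on: zero and re-initialize the
         * fastpath structures, allocate driver and FW memory, negotiate the
         * load with the MCP (or emulate it when no MCP is present), init HW
         * and IRQs, set up the leading queue, the remaining queues and RSS,
         * program the MAC and, for the PMF, bring up the link before finally
         * starting the Tx path and the periodic timer.
         */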
2578         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2579
2580         /* zero the structure w/o any lock, before SP handler is initialized */
2581         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2582         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2583                 &bp->last_reported_link.link_report_flags);
2584
2585         if (IS_PF(bp))
2586                 /* must be called before memory allocation and HW init */
2587                 bnx2x_ilt_set_info(bp);
2588
2589         /*
2590          * Zero fastpath structures preserving invariants like napi, which are
2591          * allocated only once, fp index, max_cos, bp pointer.
2592          * Also set fp->disable_tpa and txdata_ptr.
2593          */
2594         DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2595         for_each_queue(bp, i)
2596                 bnx2x_bz_fp(bp, i);
2597         memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2598                                   bp->num_cnic_queues) *
2599                                   sizeof(struct bnx2x_fp_txdata));
2600
2601         bp->fcoe_init = false;
2602
2603         /* Set the receive queues buffer size */
2604         bnx2x_set_rx_buf_size(bp);
2605
2606         if (IS_PF(bp)) {
2607                 rc = bnx2x_alloc_mem(bp);
2608                 if (rc) {
2609                         BNX2X_ERR("Unable to allocate bp memory\n");
2610                         return rc;
2611                 }
2612         }
2613
2614         /* Needs to be done after alloc mem, since it self-adjusts to the
2615          * amount of memory available for RSS queues
2616          */
2617         rc = bnx2x_alloc_fp_mem(bp);
2618         if (rc) {
2619                 BNX2X_ERR("Unable to allocate memory for fps\n");
2620                 LOAD_ERROR_EXIT(bp, load_error0);
2621         }
2622
2623         /* Allocate memory for FW statistics */
2624         if (bnx2x_alloc_fw_stats_mem(bp))
2625                 LOAD_ERROR_EXIT(bp, load_error0);
2626
2627         /* request pf to initialize status blocks */
2628         if (IS_VF(bp)) {
2629                 rc = bnx2x_vfpf_init(bp);
2630                 if (rc)
2631                         LOAD_ERROR_EXIT(bp, load_error0);
2632         }
2633
2634         /* Since bnx2x_alloc_mem() may update
2635          * bp->num_queues, bnx2x_set_real_num_queues() should always
2636          * come after it. At this stage cnic queues are not counted.
2637          */
2638         rc = bnx2x_set_real_num_queues(bp, 0);
2639         if (rc) {
2640                 BNX2X_ERR("Unable to set real_num_queues\n");
2641                 LOAD_ERROR_EXIT(bp, load_error0);
2642         }
2643
2644         /* configure multi cos mappings in kernel.
2645          * this configuration may be overridden by a multi class queue
2646          * discipline or by a dcbx negotiation result.
2647          */
2648         bnx2x_setup_tc(bp->dev, bp->max_cos);
2649
2650         /* Add all NAPI objects */
2651         bnx2x_add_all_napi(bp);
2652         DP(NETIF_MSG_IFUP, "napi added\n");
2653         bnx2x_napi_enable(bp);
2654
2655         if (IS_PF(bp)) {
2656                 /* set pf load just before approaching the MCP */
2657                 bnx2x_set_pf_load(bp);
2658
2659                 /* if mcp exists send load request and analyze response */
2660                 if (!BP_NOMCP(bp)) {
2661                         /* attempt to load pf */
2662                         rc = bnx2x_nic_load_request(bp, &load_code);
2663                         if (rc)
2664                                 LOAD_ERROR_EXIT(bp, load_error1);
2665
2666                         /* what did mcp say? */
2667                         rc = bnx2x_compare_fw_ver(bp, load_code, true);
2668                         if (rc) {
2669                                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2670                                 LOAD_ERROR_EXIT(bp, load_error2);
2671                         }
2672                 } else {
2673                         load_code = bnx2x_nic_load_no_mcp(bp, port);
2674                 }
2675
2676                 /* mark pmf if applicable */
2677                 bnx2x_nic_load_pmf(bp, load_code);
2678
2679                 /* Init Function state controlling object */
2680                 bnx2x__init_func_obj(bp);
2681
2682                 /* Initialize HW */
2683                 rc = bnx2x_init_hw(bp, load_code);
2684                 if (rc) {
2685                         BNX2X_ERR("HW init failed, aborting\n");
2686                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2687                         LOAD_ERROR_EXIT(bp, load_error2);
2688                 }
2689         }
2690
2691         bnx2x_pre_irq_nic_init(bp);
2692
2693         /* Connect to IRQs */
2694         rc = bnx2x_setup_irqs(bp);
2695         if (rc) {
2696                 BNX2X_ERR("setup irqs failed\n");
2697                 if (IS_PF(bp))
2698                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2699                 LOAD_ERROR_EXIT(bp, load_error2);
2700         }
2701
2702         /* Init per-function objects */
2703         if (IS_PF(bp)) {
2704                 /* Setup NIC internals and enable interrupts */
2705                 bnx2x_post_irq_nic_init(bp, load_code);
2706
2707                 bnx2x_init_bp_objs(bp);
2708                 bnx2x_iov_nic_init(bp);
2709
2710                 /* Set AFEX default VLAN tag to an invalid value */
2711                 bp->afex_def_vlan_tag = -1;
2712                 bnx2x_nic_load_afex_dcc(bp, load_code);
2713                 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2714                 rc = bnx2x_func_start(bp);
2715                 if (rc) {
2716                         BNX2X_ERR("Function start failed!\n");
2717                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2718
2719                         LOAD_ERROR_EXIT(bp, load_error3);
2720                 }
2721
2722                 /* Send LOAD_DONE command to MCP */
2723                 if (!BP_NOMCP(bp)) {
2724                         load_code = bnx2x_fw_command(bp,
2725                                                      DRV_MSG_CODE_LOAD_DONE, 0);
2726                         if (!load_code) {
2727                                 BNX2X_ERR("MCP response failure, aborting\n");
2728                                 rc = -EBUSY;
2729                                 LOAD_ERROR_EXIT(bp, load_error3);
2730                         }
2731                 }
2732
2733                 /* initialize FW coalescing state machines in RAM */
2734                 bnx2x_update_coalesce(bp);
2735         }
2736
2737         /* setup the leading queue */
2738         rc = bnx2x_setup_leading(bp);
2739         if (rc) {
2740                 BNX2X_ERR("Setup leading failed!\n");
2741                 LOAD_ERROR_EXIT(bp, load_error3);
2742         }
2743
2744         /* set up the rest of the queues */
2745         for_each_nondefault_eth_queue(bp, i) {
2746                 if (IS_PF(bp))
2747                         rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2748                 else /* VF */
2749                         rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2750                 if (rc) {
2751                         BNX2X_ERR("Queue %d setup failed\n", i);
2752                         LOAD_ERROR_EXIT(bp, load_error3);
2753                 }
2754         }
2755
2756         /* setup rss */
2757         rc = bnx2x_init_rss(bp);
2758         if (rc) {
2759                 BNX2X_ERR("PF RSS init failed\n");
2760                 LOAD_ERROR_EXIT(bp, load_error3);
2761         }
2762
2763         /* Now when Clients are configured we are ready to work */
2764         bp->state = BNX2X_STATE_OPEN;
2765
2766         /* Configure a ucast MAC */
2767         if (IS_PF(bp))
2768                 rc = bnx2x_set_eth_mac(bp, true);
2769         else /* vf */
2770                 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2771                                            true);
2772         if (rc) {
2773                 BNX2X_ERR("Setting Ethernet MAC failed\n");
2774                 LOAD_ERROR_EXIT(bp, load_error3);
2775         }
2776
2777         if (IS_PF(bp) && bp->pending_max) {
2778                 bnx2x_update_max_mf_config(bp, bp->pending_max);
2779                 bp->pending_max = 0;
2780         }
2781
2782         if (bp->port.pmf) {
2783                 rc = bnx2x_initial_phy_init(bp, load_mode);
2784                 if (rc)
2785                         LOAD_ERROR_EXIT(bp, load_error3);
2786         }
2787         bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2788
2789         /* Start fast path */
2790
2791         /* Initialize Rx filter. */
2792         bnx2x_set_rx_mode_inner(bp);
2793
2794         /* Start the Tx */
2795         switch (load_mode) {
2796         case LOAD_NORMAL:
2797                 /* Tx queues should only be re-enabled */
2798                 netif_tx_wake_all_queues(bp->dev);
2799                 break;
2800
2801         case LOAD_OPEN:
2802                 netif_tx_start_all_queues(bp->dev);
2803                 smp_mb__after_atomic();
2804                 break;
2805
2806         case LOAD_DIAG:
2807         case LOAD_LOOPBACK_EXT:
2808                 bp->state = BNX2X_STATE_DIAG;
2809                 break;
2810
2811         default:
2812                 break;
2813         }
2814
2815         if (bp->port.pmf)
2816                 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2817         else
2818                 bnx2x__link_status_update(bp);
2819
2820         /* start the timer */
2821         mod_timer(&bp->timer, jiffies + bp->current_interval);
2822
2823         if (CNIC_ENABLED(bp))
2824                 bnx2x_load_cnic(bp);
2825
2826         if (IS_PF(bp))
2827                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2828
2829         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2830                 /* mark that the driver is loaded in shmem2 */
2831                 u32 val;
2832                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2833                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2834                           val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2835                           DRV_FLAGS_CAPABILITIES_LOADED_L2);
2836         }
2837
2838         /* Wait for all pending SP commands to complete */
2839         if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2840                 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2841                 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2842                 return -EBUSY;
2843         }
2844
2845         /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2846         if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2847                 bnx2x_dcbx_init(bp, false);
2848
2849         DP(NETIF_MSG_IFUP, "NIC load completed successfully\n");
2850
2851         return 0;
2852
2853 #ifndef BNX2X_STOP_ON_ERROR
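/* Error unwind for the load path above: load_error3 tears down interrupts
 * and the Rx/Tx resources, load_error2 reports the unload to the MCP,
 * load_error1 removes the NAPI objects and clears the pf_load bit, and
 * load_error0 frees the remaining memory.  (When BNX2X_STOP_ON_ERROR is set,
 * LOAD_ERROR_EXIT() presumably stops on the error instead, so these labels
 * are compiled out.)
 */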
2854 load_error3:
2855         if (IS_PF(bp)) {
2856                 bnx2x_int_disable_sync(bp, 1);
2857
2858                 /* Clean queueable objects */
2859                 bnx2x_squeeze_objects(bp);
2860         }
2861
2862         /* Free SKBs, SGEs, TPA pool and driver internals */
2863         bnx2x_free_skbs(bp);
2864         for_each_rx_queue(bp, i)
2865                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2866
2867         /* Release IRQs */
2868         bnx2x_free_irq(bp);
2869 load_error2:
2870         if (IS_PF(bp) && !BP_NOMCP(bp)) {
2871                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2872                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2873         }
2874
2875         bp->port.pmf = 0;
2876 load_error1:
2877         bnx2x_napi_disable(bp);
2878         bnx2x_del_all_napi(bp);
2879
2880         /* clear pf_load status, as it was already set */
2881         if (IS_PF(bp))
2882                 bnx2x_clear_pf_load(bp);
2883 load_error0:
2884         bnx2x_free_fw_stats_mem(bp);
2885         bnx2x_free_fp_mem(bp);
2886         bnx2x_free_mem(bp);
2887
2888         return rc;
2889 #endif /* ! BNX2X_STOP_ON_ERROR */
2890 }
2891
2892 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2893 {
2894         int rc = 0, cos, i;
2895
2896         /* Wait until tx fastpath tasks complete */
2897         for_each_tx_queue(bp, i) {
2898                 struct bnx2x_fastpath *fp = &bp->fp[i];
2899
2900                 for_each_cos_in_tx_queue(fp, cos)
2901                         rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2902                 if (rc)
2903                         return rc;
2904         }
2905         return 0;
2906 }
2907
2908 /* must be called with rtnl_lock */
2909 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2910 {
2911         int i;
2912         bool global = false;
2913
2914         DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2915
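        /* Teardown order below: flag the unload in shmem2, quiesce the Tx
         * path and the timer, flush statistics, drain the Tx queues, then
         * either let the PF clean up the chip (or send UNLOAD_REQ in the
         * recovery case) or, for a VF, notify the PF over the VF-PF channel;
         * finally release IRQs/NAPI, free all fastpath memory and move the
         * state machine to BNX2X_STATE_CLOSED.
         */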
2916         /* mark that the driver is unloaded in shmem2 */
2917         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2918                 u32 val;
2919                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2920                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2921                           val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2922         }
2923
2924         if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2925             (bp->state == BNX2X_STATE_CLOSED ||
2926              bp->state == BNX2X_STATE_ERROR)) {
2927                 /* We can get here if the driver has been unloaded
2928                  * during parity error recovery and is either waiting for a
2929                  * leader to complete or for other functions to unload and
2930                  * then ifdown has been issued. In this case we want to
2931                  * unload and let the other functions complete the recovery
2932                  * process.
2933                  */
2934                 bp->recovery_state = BNX2X_RECOVERY_DONE;
2935                 bp->is_leader = 0;
2936                 bnx2x_release_leader_lock(bp);
2937                 smp_mb();
2938
2939                 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2940                 BNX2X_ERR("Can't unload in closed or error state\n");
2941                 return -EINVAL;
2942         }
2943
2944         /* Nothing to do during unload if the previous bnx2x_nic_load()
2945          * has not completed successfully - all resources are released.
2946          *
2947          * We can get here only after an unsuccessful ndo_* callback, during
2948          * which the dev->IFF_UP flag is still on.
2949          */
2950         if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2951                 return 0;
2952
2953         /* It's important to set bp->state to a value different from
2954          * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2955          * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2956          */
2957         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2958         smp_mb();
2959
2960         /* indicate to VFs that the PF is going down */
2961         bnx2x_iov_channel_down(bp);
2962
2963         if (CNIC_LOADED(bp))
2964                 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2965
2966         /* Stop Tx */
2967         bnx2x_tx_disable(bp);
2968         netdev_reset_tc(bp->dev);
2969
2970         bp->rx_mode = BNX2X_RX_MODE_NONE;
2971
2972         del_timer_sync(&bp->timer);
2973
2974         if (IS_PF(bp)) {
2975                 /* Set ALWAYS_ALIVE bit in shmem */
2976                 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2977                 bnx2x_drv_pulse(bp);
2978                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2979                 bnx2x_save_statistics(bp);
2980         }
2981
2982         /* wait till consumers catch up with producers in all queues */
2983         bnx2x_drain_tx_queues(bp);
2984
2985                 /* If a VF, indicate to the PF that this function is going down
2986                  * (the PF will delete SP elements and clear initializations).
2987                  */
2988         if (IS_VF(bp))
2989                 bnx2x_vfpf_close_vf(bp);
2990         else if (unload_mode != UNLOAD_RECOVERY)
2991                 /* if this is a normal/close unload, we need to clean up the chip */
2992                 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2993         else {
2994                 /* Send the UNLOAD_REQUEST to the MCP */
2995                 bnx2x_send_unload_req(bp, unload_mode);
2996
2997                 /* Prevent transactions to the host from the functions on the
2998                  * engine that does not reset the global blocks in case of a
2999                  * global attention, once the global blocks are reset and the
3000                  * gates are opened (i.e. the engine whose leader will perform
3001                  * the recovery last).
3002                  */
3003                 if (!CHIP_IS_E1x(bp))
3004                         bnx2x_pf_disable(bp);
3005
3006                 /* Disable HW interrupts, NAPI */
3007                 bnx2x_netif_stop(bp, 1);
3008                 /* Delete all NAPI objects */
3009                 bnx2x_del_all_napi(bp);
3010                 if (CNIC_LOADED(bp))
3011                         bnx2x_del_all_napi_cnic(bp);
3012                 /* Release IRQs */
3013                 bnx2x_free_irq(bp);
3014
3015                 /* Report UNLOAD_DONE to MCP */
3016                 bnx2x_send_unload_done(bp, false);
3017         }
3018
3019         /*
3020          * At this stage no more interrupts will arrive so we may safely clean
3021          * the queueable objects here in case they failed to get cleaned so far.
3022          */
3023         if (IS_PF(bp))
3024                 bnx2x_squeeze_objects(bp);
3025
3026         /* There should be no more pending SP commands at this stage */
3027         bp->sp_state = 0;
3028
3029         bp->port.pmf = 0;
3030
3031         /* clear pending work in rtnl task */
3032         bp->sp_rtnl_state = 0;
3033         smp_mb();
3034
3035         /* Free SKBs, SGEs, TPA pool and driver internals */
3036         bnx2x_free_skbs(bp);
3037         if (CNIC_LOADED(bp))
3038                 bnx2x_free_skbs_cnic(bp);
3039         for_each_rx_queue(bp, i)
3040                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3041
3042         bnx2x_free_fp_mem(bp);
3043         if (CNIC_LOADED(bp))
3044                 bnx2x_free_fp_mem_cnic(bp);
3045
3046         if (IS_PF(bp)) {
3047                 if (CNIC_LOADED(bp))
3048                         bnx2x_free_mem_cnic(bp);
3049         }
3050         bnx2x_free_mem(bp);
3051
3052         bp->state = BNX2X_STATE_CLOSED;
3053         bp->cnic_loaded = false;
3054
3055         /* Clear driver version indication in shmem */
3056         if (IS_PF(bp))
3057                 bnx2x_update_mng_version(bp);
3058
3059         /* Check if there are pending parity attentions. If there are - set
3060          * RECOVERY_IN_PROGRESS.
3061          */
3062         if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3063                 bnx2x_set_reset_in_progress(bp);
3064
3065                 /* Set RESET_IS_GLOBAL if needed */
3066                 if (global)
3067                         bnx2x_set_reset_global(bp);
3068         }
3069
3070         /* The last driver must disable the "close the gate" functionality
3071          * if there is no parity attention or "process kill" pending.
3072          */
3073         if (IS_PF(bp) &&
3074             !bnx2x_clear_pf_load(bp) &&
3075             bnx2x_reset_is_done(bp, BP_PATH(bp)))
3076                 bnx2x_disable_close_the_gate(bp);
3077
3078         DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3079
3080         return 0;
3081 }
3082
3083 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3084 {
3085         u16 pmcsr;
3086
3087         /* If there is no power capability, silently succeed */
3088         if (!bp->pdev->pm_cap) {
3089                 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3090                 return 0;
3091         }
3092
3093         pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3094
3095         switch (state) {
3096         case PCI_D0:
3097                 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3098                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3099                                        PCI_PM_CTRL_PME_STATUS));
3100
3101                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3102                         /* delay required during transition out of D3hot */
3103                         msleep(20);
3104                 break;
3105
3106         case PCI_D3hot:
3107                 /* If there are other clients above, don't
3108                    shut down the power */
3109                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3110                         return 0;
3111                 /* Don't shut down the power for emulation and FPGA */
3112                 if (CHIP_REV_IS_SLOW(bp))
3113                         return 0;
3114
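                /* Request D3hot: 3 is the D3hot encoding of the PMCSR
                 * power-state field masked just below.
                 */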
3115                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3116                 pmcsr |= 3;
3117
3118                 if (bp->wol)
3119                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3120
3121                 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3122                                       pmcsr);
3123
3124                 /* No more memory access after this point until
3125                  * device is brought back to D0.
3126                  */
3127                 break;
3128
3129         default:
3130                 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3131                 return -EINVAL;
3132         }
3133         return 0;
3134 }
3135
3136 /*
3137  * net_device service functions
3138  */
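/* NAPI poll handler for a single fastpath: service every Tx CoS ring that
 * has completions (Tx work is not bounded by the budget), then Rx up to the
 * remaining budget.  NAPI is completed and the interrupt re-enabled (via
 * bnx2x_ack_sb() with IGU_INT_ENABLE) only when both Rx and Tx are idle and
 * the status block index has been re-read; otherwise we return so that NAPI
 * will poll again.
 */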
3139 static int bnx2x_poll(struct napi_struct *napi, int budget)
3140 {
3141         int work_done = 0;
3142         u8 cos;
3143         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3144                                                  napi);
3145         struct bnx2x *bp = fp->bp;
3146
3147         while (1) {
3148 #ifdef BNX2X_STOP_ON_ERROR
3149                 if (unlikely(bp->panic)) {
3150                         napi_complete(napi);
3151                         return 0;
3152                 }
3153 #endif
3154                 if (!bnx2x_fp_lock_napi(fp))
3155                         return work_done;
3156
3157                 for_each_cos_in_tx_queue(fp, cos)
3158                         if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3159                                 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3160
3161                 if (bnx2x_has_rx_work(fp)) {
3162                         work_done += bnx2x_rx_int(fp, budget - work_done);
3163
3164                         /* must not complete if we consumed full budget */
3165                         if (work_done >= budget) {
3166                                 bnx2x_fp_unlock_napi(fp);
3167                                 break;
3168                         }
3169                 }
3170
3171                 /* Fall out from the NAPI loop if needed */
3172                 if (!bnx2x_fp_unlock_napi(fp) &&
3173                     !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3174
3175                         /* No need to update SB for FCoE L2 ring as long as
3176                          * it's connected to the default SB and the SB
3177                          * has been updated when NAPI was scheduled.
3178                          */
3179                         if (IS_FCOE_FP(fp)) {
3180                                 napi_complete(napi);
3181                                 break;
3182                         }
3183                         bnx2x_update_fpsb_idx(fp);
3184                         /* bnx2x_has_rx_work() reads the status block,
3185                          * thus we need to ensure that status block indices
3186                          * have been actually read (bnx2x_update_fpsb_idx)
3187                          * prior to this check (bnx2x_has_rx_work) so that
3188                          * we won't write the "newer" value of the status block
3189                          * to IGU (if there was a DMA right after
3190                          * bnx2x_has_rx_work and if there is no rmb, the memory
3191                          * reading (bnx2x_update_fpsb_idx) may be postponed
3192                          * to right before bnx2x_ack_sb). In this case there
3193                          * will never be another interrupt until there is
3194                          * another update of the status block, while there
3195                          * is still unhandled work.
3196                          */
3197                         rmb();
3198
3199                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3200                                 napi_complete(napi);
3201                                 /* Re-enable interrupts */
3202                                 DP(NETIF_MSG_RX_STATUS,
3203                                    "Update index to %d\n", fp->fp_hc_idx);
3204                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3205                                              le16_to_cpu(fp->fp_hc_idx),
3206                                              IGU_INT_ENABLE, 1);
3207                                 break;
3208                         }
3209                 }
3210         }
3211
3212         return work_done;
3213 }
3214
3215 #ifdef CONFIG_NET_RX_BUSY_POLL
3216 /* must be called with local_bh_disable()d */
3217 int bnx2x_low_latency_recv(struct napi_struct *napi)
3218 {
3219         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3220                                                  napi);
3221         struct bnx2x *bp = fp->bp;
3222         int found = 0;
3223
3224         if ((bp->state == BNX2X_STATE_CLOSED) ||
3225             (bp->state == BNX2X_STATE_ERROR) ||
3226             (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3227                 return LL_FLUSH_FAILED;
3228
3229         if (!bnx2x_fp_lock_poll(fp))
3230                 return LL_FLUSH_BUSY;
3231
3232         if (bnx2x_has_rx_work(fp))
3233                 found = bnx2x_rx_int(fp, 4);
3234
3235         bnx2x_fp_unlock_poll(fp);
3236
3237         return found;
3238 }
3239 #endif
3240
3241 /* We split the first BD into a headers BD and a data BD
3242  * to ease the pain of our fellow microcode engineers;
3243  * we use one DMA mapping for both BDs.
3244  */
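/* Example: with hlen == 66, the start BD is trimmed to describe bytes
 * [0, 66) of the original mapping (the headers) and a new data BD is chained
 * right after the PBD to describe bytes [66, old_len) at mapping + 66.
 */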
3245 static u16 bnx2x_tx_split(struct bnx2x *bp,
3246                           struct bnx2x_fp_txdata *txdata,
3247                           struct sw_tx_bd *tx_buf,
3248                           struct eth_tx_start_bd **tx_bd, u16 hlen,
3249                           u16 bd_prod)
3250 {
3251         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3252         struct eth_tx_bd *d_tx_bd;
3253         dma_addr_t mapping;
3254         int old_len = le16_to_cpu(h_tx_bd->nbytes);
3255
3256         /* first fix first BD */
3257         h_tx_bd->nbytes = cpu_to_le16(hlen);
3258
3259         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3260            h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3261
3262         /* now get a new data BD
3263          * (after the pbd) and fill it */
3264         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3265         d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3266
3267         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3268                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3269
3270         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3271         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3272         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3273
3274         /* this marks the BD as one that has no individual mapping */
3275         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3276
3277         DP(NETIF_MSG_TX_QUEUED,
3278            "TSO split data size is %d (%x:%x)\n",
3279            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3280
3281         /* update tx_bd */
3282         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3283
3284         return bd_prod;
3285 }
3286
3287 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3288 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
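/* bnx2x_csum_fix - adjust a partial checksum around the transport header.
 *
 * For fix > 0 the partial sum of the @fix bytes preceding @t_header is
 * subtracted from @csum; for fix < 0 the partial sum of the first -@fix
 * bytes at @t_header is added instead.  The result is folded, complemented
 * and byte-swapped into the __le16 form stored in the parsing BD (see the
 * "HW bug" fixup in bnx2x_set_pbd_csum()).
 */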
3289 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3290 {
3291         __sum16 tsum = (__force __sum16) csum;
3292
3293         if (fix > 0)
3294                 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3295                                   csum_partial(t_header - fix, fix, 0)));
3296
3297         else if (fix < 0)
3298                 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3299                                   csum_partial(t_header, -fix, 0)));
3300
3301         return bswab16(tsum);
3302 }
3303
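/* Classify an skb into a bitmask of XMIT_* flags: plain, IPv4/IPv6 checksum,
 * TCP vs. UDP, GSO, and (on !CHIP_IS_E1x chips only) the encapsulated
 * *_ENC_* variants.  The rest of the Tx path keys the PBD/BD setup off this
 * mask.
 */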
3304 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3305 {
3306         u32 rc;
3307         __u8 prot = 0;
3308         __be16 protocol;
3309
3310         if (skb->ip_summed != CHECKSUM_PARTIAL)
3311                 return XMIT_PLAIN;
3312
3313         protocol = vlan_get_protocol(skb);
3314         if (protocol == htons(ETH_P_IPV6)) {
3315                 rc = XMIT_CSUM_V6;
3316                 prot = ipv6_hdr(skb)->nexthdr;
3317         } else {
3318                 rc = XMIT_CSUM_V4;
3319                 prot = ip_hdr(skb)->protocol;
3320         }
3321
3322         if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3323                 if (inner_ip_hdr(skb)->version == 6) {
3324                         rc |= XMIT_CSUM_ENC_V6;
3325                         if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3326                                 rc |= XMIT_CSUM_TCP;
3327                 } else {
3328                         rc |= XMIT_CSUM_ENC_V4;
3329                         if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3330                                 rc |= XMIT_CSUM_TCP;
3331                 }
3332         }
3333         if (prot == IPPROTO_TCP)
3334                 rc |= XMIT_CSUM_TCP;
3335
3336         if (skb_is_gso(skb)) {
3337                 if (skb_is_gso_v6(skb)) {
3338                         rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3339                         if (rc & XMIT_CSUM_ENC)
3340                                 rc |= XMIT_GSO_ENC_V6;
3341                 } else {
3342                         rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3343                         if (rc & XMIT_CSUM_ENC)
3344                                 rc |= XMIT_GSO_ENC_V4;
3345                 }
3346         }
3347
3348         return rc;
3349 }
3350
3351 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3352 /* Check if the packet requires linearization (i.e. it is too fragmented).
3353    There is no need to check fragmentation if the page size > 8K (there
3354    will be no violation of FW restrictions). */
3355 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3356                              u32 xmit_type)
3357 {
3358         int to_copy = 0;
3359         int hlen = 0;
3360         int first_bd_sz = 0;
3361
3362         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3363         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3364
3365                 if (xmit_type & XMIT_GSO) {
3366                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3367                         /* Check if LSO packet needs to be copied:
3368                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3369                         int wnd_size = MAX_FETCH_BD - 3;
3370                         /* Number of windows to check */
3371                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3372                         int wnd_idx = 0;
3373                         int frag_idx = 0;
3374                         u32 wnd_sum = 0;
3375
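                        /* Sliding-window check: every window of wnd_size
                         * consecutive BDs (linear data + frags) must carry at
                         * least lso_mss bytes; if any window falls short, the
                         * skb has to be linearized to satisfy the FW
                         * restriction mentioned in the header comment.
                         */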
3376                         /* Headers length */
3377                         hlen = (int)(skb_transport_header(skb) - skb->data) +
3378                                 tcp_hdrlen(skb);
3379
3380                         /* Amount of data (w/o headers) in the linear part of the SKB */
3381                         first_bd_sz = skb_headlen(skb) - hlen;
3382
3383                         wnd_sum  = first_bd_sz;
3384
3385                         /* Calculate the first sum - it's special */
3386                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3387                                 wnd_sum +=
3388                                         skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3389
3390                         /* If there was data in the linear part of the skb - check it */
3391                         if (first_bd_sz > 0) {
3392                                 if (unlikely(wnd_sum < lso_mss)) {
3393                                         to_copy = 1;
3394                                         goto exit_lbl;
3395                                 }
3396
3397                                 wnd_sum -= first_bd_sz;
3398                         }
3399
3400                         /* Others are easier: run through the frag list and
3401                            check all windows */
3402                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3403                                 wnd_sum +=
3404                           skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3405
3406                                 if (unlikely(wnd_sum < lso_mss)) {
3407                                         to_copy = 1;
3408                                         break;
3409                                 }
3410                                 wnd_sum -=
3411                                         skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3412                         }
3413                 } else {
3414                         /* in the non-LSO case, a too-fragmented packet
3415                            should always be linearized */
3416                         to_copy = 1;
3417                 }
3418         }
3419
3420 exit_lbl:
3421         if (unlikely(to_copy))
3422                 DP(NETIF_MSG_TX_QUEUED,
3423                    "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3424                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3425                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3426
3427         return to_copy;
3428 }
3429 #endif
3430
3431 static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3432                                  u32 xmit_type)
3433 {
3434         struct ipv6hdr *ipv6;
3435
3436         *parsing_data |= (skb_shinfo(skb)->gso_size <<
3437                               ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3438                               ETH_TX_PARSE_BD_E2_LSO_MSS;
3439
3440         if (xmit_type & XMIT_GSO_ENC_V6)
3441                 ipv6 = inner_ipv6_hdr(skb);
3442         else if (xmit_type & XMIT_GSO_V6)
3443                 ipv6 = ipv6_hdr(skb);
3444         else
3445                 ipv6 = NULL;
3446
3447         if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3448                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3449 }
3450
3451 /**
3452  * bnx2x_set_pbd_gso - update PBD in GSO case.
3453  *
3454  * @skb:        packet skb
3455  * @pbd:        parse BD
3456  * @xmit_type:  xmit flags
3457  */
3458 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3459                               struct eth_tx_parse_bd_e1x *pbd,
3460                               struct eth_tx_start_bd *tx_start_bd,
3461                               u32 xmit_type)
3462 {
3463         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3464         pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3465         pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3466
3467         if (xmit_type & XMIT_GSO_V4) {
3468                 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3469                 pbd->tcp_pseudo_csum =
3470                         bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3471                                                    ip_hdr(skb)->daddr,
3472                                                    0, IPPROTO_TCP, 0));
3473
3474                 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3475                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3476         } else {
3477                 pbd->tcp_pseudo_csum =
3478                         bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3479                                                  &ipv6_hdr(skb)->daddr,
3480                                                  0, IPPROTO_TCP, 0));
3481         }
3482
3483         pbd->global_data |=
3484                 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3485 }
3486
3487 /**
3488  * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3489  *
3490  * @bp:                 driver handle
3491  * @skb:                packet skb
3492  * @parsing_data:       data to be updated
3493  * @xmit_type:          xmit flags
3494  *
3495  * 57712/578xx related, when skb has encapsulation
3496  */
3497 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3498                                  u32 *parsing_data, u32 xmit_type)
3499 {
3500         *parsing_data |=
3501                 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3502                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3503                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3504
3505         if (xmit_type & XMIT_CSUM_TCP) {
3506                 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3507                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3508                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3509
3510                 return skb_inner_transport_header(skb) +
3511                         inner_tcp_hdrlen(skb) - skb->data;
3512         }
3513
3514         /* We support checksum offload for TCP and UDP only.
3515          * No need to pass the UDP header length - it's a constant.
3516          */
3517         return skb_inner_transport_header(skb) +
3518                 sizeof(struct udphdr) - skb->data;
3519 }
3520
3521 /**
3522  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3523  *
3524  * @bp:                 driver handle
3525  * @skb:                packet skb
3526  * @parsing_data:       data to be updated
3527  * @xmit_type:          xmit flags
3528  *
3529  * 57712/578xx related
3530  */
3531 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3532                                 u32 *parsing_data, u32 xmit_type)
3533 {
3534         *parsing_data |=
3535                 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3536                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3537                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3538
3539         if (xmit_type & XMIT_CSUM_TCP) {
3540                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3541                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3542                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3543
3544                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3545         }
3546         /* We support checksum offload for TCP and UDP only.
3547          * No need to pass the UDP header length - it's a constant.
3548          */
3549         return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3550 }
3551
3552 /* set FW indication according to inner or outer protocols if tunneled */
3553 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3554                                struct eth_tx_start_bd *tx_start_bd,
3555                                u32 xmit_type)
3556 {
3557         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3558
3559         if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3560                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3561
3562         if (!(xmit_type & XMIT_CSUM_TCP))
3563                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3564 }
3565
3566 /**
3567  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3568  *
3569  * @bp:         driver handle
3570  * @skb:        packet skb
3571  * @pbd:        parse BD to be updated
3572  * @xmit_type:  xmit flags
3573  */
3574 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3575                              struct eth_tx_parse_bd_e1x *pbd,
3576                              u32 xmit_type)
3577 {
3578         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3579
3580         /* for now NS flag is not used in Linux */
3581         pbd->global_data =
3582                 cpu_to_le16(hlen |
3583                             ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3584                              ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3585
3586         pbd->ip_hlen_w = (skb_transport_header(skb) -
3587                         skb_network_header(skb)) >> 1;
3588
3589         hlen += pbd->ip_hlen_w;
3590
3591         /* We support checksum offload for TCP and UDP only */
3592         if (xmit_type & XMIT_CSUM_TCP)
3593                 hlen += tcp_hdrlen(skb) / 2;
3594         else
3595                 hlen += sizeof(struct udphdr) / 2;
3596
3597         pbd->total_hlen_w = cpu_to_le16(hlen);
3598         hlen = hlen*2;
3599
3600         if (xmit_type & XMIT_CSUM_TCP) {
3601                 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3602
3603         } else {
3604                 s8 fix = SKB_CS_OFF(skb); /* signed! */
3605
3606                 DP(NETIF_MSG_TX_QUEUED,
3607                    "hlen %d  fix %d  csum before fix %x\n",
3608                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3609
3610                 /* HW bug: fixup the CSUM */
3611                 pbd->tcp_pseudo_csum =
3612                         bnx2x_csum_fix(skb_transport_header(skb),
3613                                        SKB_CS(skb), fix);
3614
3615                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3616                    pbd->tcp_pseudo_csum);
3617         }
3618
3619         return hlen;
3620 }
3621
3622 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3623                                       struct eth_tx_parse_bd_e2 *pbd_e2,
3624                                       struct eth_tx_parse_2nd_bd *pbd2,
3625                                       u16 *global_data,
3626                                       u32 xmit_type)
3627 {
3628         u16 hlen_w = 0;
3629         u8 outerip_off, outerip_len = 0;
3630
3631         /* from outer IP to transport */
3632         hlen_w = (skb_inner_transport_header(skb) -
3633                   skb_network_header(skb)) >> 1;
3634
3635         /* transport len */
3636         hlen_w += inner_tcp_hdrlen(skb) >> 1;
3637
3638         pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3639
3640         /* outer IP header info */
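        /* For an IPv4 outer header, hand the FW the IP header checksum with
         * the tot_len and frag_off contributions backed out (one's-complement
         * arithmetic below); the field name suggests the FW re-adds the
         * per-segment values itself.  IPv6 has no header checksum, so only
         * the header-to-payload length is adjusted.
         */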
3641         if (xmit_type & XMIT_CSUM_V4) {
3642                 struct iphdr *iph = ip_hdr(skb);
3643                 u32 csum = (__force u32)(~iph->check) -
3644                            (__force u32)iph->tot_len -
3645                            (__force u32)iph->frag_off;
3646
3647                 pbd2->fw_ip_csum_wo_len_flags_frag =
3648                         bswab16(csum_fold((__force __wsum)csum));
3649         } else {
3650                 pbd2->fw_ip_hdr_to_payload_w =
3651                         hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3652         }
3653
3654         pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3655
3656         pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3657
3658         if (xmit_type & XMIT_GSO_V4) {
3659                 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3660
3661                 pbd_e2->data.tunnel_data.pseudo_csum =
3662                         bswab16(~csum_tcpudp_magic(
3663                                         inner_ip_hdr(skb)->saddr,
3664                                         inner_ip_hdr(skb)->daddr,
3665                                         0, IPPROTO_TCP, 0));
3666
3667                 outerip_len = ip_hdr(skb)->ihl << 1;
3668         } else {
3669                 pbd_e2->data.tunnel_data.pseudo_csum =
3670                         bswab16(~csum_ipv6_magic(
3671                                         &inner_ipv6_hdr(skb)->saddr,
3672                                         &inner_ipv6_hdr(skb)->daddr,
3673                                         0, IPPROTO_TCP, 0));
3674         }
3675
3676         outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3677
3678         *global_data |=
3679                 outerip_off |
3680                 (!!(xmit_type & XMIT_CSUM_V6) <<
3681                         ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3682                 (outerip_len <<
3683                         ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3684                 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3685                         ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3686
3687         if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3688                 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3689                 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3690         }
3691 }
3692
3693 /* called with netif_tx_lock
3694  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3695  * netif_wake_queue()
3696  */
3697 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3698 {
3699         struct bnx2x *bp = netdev_priv(dev);
3700
3701         struct netdev_queue *txq;
3702         struct bnx2x_fp_txdata *txdata;
3703         struct sw_tx_bd *tx_buf;
3704         struct eth_tx_start_bd *tx_start_bd, *first_bd;
3705         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3706         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3707         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3708         struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3709         u32 pbd_e2_parsing_data = 0;
3710         u16 pkt_prod, bd_prod;
3711         int nbd, txq_index;
3712         dma_addr_t mapping;
3713         u32 xmit_type = bnx2x_xmit_type(bp, skb);
3714         int i;
3715         u8 hlen = 0;
3716         __le16 pkt_size = 0;
3717         struct ethhdr *eth;
3718         u8 mac_type = UNICAST_ADDRESS;
3719
3720 #ifdef BNX2X_STOP_ON_ERROR
3721         if (unlikely(bp->panic))
3722                 return NETDEV_TX_BUSY;
3723 #endif
3724
3725         txq_index = skb_get_queue_mapping(skb);
3726         txq = netdev_get_tx_queue(dev, txq_index);
3727
3728         BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3729
3730         txdata = &bp->bnx2x_txq[txq_index];
3731
3732         /* enable this debug print to view the transmission queue being used
3733         DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3734            txq_index, fp_index, txdata_index); */
3735
3736         /* enable this debug print to view the transmission details
3737         DP(NETIF_MSG_TX_QUEUED,
3738            "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3739            txdata->cid, fp_index, txdata_index, txdata, fp); */
3740
3741         if (unlikely(bnx2x_tx_avail(bp, txdata) <
3742                         skb_shinfo(skb)->nr_frags +
3743                         BDS_PER_TX_PKT +
3744                         NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3745                 /* Handle special storage cases separately */
3746                 if (txdata->tx_ring_size == 0) {
3747                         struct bnx2x_eth_q_stats *q_stats =
3748                                 bnx2x_fp_qstats(bp, txdata->parent_fp);
3749                         q_stats->driver_filtered_tx_pkt++;
3750                         dev_kfree_skb(skb);
3751                         return NETDEV_TX_OK;
3752                 }
3753                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3754                 netif_tx_stop_queue(txq);
3755                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3756
3757                 return NETDEV_TX_BUSY;
3758         }
3759
3760         DP(NETIF_MSG_TX_QUEUED,
3761            "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3762            txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3763            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3764            skb->len);
3765
3766         eth = (struct ethhdr *)skb->data;
3767
3768         /* set flag according to packet type (UNICAST_ADDRESS is the default) */
3769         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3770                 if (is_broadcast_ether_addr(eth->h_dest))
3771                         mac_type = BROADCAST_ADDRESS;
3772                 else
3773                         mac_type = MULTICAST_ADDRESS;
3774         }
3775
3776 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3777         /* First, check if we need to linearize the skb (due to FW
3778            restrictions). No need to check fragmentation if page size > 8K
3779            (there will be no violation of FW restrictions) */
3780         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3781                 /* Statistics of linearization */
3782                 bp->lin_cnt++;
3783                 if (skb_linearize(skb) != 0) {
3784                         DP(NETIF_MSG_TX_QUEUED,
3785                            "SKB linearization failed - silently dropping this SKB\n");
3786                         dev_kfree_skb_any(skb);
3787                         return NETDEV_TX_OK;
3788                 }
3789         }
3790 #endif
3791         /* Map skb linear data for DMA */
3792         mapping = dma_map_single(&bp->pdev->dev, skb->data,
3793                                  skb_headlen(skb), DMA_TO_DEVICE);
3794         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3795                 DP(NETIF_MSG_TX_QUEUED,
3796                    "SKB mapping failed - silently dropping this SKB\n");
3797                 dev_kfree_skb_any(skb);
3798                 return NETDEV_TX_OK;
3799         }
3800         /*
3801         Please read carefully. First we use one BD which we mark as start,
3802         then we have a parsing info BD (used for TSO or xsum),
3803         and only then we have the rest of the TSO BDs.
3804         (don't forget to mark the last one as last,
3805         and to unmap only AFTER you write to the BD ...)
3806         And above all, all PBD sizes are in words - NOT DWORDS!
3807         */
3808
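        /* BD chain built below, in order (fullest case, E2/E3 with an
         * encapsulated LSO packet):
         *
         *   start BD -> parse BD (e1x or e2) [-> 2nd parse BD if tunneled]
         *            -> split-off LSO header/data BD (if any)
         *            -> one data BD per frag
         *
         * nbd tracks how many BDs the packet uses; a next-page BD is added
         * to the count later, just before ringing the doorbell, if the chain
         * crosses a BD-page boundary.
         */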
3809         /* get current pkt produced now - advance it just before sending packet
3810          * since mapping of pages may fail and cause packet to be dropped
3811          */
3812         pkt_prod = txdata->tx_pkt_prod;
3813         bd_prod = TX_BD(txdata->tx_bd_prod);
3814
3815         /* get a tx_buf and first BD
3816          * tx_start_bd may be changed during SPLIT,
3817          * but first_bd will always stay first
3818          */
3819         tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3820         tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3821         first_bd = tx_start_bd;
3822
3823         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3824
3825         /* header nbd: indirectly zero other flags! */
3826         tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3827
3828         /* remember the first BD of the packet */
3829         tx_buf->first_bd = txdata->tx_bd_prod;
3830         tx_buf->skb = skb;
3831         tx_buf->flags = 0;
3832
3833         DP(NETIF_MSG_TX_QUEUED,
3834            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3835            pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3836
3837         if (vlan_tx_tag_present(skb)) {
3838                 tx_start_bd->vlan_or_ethertype =
3839                     cpu_to_le16(vlan_tx_tag_get(skb));
3840                 tx_start_bd->bd_flags.as_bitfield |=
3841                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3842         } else {
3843                 /* when transmitting from a VF, the start BD must hold the
3844                  * ethertype for the FW to enforce it
3845                  */
3846                 if (IS_VF(bp))
3847                         tx_start_bd->vlan_or_ethertype =
3848                                 cpu_to_le16(ntohs(eth->h_proto));
3849                 else
3850                         /* used by FW for packet accounting */
3851                         tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3852         }
3853
3854         nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3855
3856         /* turn on parsing and get a BD */
3857         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3858
3859         if (xmit_type & XMIT_CSUM)
3860                 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3861
3862         if (!CHIP_IS_E1x(bp)) {
3863                 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3864                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3865
3866                 if (xmit_type & XMIT_CSUM_ENC) {
3867                         u16 global_data = 0;
3868
3869                         /* Set PBD in enc checksum offload case */
3870                         hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3871                                                       &pbd_e2_parsing_data,
3872                                                       xmit_type);
3873
3874                         /* turn on 2nd parsing and get a BD */
3875                         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3876
3877                         pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3878
3879                         memset(pbd2, 0, sizeof(*pbd2));
3880
3881                         pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3882                                 (skb_inner_network_header(skb) -
3883                                  skb->data) >> 1;
3884
3885                         if (xmit_type & XMIT_GSO_ENC)
3886                                 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3887                                                           &global_data,
3888                                                           xmit_type);
3889
3890                         pbd2->global_data = cpu_to_le16(global_data);
3891
3892                         /* add additional parse BD indication to the start BD */
3893                         SET_FLAG(tx_start_bd->general_data,
3894                                  ETH_TX_START_BD_PARSE_NBDS, 1);
3895                         /* set encapsulation flag in start BD */
3896                         SET_FLAG(tx_start_bd->general_data,
3897                                  ETH_TX_START_BD_TUNNEL_EXIST, 1);
3898
3899                         tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3900
3901                         nbd++;
3902                 } else if (xmit_type & XMIT_CSUM) {
3903                         /* Set PBD in checksum offload case w/o encapsulation */
3904                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3905                                                      &pbd_e2_parsing_data,
3906                                                      xmit_type);
3907                 }
3908
3909                 /* Add the macs to the parsing BD if this is a vf or if
3910                  * Tx Switching is enabled.
3911                  */
3912                 if (IS_VF(bp)) {
3913                         /* override GRE parameters in BD */
3914                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3915                                               &pbd_e2->data.mac_addr.src_mid,
3916                                               &pbd_e2->data.mac_addr.src_lo,
3917                                               eth->h_source);
3918
3919                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3920                                               &pbd_e2->data.mac_addr.dst_mid,
3921                                               &pbd_e2->data.mac_addr.dst_lo,
3922                                               eth->h_dest);
3923                 } else if (bp->flags & TX_SWITCHING) {
3924                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3925                                               &pbd_e2->data.mac_addr.dst_mid,
3926                                               &pbd_e2->data.mac_addr.dst_lo,
3927                                               eth->h_dest);
3928                 }
3929
3930                 SET_FLAG(pbd_e2_parsing_data,
3931                          ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3932         } else {
3933                 u16 global_data = 0;
3934                 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3935                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3936                 /* Set PBD in checksum offload case */
3937                 if (xmit_type & XMIT_CSUM)
3938                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3939
3940                 SET_FLAG(global_data,
3941                          ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3942                 pbd_e1x->global_data |= cpu_to_le16(global_data);
3943         }
3944
3945         /* Setup the data pointer of the first BD of the packet */
3946         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3947         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3948         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3949         pkt_size = tx_start_bd->nbytes;
3950
3951         DP(NETIF_MSG_TX_QUEUED,
3952            "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
3953            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3954            le16_to_cpu(tx_start_bd->nbytes),
3955            tx_start_bd->bd_flags.as_bitfield,
3956            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3957
3958         if (xmit_type & XMIT_GSO) {
3959
3960                 DP(NETIF_MSG_TX_QUEUED,
3961                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
3962                    skb->len, hlen, skb_headlen(skb),
3963                    skb_shinfo(skb)->gso_size);
3964
3965                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3966
3967                 if (unlikely(skb_headlen(skb) > hlen)) {
3968                         nbd++;
3969                         bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3970                                                  &tx_start_bd, hlen,
3971                                                  bd_prod);
3972                 }
3973                 if (!CHIP_IS_E1x(bp))
3974                         bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3975                                              xmit_type);
3976                 else
3977                         bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
3978         }
3979
3980         /* Set the PBD's parsing_data field if not zero
3981          * (for the chips newer than 57711).
3982          */
3983         if (pbd_e2_parsing_data)
3984                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3985
3986         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3987
3988         /* Handle fragmented skb */
3989         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3990                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3991
3992                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3993                                            skb_frag_size(frag), DMA_TO_DEVICE);
3994                 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3995                         unsigned int pkts_compl = 0, bytes_compl = 0;
3996
3997                         DP(NETIF_MSG_TX_QUEUED,
3998                            "Unable to map page - dropping packet...\n");
3999
4000                         /* we need to unmap all buffers already mapped
4001                          * for this SKB;
4002                          * first_bd->nbd needs to be properly updated
4003                          * before the call to bnx2x_free_tx_pkt
4004                          */
4005                         first_bd->nbd = cpu_to_le16(nbd);
4006                         bnx2x_free_tx_pkt(bp, txdata,
4007                                           TX_BD(txdata->tx_pkt_prod),
4008                                           &pkts_compl, &bytes_compl);
4009                         return NETDEV_TX_OK;
4010                 }
4011
4012                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4013                 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4014                 if (total_pkt_bd == NULL)
4015                         total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4016
4017                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4018                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4019                 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4020                 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4021                 nbd++;
4022
4023                 DP(NETIF_MSG_TX_QUEUED,
4024                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
4025                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4026                    le16_to_cpu(tx_data_bd->nbytes));
4027         }
4028
4029         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4030
4031         /* update with actual num BDs */
4032         first_bd->nbd = cpu_to_le16(nbd);
4033
4034         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4035
4036         /* now send a tx doorbell, counting the next BD
4037          * if the packet contains or ends with it
4038          */
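        /* More concretely: TX_BD_POFF() is the producer's offset within a BD
         * page; if it is smaller than nbd, the packet's BDs wrapped across a
         * page boundary and the "next page" pointer BD inside that span is
         * counted too.
         */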
4039         if (TX_BD_POFF(bd_prod) < nbd)
4040                 nbd++;
4041
4042         /* total_pkt_bytes should be set on the first data BD if
4043          * it's not an LSO packet and there is more than one
4044          * data BD. In this case pkt_size is limited by an MTU value.
4045          * However we prefer to set it for an LSO packet (while we don't
4046          * have to) in order to save some CPU cycles in the non-LSO
4047          * case, when we care much more about them.
4048          */
4049         if (total_pkt_bd != NULL)
4050                 total_pkt_bd->total_pkt_bytes = pkt_size;
4051
4052         if (pbd_e1x)
4053                 DP(NETIF_MSG_TX_QUEUED,
4054                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
4055                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4056                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4057                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4058                     le16_to_cpu(pbd_e1x->total_hlen_w));
4059         if (pbd_e2)
4060                 DP(NETIF_MSG_TX_QUEUED,
4061                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
4062                    pbd_e2,
4063                    pbd_e2->data.mac_addr.dst_hi,
4064                    pbd_e2->data.mac_addr.dst_mid,
4065                    pbd_e2->data.mac_addr.dst_lo,
4066                    pbd_e2->data.mac_addr.src_hi,
4067                    pbd_e2->data.mac_addr.src_mid,
4068                    pbd_e2->data.mac_addr.src_lo,
4069                    pbd_e2->parsing_data);
4070         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
4071
4072         netdev_tx_sent_queue(txq, skb->len);
4073
4074         skb_tx_timestamp(skb);
4075
4076         txdata->tx_pkt_prod++;
4077         /*
4078          * Make sure that the BD data is updated before updating the producer
4079          * since FW might read the BD right after the producer is updated.
4080          * This is only applicable for weak-ordered memory model archs such
4081          * as IA-64. The following barrier is also mandatory since the FW
4082          * assumes packets must have BDs.
4083          */
4084         wmb();
4085
4086         txdata->tx_db.data.prod += nbd;
4087         barrier();
4088
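        /* Ring the doorbell with the updated producer so the chip fetches the
         * new BDs; mmiowb() below keeps the MMIO write from being reordered
         * past a later unlock on weakly-ordered platforms.
         */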
4089         DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4090
4091         mmiowb();
4092
4093         txdata->tx_bd_prod += nbd;
4094
4095         if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4096                 netif_tx_stop_queue(txq);
4097
4098                 /* The paired memory barrier is in bnx2x_tx_int(); we have to
4099                  * keep the ordering of the set_bit() in netif_tx_stop_queue()
4100                  * and the read of txdata->tx_bd_cons */
4101                 smp_mb();
4102
4103                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4104                 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4105                         netif_tx_wake_queue(txq);
4106         }
4107         txdata->tx_pkt++;
4108
4109         return NETDEV_TX_OK;
4110 }
4111
4112 /**
4113  * bnx2x_setup_tc - routine to configure net_device for multi tc
4114  *
4115  * @dev:    net device to configure
4116  * @num_tc: number of traffic classes to enable
4117  *
4118  * callback connected to the ndo_setup_tc function pointer
4119  */
4120 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4121 {
4122         int cos, prio, count, offset;
4123         struct bnx2x *bp = netdev_priv(dev);
4124
4125         /* setup tc must be called under rtnl lock */
4126         ASSERT_RTNL();
4127
4128         /* no traffic classes requested. Aborting */
4129         if (!num_tc) {
4130                 netdev_reset_tc(dev);
4131                 return 0;
4132         }
4133
4134         /* requested to support too many traffic classes */
4135         if (num_tc > bp->max_cos) {
4136                 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4137                           num_tc, bp->max_cos);
4138                 return -EINVAL;
4139         }
4140
4141         /* declare the number of supported traffic classes */
4142         if (netdev_set_num_tc(dev, num_tc)) {
4143                 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4144                 return -EINVAL;
4145         }
4146
4147         /* configure priority to traffic class mapping */
4148         for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4149                 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4150                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4151                    "mapping priority %d to tc %d\n",
4152                    prio, bp->prio_to_cos[prio]);
4153         }
4154
4155         /* Use this configuration to differentiate tc0 from other COSes
4156            This can be used for ETS or PFC, and saves the effort of setting
4157            up a multi-class queue disc or negotiating DCBX with a switch
4158         netdev_set_prio_tc_map(dev, 0, 0);
4159         DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4160         for (prio = 1; prio < 16; prio++) {
4161                 netdev_set_prio_tc_map(dev, prio, 1);
4162                 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4163         } */
4164
4165         /* configure traffic class to transmission queue mapping */
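             /* Each traffic class is backed by a contiguous block of
              * BNX2X_NUM_ETH_QUEUES(bp) tx queues; class 'cos' starts at
              * queue offset cos * BNX2X_NUM_NON_CNIC_QUEUES(bp).
              */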
4166         for (cos = 0; cos < bp->max_cos; cos++) {
4167                 count = BNX2X_NUM_ETH_QUEUES(bp);
4168                 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4169                 netdev_set_tc_queue(dev, cos, count, offset);
4170                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4171                    "mapping tc %d to offset %d count %d\n",
4172                    cos, offset, count);
4173         }
4174
4175         return 0;
4176 }
4177
4178 /* called with rtnl_lock */
4179 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4180 {
4181         struct sockaddr *addr = p;
4182         struct bnx2x *bp = netdev_priv(dev);
4183         int rc = 0;
4184
4185         if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4186                 BNX2X_ERR("Requested MAC address is not valid\n");
4187                 return -EINVAL;
4188         }
4189
4190         if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4191             !is_zero_ether_addr(addr->sa_data)) {
4192                 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4193                 return -EINVAL;
4194         }
4195
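             /* If the interface is running, remove the currently configured
              * MAC from the device before storing and programming the new one.
              */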
4196         if (netif_running(dev)) {
4197                 rc = bnx2x_set_eth_mac(bp, false);
4198                 if (rc)
4199                         return rc;
4200         }
4201
4202         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4203
4204         if (netif_running(dev))
4205                 rc = bnx2x_set_eth_mac(bp, true);
4206
4207         return rc;
4208 }
4209
4210 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4211 {
4212         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4213         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4214         u8 cos;
4215
4216         /* Common */
4217
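             /* The FCoE queue uses the default status block, so it has no
              * per-queue status block memory of its own to free.
              */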
4218         if (IS_FCOE_IDX(fp_index)) {
4219                 memset(sb, 0, sizeof(union host_hc_status_block));
4220                 fp->status_blk_mapping = 0;
4221         } else {
4222                 /* status blocks */
4223                 if (!CHIP_IS_E1x(bp))
4224                         BNX2X_PCI_FREE(sb->e2_sb,
4225                                        bnx2x_fp(bp, fp_index,
4226                                                 status_blk_mapping),
4227                                        sizeof(struct host_hc_status_block_e2));
4228                 else
4229                         BNX2X_PCI_FREE(sb->e1x_sb,
4230                                        bnx2x_fp(bp, fp_index,
4231                                                 status_blk_mapping),
4232                                        sizeof(struct host_hc_status_block_e1x));
4233         }
4234
4235         /* Rx */
4236         if (!skip_rx_queue(bp, fp_index)) {
4237                 bnx2x_free_rx_bds(fp);
4238
4239                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4240                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4241                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4242                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
4243                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
4244
4245                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4246                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
4247                                sizeof(struct eth_fast_path_rx_cqe) *
4248                                NUM_RCQ_BD);
4249
4250                 /* SGE ring */
4251                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4252                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4253                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
4254                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4255         }
4256
4257         /* Tx */
4258         if (!skip_tx_queue(bp, fp_index)) {
4259                 /* fastpath tx rings: tx_buf tx_desc */
4260                 for_each_cos_in_tx_queue(fp, cos) {
4261                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4262
4263                         DP(NETIF_MSG_IFDOWN,
4264                            "freeing tx memory of fp %d cos %d cid %d\n",
4265                            fp_index, cos, txdata->cid);
4266
4267                         BNX2X_FREE(txdata->tx_buf_ring);
4268                         BNX2X_PCI_FREE(txdata->tx_desc_ring,
4269                                 txdata->tx_desc_mapping,
4270                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4271                 }
4272         }
4273         /* end of fastpath */
4274 }
4275
4276 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4277 {
4278         int i;
4279         for_each_cnic_queue(bp, i)
4280                 bnx2x_free_fp_mem_at(bp, i);
4281 }
4282
4283 void bnx2x_free_fp_mem(struct bnx2x *bp)
4284 {
4285         int i;
4286         for_each_eth_queue(bp, i)
4287                 bnx2x_free_fp_mem_at(bp, i);
4288 }
4289
4290 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4291 {
4292         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4293         if (!CHIP_IS_E1x(bp)) {
4294                 bnx2x_fp(bp, index, sb_index_values) =
4295                         (__le16 *)status_blk.e2_sb->sb.index_values;
4296                 bnx2x_fp(bp, index, sb_running_index) =
4297                         (__le16 *)status_blk.e2_sb->sb.running_index;
4298         } else {
4299                 bnx2x_fp(bp, index, sb_index_values) =
4300                         (__le16 *)status_blk.e1x_sb->sb.index_values;
4301                 bnx2x_fp(bp, index, sb_running_index) =
4302                         (__le16 *)status_blk.e1x_sb->sb.running_index;
4303         }
4304 }
4305
4306 /* Returns the number of actually allocated BDs */
4307 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4308                               int rx_ring_size)
4309 {
4310         struct bnx2x *bp = fp->bp;
4311         u16 ring_prod, cqe_ring_prod;
4312         int i, failure_cnt = 0;
4313
4314         fp->rx_comp_cons = 0;
4315         cqe_ring_prod = ring_prod = 0;
4316
4317         /* This routine is called only during init, so
4318          * fp->eth_q_stats.rx_skb_alloc_failed = 0
4319          */
4320         for (i = 0; i < rx_ring_size; i++) {
4321                 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4322                         failure_cnt++;
4323                         continue;
4324                 }
4325                 ring_prod = NEXT_RX_IDX(ring_prod);
4326                 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4327                 WARN_ON(ring_prod <= (i - failure_cnt));
4328         }
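             /* Allocation failures do not advance the producers, so the ring
              * simply ends up with fewer filled BDs (no holes); any failures
              * are reported below.
              */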
4329
4330         if (failure_cnt)
4331                 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4332                           i - failure_cnt, fp->index);
4333
4334         fp->rx_bd_prod = ring_prod;
4335         /* Limit the CQE producer by the CQE ring size */
4336         fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4337                                cqe_ring_prod);
4338         fp->rx_pkt = fp->rx_calls = 0;
4339
4340         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4341
4342         return i - failure_cnt;
4343 }
4344
4345 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4346 {
4347         int i;
4348
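             /* The last CQE of each RCQ page is a "next page" element chaining
              * the pages together; the final page wraps back to the first.
              */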
4349         for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4350                 struct eth_rx_cqe_next_page *nextpg;
4351
4352                 nextpg = (struct eth_rx_cqe_next_page *)
4353                         &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4354                 nextpg->addr_hi =
4355                         cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4356                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4357                 nextpg->addr_lo =
4358                         cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4359                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4360         }
4361 }
4362
4363 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4364 {
4365         union host_hc_status_block *sb;
4366         struct bnx2x_fastpath *fp = &bp->fp[index];
4367         int ring_size = 0;
4368         u8 cos;
4369         int rx_ring_size = 0;
4370
4371         if (!bp->rx_ring_size &&
4372             (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4373                 rx_ring_size = MIN_RX_SIZE_NONTPA;
4374                 bp->rx_ring_size = rx_ring_size;
4375         } else if (!bp->rx_ring_size) {
4376                 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4377
4378                 if (CHIP_IS_E3(bp)) {
4379                         u32 cfg = SHMEM_RD(bp,
4380                                            dev_info.port_hw_config[BP_PORT(bp)].
4381                                            default_cfg);
4382
4383                         /* Decrease ring size for 1G functions */
4384                         if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4385                             PORT_HW_CFG_NET_SERDES_IF_SGMII)
4386                                 rx_ring_size /= 10;
4387                 }
4388
4389                 /* allocate at least the number of buffers required by FW */
4390                 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4391                                      MIN_RX_SIZE_TPA, rx_ring_size);
4392
4393                 bp->rx_ring_size = rx_ring_size;
4394         } else /* if rx_ring_size specified - use it */
4395                 rx_ring_size = bp->rx_ring_size;
4396
4397         DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4398
4399         /* Common */
4400         sb = &bnx2x_fp(bp, index, status_blk);
4401
4402         if (!IS_FCOE_IDX(index)) {
4403                 /* status blocks */
4404                 if (!CHIP_IS_E1x(bp)) {
4405                         sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4406                                                     sizeof(struct host_hc_status_block_e2));
4407                         if (!sb->e2_sb)
4408                                 goto alloc_mem_err;
4409                 } else {
4410                         sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4411                                                      sizeof(struct host_hc_status_block_e1x));
4412                         if (!sb->e1x_sb)
4413                                 goto alloc_mem_err;
4414                 }
4415         }
4416
4417         /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4418          * set shortcuts for it.
4419          */
4420         if (!IS_FCOE_IDX(index))
4421                 set_sb_shortcuts(bp, index);
4422
4423         /* Tx */
4424         if (!skip_tx_queue(bp, index)) {
4425                 /* fastpath tx rings: tx_buf tx_desc */
4426                 for_each_cos_in_tx_queue(fp, cos) {
4427                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4428
4429                         DP(NETIF_MSG_IFUP,
4430                            "allocating tx memory of fp %d cos %d\n",
4431                            index, cos);
4432
4433                         txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4434                                                       sizeof(struct sw_tx_bd),
4435                                                       GFP_KERNEL);
4436                         if (!txdata->tx_buf_ring)
4437                                 goto alloc_mem_err;
4438                         txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4439                                                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4440                         if (!txdata->tx_desc_ring)
4441                                 goto alloc_mem_err;
4442                 }
4443         }
4444
4445         /* Rx */
4446         if (!skip_rx_queue(bp, index)) {
4447                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4448                 bnx2x_fp(bp, index, rx_buf_ring) =
4449                         kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4450                 if (!bnx2x_fp(bp, index, rx_buf_ring))
4451                         goto alloc_mem_err;
4452                 bnx2x_fp(bp, index, rx_desc_ring) =
4453                         BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4454                                         sizeof(struct eth_rx_bd) * NUM_RX_BD);
4455                 if (!bnx2x_fp(bp, index, rx_desc_ring))
4456                         goto alloc_mem_err;
4457
4458                 /* Seed all CQEs by 1s */
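                     /* (BNX2X_PCI_FALLOC fills the ring with all-ones;
                      * presumably this keeps stale entries from ever looking
                      * like valid completions.)
                      */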
4459                 bnx2x_fp(bp, index, rx_comp_ring) =
4460                         BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4461                                          sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4462                 if (!bnx2x_fp(bp, index, rx_comp_ring))
4463                         goto alloc_mem_err;
4464
4465                 /* SGE ring */
4466                 bnx2x_fp(bp, index, rx_page_ring) =
4467                         kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4468                                 GFP_KERNEL);
4469                 if (!bnx2x_fp(bp, index, rx_page_ring))
4470                         goto alloc_mem_err;
4471                 bnx2x_fp(bp, index, rx_sge_ring) =
4472                         BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4473                                         BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4474                 if (!bnx2x_fp(bp, index, rx_sge_ring))
4475                         goto alloc_mem_err;
4476                 /* RX BD ring */
4477                 bnx2x_set_next_page_rx_bd(fp);
4478
4479                 /* CQ ring */
4480                 bnx2x_set_next_page_rx_cq(fp);
4481
4482                 /* BDs */
4483                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4484                 if (ring_size < rx_ring_size)
4485                         goto alloc_mem_err;
4486         }
4487
4488         return 0;
4489
4490 /* handles low memory cases */
4491 alloc_mem_err:
4492         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4493                                                 index, ring_size);
4494         /* FW will drop all packets if the queue is not big enough;
4495          * in that case we disable the queue.
4496          * The minimum size differs for OOO, TPA and non-TPA queues.
4497          */
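             /* If at least the minimum ring size was allocated, keep the queue
              * with the smaller ring instead of disabling it.
              */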
4498         if (ring_size < (fp->disable_tpa ?
4499                                 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4500                         /* release memory allocated for this queue */
4501                         bnx2x_free_fp_mem_at(bp, index);
4502                         return -ENOMEM;
4503         }
4504         return 0;
4505 }
4506
4507 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4508 {
4509         if (!NO_FCOE(bp))
4510                 /* FCoE */
4511                 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4512                         /* we will fail the load process instead of marking
4513                          * NO_FCOE_FLAG
4514                          */
4515                         return -ENOMEM;
4516
4517         return 0;
4518 }
4519
4520 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4521 {
4522         int i;
4523
4524         /* 1. Allocate FP for leading - fatal if error
4525          * 2. Allocate RSS - fix number of queues if error
4526          */
4527
4528         /* leading */
4529         if (bnx2x_alloc_fp_mem_at(bp, 0))
4530                 return -ENOMEM;
4531
4532         /* RSS */
4533         for_each_nondefault_eth_queue(bp, i)
4534                 if (bnx2x_alloc_fp_mem_at(bp, i))
4535                         break;
4536
4537         /* handle memory failures */
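             /* If some RSS queues could not be allocated, shrink the ethernet
              * queue count to what actually succeeded and move the CNIC queues
              * down so they stay adjacent to the last ethernet queue.
              */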
4538         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4539                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4540
4541                 WARN_ON(delta < 0);
4542                 bnx2x_shrink_eth_fp(bp, delta);
4543                 if (CNIC_SUPPORT(bp))
4544                         /* move non-eth FPs next to the last eth FP;
4545                          * must be done in this order:
4546                          * FCOE_IDX < FWD_IDX < OOO_IDX
4547                          */
4548
4549                         /* move the FCoE fp even if NO_FCOE_FLAG is on */
4550                         bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4551                 bp->num_ethernet_queues -= delta;
4552                 bp->num_queues = bp->num_ethernet_queues +
4553                                  bp->num_cnic_queues;
4554                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4555                           bp->num_queues + delta, bp->num_queues);
4556         }
4557
4558         return 0;
4559 }
4560
4561 void bnx2x_free_mem_bp(struct bnx2x *bp)
4562 {
4563         int i;
4564
4565         for (i = 0; i < bp->fp_array_size; i++)
4566                 kfree(bp->fp[i].tpa_info);
4567         kfree(bp->fp);
4568         kfree(bp->sp_objs);
4569         kfree(bp->fp_stats);
4570         kfree(bp->bnx2x_txq);
4571         kfree(bp->msix_table);
4572         kfree(bp->ilt);
4573 }
4574
4575 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4576 {
4577         struct bnx2x_fastpath *fp;
4578         struct msix_entry *tbl;
4579         struct bnx2x_ilt *ilt;
4580         int msix_table_size = 0;
4581         int fp_array_size, txq_array_size;
4582         int i;
4583
4584         /*
4585          * The biggest MSI-X table we might need is the maximum number of fast
4586          * path IGU SBs plus default SB (for PF only).
4587          */
4588         msix_table_size = bp->igu_sb_cnt;
4589         if (IS_PF(bp))
4590                 msix_table_size++;
4591         BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4592
4593         /* fp array: RSS plus CNIC related L2 queues */
4594         fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4595         bp->fp_array_size = fp_array_size;
4596         BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4597
4598         fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4599         if (!fp)
4600                 goto alloc_err;
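             /* Each fastpath gets its own array of TPA aggregation
              * descriptors.
              */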
4601         for (i = 0; i < bp->fp_array_size; i++) {
4602                 fp[i].tpa_info =
4603                         kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4604                                 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4605                 if (!(fp[i].tpa_info))
4606                         goto alloc_err;
4607         }
4608
4609         bp->fp = fp;
4610
4611         /* allocate sp objs */
4612         bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4613                               GFP_KERNEL);
4614         if (!bp->sp_objs)
4615                 goto alloc_err;
4616
4617         /* allocate fp_stats */
4618         bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4619                                GFP_KERNEL);
4620         if (!bp->fp_stats)
4621                 goto alloc_err;
4622
4623         /* Allocate memory for the transmission queues array */
4624         txq_array_size =
4625                 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4626         BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4627
4628         bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4629                                 GFP_KERNEL);
4630         if (!bp->bnx2x_txq)
4631                 goto alloc_err;
4632
4633         /* msix table */
4634         tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4635         if (!tbl)
4636                 goto alloc_err;
4637         bp->msix_table = tbl;
4638
4639         /* ilt */
4640         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4641         if (!ilt)
4642                 goto alloc_err;
4643         bp->ilt = ilt;
4644
4645         return 0;
4646 alloc_err:
4647         bnx2x_free_mem_bp(bp);
4648         return -ENOMEM;
4649 }
4650
4651 int bnx2x_reload_if_running(struct net_device *dev)
4652 {
4653         struct bnx2x *bp = netdev_priv(dev);
4654
4655         if (unlikely(!netif_running(dev)))
4656                 return 0;
4657
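             /* Do a full unload/reload so that configuration which is only
              * applied during nic_load takes effect.
              */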
4658         bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4659         return bnx2x_nic_load(bp, LOAD_NORMAL);
4660 }
4661
4662 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4663 {
4664         u32 sel_phy_idx = 0;
4665         if (bp->link_params.num_phys <= 1)
4666                 return INT_PHY;
4667
4668         if (bp->link_vars.link_up) {
4669                 sel_phy_idx = EXT_PHY1;
4670                 /* In case the link is SERDES, check whether EXT_PHY2 is the one */
4671                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4672                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4673                         sel_phy_idx = EXT_PHY2;
4674         } else {
4675
4676                 switch (bnx2x_phy_selection(&bp->link_params)) {
4677                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4678                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4679                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4680                        sel_phy_idx = EXT_PHY1;
4681                        break;
4682                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4683                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4684                        sel_phy_idx = EXT_PHY2;
4685                        break;
4686                 }
4687         }
4688
4689         return sel_phy_idx;
4690 }
4691 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4692 {
4693         u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4694         /*
4695          * The selected active PHY index is always the one after swapping (in
4696          * case PHY swapping is enabled), so when swapping is enabled we need
4697          * to reverse the configuration.
4698          */
4699
4700         if (bp->link_params.multi_phy_config &
4701             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4702                 if (sel_phy_idx == EXT_PHY1)
4703                         sel_phy_idx = EXT_PHY2;
4704                 else if (sel_phy_idx == EXT_PHY2)
4705                         sel_phy_idx = EXT_PHY1;
4706         }
4707         return LINK_CONFIG_IDX(sel_phy_idx);
4708 }
4709
4710 #ifdef NETDEV_FCOE_WWNN
4711 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4712 {
4713         struct bnx2x *bp = netdev_priv(dev);
4714         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4715
4716         switch (type) {
4717         case NETDEV_FCOE_WWNN:
4718                 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4719                                 cp->fcoe_wwn_node_name_lo);
4720                 break;
4721         case NETDEV_FCOE_WWPN:
4722                 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4723                                 cp->fcoe_wwn_port_name_lo);
4724                 break;
4725         default:
4726                 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4727                 return -EINVAL;
4728         }
4729
4730         return 0;
4731 }
4732 #endif
4733
4734 /* called with rtnl_lock */
4735 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4736 {
4737         struct bnx2x *bp = netdev_priv(dev);
4738
4739         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4740                 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4741                 return -EAGAIN;
4742         }
4743
4744         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4745             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4746                 BNX2X_ERR("Can't support requested MTU size\n");
4747                 return -EINVAL;
4748         }
4749
4750         /* This does not race with packet allocation
4751          * because the actual alloc size is
4752          * only updated as part of load
4753          */
4754         dev->mtu = new_mtu;
4755
4756         return bnx2x_reload_if_running(dev);
4757 }
4758
4759 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4760                                      netdev_features_t features)
4761 {
4762         struct bnx2x *bp = netdev_priv(dev);
4763
4764         /* TPA requires Rx CSUM offloading */
4765         if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4766                 features &= ~NETIF_F_LRO;
4767                 features &= ~NETIF_F_GRO;
4768         }
4769
4770         return features;
4771 }
4772
4773 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4774 {
4775         struct bnx2x *bp = netdev_priv(dev);
4776         u32 flags = bp->flags;
4777         u32 changes;
4778         bool bnx2x_reload = false;
4779
4780         if (features & NETIF_F_LRO)
4781                 flags |= TPA_ENABLE_FLAG;
4782         else
4783                 flags &= ~TPA_ENABLE_FLAG;
4784
4785         if (features & NETIF_F_GRO)
4786                 flags |= GRO_ENABLE_FLAG;
4787         else
4788                 flags &= ~GRO_ENABLE_FLAG;
4789
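             /* NETIF_F_LOOPBACK switches the MAC into BMAC loopback mode; the
              * change only takes effect after a reload.
              */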
4790         if (features & NETIF_F_LOOPBACK) {
4791                 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4792                         bp->link_params.loopback_mode = LOOPBACK_BMAC;
4793                         bnx2x_reload = true;
4794                 }
4795         } else {
4796                 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4797                         bp->link_params.loopback_mode = LOOPBACK_NONE;
4798                         bnx2x_reload = true;
4799                 }
4800         }
4801
4802         changes = flags ^ bp->flags;
4803
4804         /* if GRO is changed while LRO is enabled, don't force a reload */
4805         if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4806                 changes &= ~GRO_ENABLE_FLAG;
4807
4808         if (changes)
4809                 bnx2x_reload = true;
4810
4811         bp->flags = flags;
4812
4813         if (bnx2x_reload) {
4814                 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4815                         return bnx2x_reload_if_running(dev);
4816                 /* else: bnx2x_nic_load() will be called at end of recovery */
4817         }
4818
4819         return 0;
4820 }
4821
4822 void bnx2x_tx_timeout(struct net_device *dev)
4823 {
4824         struct bnx2x *bp = netdev_priv(dev);
4825
4826 #ifdef BNX2X_STOP_ON_ERROR
4827         if (!bp->panic)
4828                 bnx2x_panic();
4829 #endif
4830
4831         /* This allows the netif to be shut down gracefully before resetting */
4832         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4833 }
4834
4835 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4836 {
4837         struct net_device *dev = pci_get_drvdata(pdev);
4838         struct bnx2x *bp;
4839
4840         if (!dev) {
4841                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4842                 return -ENODEV;
4843         }
4844         bp = netdev_priv(dev);
4845
4846         rtnl_lock();
4847
4848         pci_save_state(pdev);
4849
4850         if (!netif_running(dev)) {
4851                 rtnl_unlock();
4852                 return 0;
4853         }
4854
4855         netif_device_detach(dev);
4856
4857         bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4858
4859         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4860
4861         rtnl_unlock();
4862
4863         return 0;
4864 }
4865
4866 int bnx2x_resume(struct pci_dev *pdev)
4867 {
4868         struct net_device *dev = pci_get_drvdata(pdev);
4869         struct bnx2x *bp;
4870         int rc;
4871
4872         if (!dev) {
4873                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4874                 return -ENODEV;
4875         }
4876         bp = netdev_priv(dev);
4877
4878         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4879                 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4880                 return -EAGAIN;
4881         }
4882
4883         rtnl_lock();
4884
4885         pci_restore_state(pdev);
4886
4887         if (!netif_running(dev)) {
4888                 rtnl_unlock();
4889                 return 0;
4890         }
4891
4892         bnx2x_set_power_state(bp, PCI_D0);
4893         netif_device_attach(dev);
4894
4895         rc = bnx2x_nic_load(bp, LOAD_OPEN);
4896
4897         rtnl_unlock();
4898
4899         return rc;
4900 }
4901
4902 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4903                               u32 cid)
4904 {
4905         if (!cxt) {
4906                 BNX2X_ERR("bad context pointer %p\n", cxt);
4907                 return;
4908         }
4909
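             /* The CDU fields below carry a validation value derived from the
              * CID, region and connection type, presumably so the chip can
              * sanity-check the contexts it fetches.
              */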
4910         /* ustorm cxt validation */
4911         cxt->ustorm_ag_context.cdu_usage =
4912                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4913                         CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4914         /* xcontext validation */
4915         cxt->xstorm_ag_context.cdu_reserved =
4916                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4917                         CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4918 }
4919
4920 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4921                                     u8 fw_sb_id, u8 sb_index,
4922                                     u8 ticks)
4923 {
4924         u32 addr = BAR_CSTRORM_INTMEM +
4925                    CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4926         REG_WR8(bp, addr, ticks);
4927         DP(NETIF_MSG_IFUP,
4928            "port %x fw_sb_id %d sb_index %d ticks %d\n",
4929            port, fw_sb_id, sb_index, ticks);
4930 }
4931
4932 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4933                                     u16 fw_sb_id, u8 sb_index,
4934                                     u8 disable)
4935 {
4936         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4937         u32 addr = BAR_CSTRORM_INTMEM +
4938                    CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4939         u8 flags = REG_RD8(bp, addr);
4940         /* clear and set */
4941         flags &= ~HC_INDEX_DATA_HC_ENABLED;
4942         flags |= enable_flag;
4943         REG_WR8(bp, addr, flags);
4944         DP(NETIF_MSG_IFUP,
4945            "port %x fw_sb_id %d sb_index %d disable %d\n",
4946            port, fw_sb_id, sb_index, disable);
4947 }
4948
4949 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4950                                     u8 sb_index, u8 disable, u16 usec)
4951 {
4952         int port = BP_PORT(bp);
4953         u8 ticks = usec / BNX2X_BTR;
4954
4955         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4956
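             /* An explicit disable, or a zero interval, turns coalescing off
              * for this status block index.
              */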
4957         disable = disable ? 1 : (usec ? 0 : 1);
4958         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4959 }
4960
4961 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
4962                             u32 verbose)
4963 {
4964         smp_mb__before_atomic();
4965         set_bit(flag, &bp->sp_rtnl_state);
4966         smp_mb__after_atomic();
4967         DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
4968            flag);
4969         schedule_delayed_work(&bp->sp_rtnl_task, 0);
4970 }
4971 EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);