/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

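/* Enable/disable interrupt delivery to the host by toggling the hostintr
 * bit of the membar control register in PCI config space.
 */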
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_err)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

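/* The helpers below ring doorbell registers to notify HW of buffers posted
 * to the RX/TX rings and of entries consumed on the event and completion
 * queues. The RQ/TXQ helpers issue a wmb() so that descriptor writes reach
 * memory before the doorbell does.
 */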
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

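/* Set a new MAC on the interface: the new pmac is added before the old one
 * is deleted, so a valid MAC filter is present at all times.
 */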
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        status = be_cmd_mac_addr_query(adapter, current_mac,
                                MAC_ADDRESS_TYPE_NETWORK, false,
                                adapter->if_handle, 0);
        if (status)
                goto err;

        if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
                status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
                if (status)
                        goto err;

                be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
        }
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
        return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

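/* Fold a 16-bit HW counter into a 32-bit SW accumulator, compensating for
 * a single wrap of the HW value since the previous read.
 */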
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   ((x) & 0xFFFF)
#define hi(x)                   ((x) & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }

        /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
        for_all_rx_queues(adapter, rxo, i) {
                /* below erx HW counter can actually wrap around after
                 * 65535. Driver accumulates a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                (u16)erx->rx_drops_no_fragments[rxo->q.id]);
        }
}

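/* Aggregate the per-queue SW counters into the netdev stats. The u64_stats
 * fetch/retry loops yield consistent 64-bit snapshots even on 32-bit hosts.
 */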
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

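/* Populate the header WRB that precedes the data WRBs: checksum, LSO and
 * VLAN offload flags, the WRB count and the total frame length.
 */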
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

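/* Map the skb head and frags into TX WRBs. On a DMA mapping failure the
 * queue head is rewound and all fragments mapped so far are unmapped.
 */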
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        /* For vlan tagged pkts, BE
         * 1) calculates checksum even when CSO is not requested
         * 2) calculates checksum wrongly for padded pkts less than
         * 60 bytes long.
         * As a workaround disable TX vlan offloading in such cases.
         */
        if (unlikely(vlan_tx_tag_present(skb) &&
                     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
                skb = skb_share_check(skb, GFP_ATOMIC);
                if (unlikely(!skb))
                        goto tx_drop;

                skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
                if (unlikely(!skb))
                        goto tx_drop;

                skb->vlan_tci = 0;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;

        if (vf) {
                vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
                status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
                                            1, 1, 0);
        }

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter, false, 0);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter, false, 0);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter, false, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
                        netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

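/* Adaptive interrupt coalescing: once per second, derive a new EQ delay
 * from the measured RX packet rate and reprogram the EQ if it changed.
 */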
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = rx_stats(rxo);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) /
                        (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = stats->rx_pps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd) {
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
                rx_eq->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

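/* Return the page_info of an RX frag and unmap the backing compound page
 * once its last frag has been consumed.
 */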
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0],
                                  curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
                }
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        napi_gro_frags(&eq_obj->napi);
}

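/* Parse the HW RX completion (v1 for BE3 native mode, v0 otherwise) into
 * the HW-version-agnostic be_rx_compl_info format.
 */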
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is OK to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

1379 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1380 {
1381         u32 order = get_order(size);
1382
1383         if (order > 0)
1384                 gfp |= __GFP_COMP;
1385         return  alloc_pages(gfp, order);
1386 }
1387
1388 /*
1389  * Allocate a page, split it to fragments of size rx_frag_size and post as
1390  * receive buffers to BE
1391  */
1392 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1393 {
1394         struct be_adapter *adapter = rxo->adapter;
1395         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1396         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1397         struct be_queue_info *rxq = &rxo->q;
1398         struct page *pagep = NULL;
1399         struct be_eth_rx_d *rxd;
1400         u64 page_dmaaddr = 0, frag_dmaaddr;
1401         u32 posted, page_offset = 0;
1402
1403         page_info = &rxo->page_info_tbl[rxq->head];
1404         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1405                 if (!pagep) {
1406                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1407                         if (unlikely(!pagep)) {
1408                                 rx_stats(rxo)->rx_post_fail++;
1409                                 break;
1410                         }
                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
                                                    0, adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        /* stop posting if the page could not be DMA-mapped */
                        if (dma_mapping_error(&adapter->pdev->dev,
                                              page_dmaaddr)) {
                                put_page(pagep);
                                pagep = NULL;
                                rx_stats(rxo)->rx_post_fail++;
                                break;
                        }
                        page_info->page_offset = 0;
1415                 } else {
1416                         get_page(pagep);
1417                         page_info->page_offset = page_offset + rx_frag_size;
1418                 }
1419                 page_offset = page_info->page_offset;
1420                 page_info->page = pagep;
1421                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1422                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1423
1424                 rxd = queue_head_node(rxq);
1425                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1426                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1427
1428                 /* Any space left in the current big page for another frag? */
1429                 if ((page_offset + rx_frag_size + rx_frag_size) >
1430                                         adapter->big_page_size) {
1431                         pagep = NULL;
1432                         page_info->last_page_user = true;
1433                 }
1434
1435                 prev_page_info = page_info;
1436                 queue_head_inc(rxq);
1437                 page_info = &page_info_tbl[rxq->head];
1438         }
1439         if (pagep)
1440                 prev_page_info->last_page_user = true;
1441
1442         if (posted) {
1443                 atomic_add(posted, &rxq->used);
1444                 be_rxq_notify(adapter, rxq->id, posted);
1445         } else if (atomic_read(&rxq->used) == 0) {
1446                 /* Let be_worker replenish when memory is available */
1447                 rxo->rx_post_starved = true;
1448         }
1449 }
1450
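/* Fetch the TX completion at the CQ tail, or NULL if it is not yet valid.
 * The rmb() ensures the entry is not read before its valid bit.
 */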
1451 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1452 {
1453         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1454
1455         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1456                 return NULL;
1457
1458         rmb();
1459         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1460
1461         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1462
1463         queue_tail_inc(tx_cq);
1464         return txcp;
1465 }
1466
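/* Unmap and free the skb whose wrbs end at last_index; returns the number
 * of wrbs consumed, including the header wrb.
 */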
1467 static u16 be_tx_compl_process(struct be_adapter *adapter,
1468                 struct be_tx_obj *txo, u16 last_index)
1469 {
1470         struct be_queue_info *txq = &txo->q;
1471         struct be_eth_wrb *wrb;
1472         struct sk_buff **sent_skbs = txo->sent_skb_list;
1473         struct sk_buff *sent_skb;
1474         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1475         bool unmap_skb_hdr = true;
1476
1477         sent_skb = sent_skbs[txq->tail];
1478         BUG_ON(!sent_skb);
1479         sent_skbs[txq->tail] = NULL;
1480
1481         /* skip header wrb */
1482         queue_tail_inc(txq);
1483
1484         do {
1485                 cur_index = txq->tail;
1486                 wrb = queue_tail_node(txq);
1487                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1488                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1489                 unmap_skb_hdr = false;
1490
1491                 num_wrbs++;
1492                 queue_tail_inc(txq);
1493         } while (cur_index != last_index);
1494
1495         kfree_skb(sent_skb);
1496         return num_wrbs;
1497 }
1498
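/* Pop the event queue entry at the tail, or NULL if none is pending */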
1499 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1500 {
1501         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1502
1503         if (!eqe->evt)
1504                 return NULL;
1505
1506         rmb();
1507         eqe->evt = le32_to_cpu(eqe->evt);
1508         queue_tail_inc(&eq_obj->q);
1509         return eqe;
1510 }
1511
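/* Drain all pending entries on the EQ, notify the hw and, if any events
 * were found, schedule NAPI processing. Spurious interrupts (no events)
 * force a re-arm of the EQ.
 */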
1512 static int event_handle(struct be_adapter *adapter,
1513                         struct be_eq_obj *eq_obj,
1514                         bool rearm)
1515 {
1516         struct be_eq_entry *eqe;
1517         u16 num = 0;
1518
1519         while ((eqe = event_get(eq_obj)) != NULL) {
1520                 eqe->evt = 0;
1521                 num++;
1522         }
1523
1524         /* Deal with any spurious interrupts that come
1525          * without events
1526          */
1527         if (!num)
1528                 rearm = true;
1529
1530         be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1531         if (num)
1532                 napi_schedule(&eq_obj->napi);
1533
1534         return num;
1535 }
1536
1537 /* Just read and notify events without processing them.
1538  * Used at the time of destroying event queues */
1539 static void be_eq_clean(struct be_adapter *adapter,
1540                         struct be_eq_obj *eq_obj)
1541 {
1542         struct be_eq_entry *eqe;
1543         u16 num = 0;
1544
1545         while ((eqe = event_get(eq_obj)) != NULL) {
1546                 eqe->evt = 0;
1547                 num++;
1548         }
1549
1550         if (num)
1551                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1552 }
1553
1554 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1555 {
1556         struct be_rx_page_info *page_info;
1557         struct be_queue_info *rxq = &rxo->q;
1558         struct be_queue_info *rx_cq = &rxo->cq;
1559         struct be_rx_compl_info *rxcp;
1560         u16 tail;
1561
1562         /* First cleanup pending rx completions */
1563         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1564                 be_rx_compl_discard(adapter, rxo, rxcp);
1565                 be_cq_notify(adapter, rx_cq->id, false, 1);
1566         }
1567
        /* Then free posted rx buffers that were not used */
1569         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1570         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1571                 page_info = get_rx_page_info(adapter, rxo, tail);
1572                 put_page(page_info->page);
1573                 memset(page_info, 0, sizeof(*page_info));
1574         }
1575         BUG_ON(atomic_read(&rxq->used));
1576         rxq->tail = rxq->head = 0;
1577 }
1578
1579 static void be_tx_compl_clean(struct be_adapter *adapter,
1580                                 struct be_tx_obj *txo)
1581 {
1582         struct be_queue_info *tx_cq = &txo->cq;
1583         struct be_queue_info *txq = &txo->q;
1584         struct be_eth_tx_compl *txcp;
1585         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1586         struct sk_buff **sent_skbs = txo->sent_skb_list;
1587         struct sk_buff *sent_skb;
1588         bool dummy_wrb;
1589
1590         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1591         do {
1592                 while ((txcp = be_tx_compl_get(tx_cq))) {
1593                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1594                                         wrb_index, txcp);
1595                         num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1596                         cmpl++;
1597                 }
1598                 if (cmpl) {
1599                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1600                         atomic_sub(num_wrbs, &txq->used);
1601                         cmpl = 0;
1602                         num_wrbs = 0;
1603                 }
1604
1605                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1606                         break;
1607
1608                 mdelay(1);
1609         } while (true);
1610
1611         if (atomic_read(&txq->used))
1612                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1613                         atomic_read(&txq->used));
1614
1615         /* free posted tx for which compls will never arrive */
1616         while (atomic_read(&txq->used)) {
1617                 sent_skb = sent_skbs[txq->tail];
1618                 end_idx = txq->tail;
1619                 index_adv(&end_idx,
1620                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1621                         txq->len);
1622                 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1623                 atomic_sub(num_wrbs, &txq->used);
1624         }
1625 }
1626
1627 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1628 {
1629         struct be_queue_info *q;
1630
1631         q = &adapter->mcc_obj.q;
1632         if (q->created)
1633                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1634         be_queue_free(adapter, q);
1635
1636         q = &adapter->mcc_obj.cq;
1637         if (q->created)
1638                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1639         be_queue_free(adapter, q);
1640 }
1641
1642 /* Must be called only after TX qs are created as MCC shares TX EQ */
1643 static int be_mcc_queues_create(struct be_adapter *adapter)
1644 {
1645         struct be_queue_info *q, *cq;
1646
1647         /* Alloc MCC compl queue */
1648         cq = &adapter->mcc_obj.cq;
1649         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1650                         sizeof(struct be_mcc_compl)))
1651                 goto err;
1652
1653         /* Ask BE to create MCC compl queue; share TX's eq */
1654         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1655                 goto mcc_cq_free;
1656
1657         /* Alloc MCC queue */
1658         q = &adapter->mcc_obj.q;
1659         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1660                 goto mcc_cq_destroy;
1661
1662         /* Ask BE to create MCC queue */
1663         if (be_cmd_mccq_create(adapter, q, cq))
1664                 goto mcc_q_free;
1665
1666         return 0;
1667
1668 mcc_q_free:
1669         be_queue_free(adapter, q);
1670 mcc_cq_destroy:
1671         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1672 mcc_cq_free:
1673         be_queue_free(adapter, cq);
1674 err:
1675         return -1;
1676 }
1677
1678 static void be_tx_queues_destroy(struct be_adapter *adapter)
1679 {
1680         struct be_queue_info *q;
1681         struct be_tx_obj *txo;
1682         u8 i;
1683
1684         for_all_tx_queues(adapter, txo, i) {
1685                 q = &txo->q;
1686                 if (q->created)
1687                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1688                 be_queue_free(adapter, q);
1689
1690                 q = &txo->cq;
1691                 if (q->created)
1692                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1693                 be_queue_free(adapter, q);
1694         }
1695
1696         /* Clear any residual events */
1697         be_eq_clean(adapter, &adapter->tx_eq);
1698
1699         q = &adapter->tx_eq.q;
1700         if (q->created)
1701                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1702         be_queue_free(adapter, q);
1703 }
1704
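/* Multiple TX queues are used only on a BE3 physical function that has no
 * VFs enabled and is not in multi-channel mode.
 */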
1705 static int be_num_txqs_want(struct be_adapter *adapter)
1706 {
1707         if (sriov_enabled(adapter) || be_is_mc(adapter) ||
1708                 lancer_chip(adapter) || !be_physfn(adapter) ||
1709                 adapter->generation == BE_GEN2)
1710                 return 1;
1711         else
1712                 return MAX_TX_QS;
1713 }
1714
1715 /* One TX event queue is shared by all TX compl qs */
1716 static int be_tx_queues_create(struct be_adapter *adapter)
1717 {
1718         struct be_queue_info *eq, *q, *cq;
1719         struct be_tx_obj *txo;
1720         u8 i;
1721
1722         adapter->num_tx_qs = be_num_txqs_want(adapter);
1723         if (adapter->num_tx_qs != MAX_TX_QS) {
1724                 rtnl_lock();
1725                 netif_set_real_num_tx_queues(adapter->netdev,
1726                         adapter->num_tx_qs);
1727                 rtnl_unlock();
1728         }
1729
1730         adapter->tx_eq.max_eqd = 0;
1731         adapter->tx_eq.min_eqd = 0;
1732         adapter->tx_eq.cur_eqd = 96;
1733         adapter->tx_eq.enable_aic = false;
1734
1735         eq = &adapter->tx_eq.q;
1736         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1737                 sizeof(struct be_eq_entry)))
1738                 return -1;
1739
1740         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1741                 goto err;
1742         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1743
1744         for_all_tx_queues(adapter, txo, i) {
1745                 cq = &txo->cq;
1746                 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1747                         sizeof(struct be_eth_tx_compl)))
1748                         goto err;
1749
1750                 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1751                         goto err;
1752
1753                 q = &txo->q;
1754                 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1755                         sizeof(struct be_eth_wrb)))
1756                         goto err;
1757         }
1758         return 0;
1759
1760 err:
1761         be_tx_queues_destroy(adapter);
1762         return -1;
1763 }
1764
1765 static void be_rx_queues_destroy(struct be_adapter *adapter)
1766 {
1767         struct be_queue_info *q;
1768         struct be_rx_obj *rxo;
1769         int i;
1770
1771         for_all_rx_queues(adapter, rxo, i) {
1772                 be_queue_free(adapter, &rxo->q);
1773
1774                 q = &rxo->cq;
1775                 if (q->created)
1776                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1777                 be_queue_free(adapter, q);
1778
1779                 q = &rxo->rx_eq.q;
1780                 if (q->created)
1781                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1782                 be_queue_free(adapter, q);
1783         }
1784 }
1785
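/* RSS RX queues are used only on an RSS-capable physical function with
 * SR-IOV disabled and multi-channel mode off; otherwise a single RX queue
 * is used.
 */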
1786 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1787 {
1788         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1789              !sriov_enabled(adapter) && be_physfn(adapter) &&
1790              !be_is_mc(adapter)) {
1791                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1792         } else {
1793                 dev_warn(&adapter->pdev->dev,
1794                         "No support for multiple RX queues\n");
1795                 return 1;
1796         }
1797 }
1798
1799 static int be_rx_queues_create(struct be_adapter *adapter)
1800 {
1801         struct be_queue_info *eq, *q, *cq;
1802         struct be_rx_obj *rxo;
1803         int rc, i;
1804
1805         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1806                                 msix_enabled(adapter) ?
1807                                         adapter->num_msix_vec - 1 : 1);
1808         if (adapter->num_rx_qs != MAX_RX_QS)
1809                 dev_warn(&adapter->pdev->dev,
                        "Can create only %d RX queues\n", adapter->num_rx_qs);
1811
1812         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1813         for_all_rx_queues(adapter, rxo, i) {
1814                 rxo->adapter = adapter;
1815                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1816                 rxo->rx_eq.enable_aic = true;
1817
1818                 /* EQ */
1819                 eq = &rxo->rx_eq.q;
1820                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1821                                         sizeof(struct be_eq_entry));
1822                 if (rc)
1823                         goto err;
1824
1825                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1826                 if (rc)
1827                         goto err;
1828
1829                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1830
1831                 /* CQ */
1832                 cq = &rxo->cq;
1833                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1834                                 sizeof(struct be_eth_rx_compl));
1835                 if (rc)
1836                         goto err;
1837
1838                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1839                 if (rc)
1840                         goto err;
1841
1842                 /* Rx Q - will be created in be_open() */
1843                 q = &rxo->q;
1844                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1845                                 sizeof(struct be_eth_rx_d));
1846                 if (rc)
1847                         goto err;
1848
1849         }
1850
1851         return 0;
1852 err:
1853         be_rx_queues_destroy(adapter);
1854         return -1;
1855 }
1856
static bool event_peek(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        return eqe->evt != 0;
}
1865
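/* INTx ISR: on Lancer pending work is found by peeking at the EQs; on BE
 * the CEV ISR register identifies the EQs with pending events.
 */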
1866 static irqreturn_t be_intx(int irq, void *dev)
1867 {
1868         struct be_adapter *adapter = dev;
1869         struct be_rx_obj *rxo;
1870         int isr, i, tx = 0 , rx = 0;
1871
1872         if (lancer_chip(adapter)) {
1873                 if (event_peek(&adapter->tx_eq))
1874                         tx = event_handle(adapter, &adapter->tx_eq, false);
1875                 for_all_rx_queues(adapter, rxo, i) {
1876                         if (event_peek(&rxo->rx_eq))
1877                                 rx |= event_handle(adapter, &rxo->rx_eq, true);
1878                 }
1879
1880                 if (!(tx || rx))
1881                         return IRQ_NONE;
1882
1883         } else {
1884                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1885                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1886                 if (!isr)
1887                         return IRQ_NONE;
1888
1889                 if ((1 << adapter->tx_eq.eq_idx & isr))
1890                         event_handle(adapter, &adapter->tx_eq, false);
1891
1892                 for_all_rx_queues(adapter, rxo, i) {
1893                         if ((1 << rxo->rx_eq.eq_idx & isr))
1894                                 event_handle(adapter, &rxo->rx_eq, true);
1895                 }
1896         }
1897
1898         return IRQ_HANDLED;
1899 }
1900
1901 static irqreturn_t be_msix_rx(int irq, void *dev)
1902 {
1903         struct be_rx_obj *rxo = dev;
1904         struct be_adapter *adapter = rxo->adapter;
1905
1906         event_handle(adapter, &rxo->rx_eq, true);
1907
1908         return IRQ_HANDLED;
1909 }
1910
1911 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1912 {
1913         struct be_adapter *adapter = dev;
1914
1915         event_handle(adapter, &adapter->tx_eq, false);
1916
1917         return IRQ_HANDLED;
1918 }
1919
1920 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1921 {
        return rxcp->tcpf && !rxcp->err;
1923 }
1924
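/* NAPI poll for RX: process up to 'budget' completions, replenish the RX
 * queue when it runs low and re-arm the CQ once all work is consumed.
 */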
1925 static int be_poll_rx(struct napi_struct *napi, int budget)
1926 {
1927         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1928         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1929         struct be_adapter *adapter = rxo->adapter;
1930         struct be_queue_info *rx_cq = &rxo->cq;
1931         struct be_rx_compl_info *rxcp;
1932         u32 work_done;
1933
1934         rx_stats(rxo)->rx_polls++;
1935         for (work_done = 0; work_done < budget; work_done++) {
1936                 rxcp = be_rx_compl_get(rxo);
1937                 if (!rxcp)
1938                         break;
1939
1940                 /* Is it a flush compl that has no data */
1941                 if (unlikely(rxcp->num_rcvd == 0))
1942                         goto loop_continue;
1943
1944                 /* Discard compl with partial DMA Lancer B0 */
1945                 if (unlikely(!rxcp->pkt_size)) {
1946                         be_rx_compl_discard(adapter, rxo, rxcp);
1947                         goto loop_continue;
1948                 }
1949
                /* On BE drop pkts that arrive due to imperfect filtering in
                 * promiscuous mode on some SKUs
                 */
1953                 if (unlikely(rxcp->port != adapter->port_num &&
1954                                 !lancer_chip(adapter))) {
1955                         be_rx_compl_discard(adapter, rxo, rxcp);
1956                         goto loop_continue;
1957                 }
1958
1959                 if (do_gro(rxcp))
1960                         be_rx_compl_process_gro(adapter, rxo, rxcp);
1961                 else
1962                         be_rx_compl_process(adapter, rxo, rxcp);
1963 loop_continue:
1964                 be_rx_stats_update(rxo, rxcp);
1965         }
1966
1967         be_cq_notify(adapter, rx_cq->id, false, work_done);
1968
1969         /* Refill the queue */
1970         if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1971                 be_post_rx_frags(rxo, GFP_ATOMIC);
1972
1973         /* All consumed */
1974         if (work_done < budget) {
1975                 napi_complete(napi);
1976                 /* Arm CQ */
1977                 be_cq_notify(adapter, rx_cq->id, true, 0);
1978         }
1979         return work_done;
1980 }
1981
/* As TX and MCC share the same EQ, check for both TX and MCC completions.
1983  * For TX/MCC we don't honour budget; consume everything
1984  */
1985 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1986 {
1987         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1988         struct be_adapter *adapter =
1989                 container_of(tx_eq, struct be_adapter, tx_eq);
1990         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1991         struct be_tx_obj *txo;
1992         struct be_eth_tx_compl *txcp;
1993         int tx_compl, mcc_compl, status = 0;
1994         u8 i;
1995         u16 num_wrbs;
1996
1997         for_all_tx_queues(adapter, txo, i) {
1998                 tx_compl = 0;
1999                 num_wrbs = 0;
2000                 while ((txcp = be_tx_compl_get(&txo->cq))) {
2001                         num_wrbs += be_tx_compl_process(adapter, txo,
2002                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2003                                         wrb_index, txcp));
2004                         tx_compl++;
2005                 }
2006                 if (tx_compl) {
2007                         be_cq_notify(adapter, txo->cq.id, true, tx_compl);
2008
2009                         atomic_sub(num_wrbs, &txo->q.used);
2010
2011                         /* As Tx wrbs have been freed up, wake up netdev queue
2012                          * if it was stopped due to lack of tx wrbs.  */
2013                         if (__netif_subqueue_stopped(adapter->netdev, i) &&
2014                                 atomic_read(&txo->q.used) < txo->q.len / 2) {
2015                                 netif_wake_subqueue(adapter->netdev, i);
2016                         }
2017
2018                         u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2019                         tx_stats(txo)->tx_compl += tx_compl;
2020                         u64_stats_update_end(&tx_stats(txo)->sync_compl);
2021                 }
2022         }
2023
2024         mcc_compl = be_process_mcc(adapter, &status);
2025
        if (mcc_compl)
                be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
2029
2030         napi_complete(napi);
2031
2032         /* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
2033         if (lancer_chip(adapter) && !msix_enabled(adapter)) {
2034                 for_all_tx_queues(adapter, txo, i)
2035                         be_cq_notify(adapter, txo->cq.id, true, 0);
2036
2037                 be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
2038         }
2039
2040         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2041         adapter->drv_stats.tx_events++;
2042         return 1;
2043 }
2044
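/* Detect an unrecoverable error: the SLIPORT status register on Lancer,
 * the unmasked UE status bits in PCI config space on BE; any error found
 * is logged bit by bit.
 */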
2045 void be_detect_dump_ue(struct be_adapter *adapter)
2046 {
2047         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2048         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2049         u32 i;
2050
2051         if (adapter->eeh_err || adapter->ue_detected)
2052                 return;
2053
2054         if (lancer_chip(adapter)) {
2055                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2056                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2057                         sliport_err1 = ioread32(adapter->db +
2058                                         SLIPORT_ERROR1_OFFSET);
2059                         sliport_err2 = ioread32(adapter->db +
2060                                         SLIPORT_ERROR2_OFFSET);
2061                 }
2062         } else {
2063                 pci_read_config_dword(adapter->pdev,
2064                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2065                 pci_read_config_dword(adapter->pdev,
2066                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2067                 pci_read_config_dword(adapter->pdev,
2068                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2069                 pci_read_config_dword(adapter->pdev,
2070                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2071
                ue_lo &= ~ue_lo_mask;
                ue_hi &= ~ue_hi_mask;
2074         }
2075
2076         if (ue_lo || ue_hi ||
2077                 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2078                 adapter->ue_detected = true;
2079                 adapter->eeh_err = true;
2080                 dev_err(&adapter->pdev->dev,
2081                         "Unrecoverable error in the card\n");
2082         }
2083
2084         if (ue_lo) {
2085                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2086                         if (ue_lo & 1)
2087                                 dev_err(&adapter->pdev->dev,
2088                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2089                 }
2090         }
2091         if (ue_hi) {
2092                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2093                         if (ue_hi & 1)
2094                                 dev_err(&adapter->pdev->dev,
2095                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2096                 }
2097         }
2098
2099         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2100                 dev_err(&adapter->pdev->dev,
2101                         "sliport status 0x%x\n", sliport_status);
2102                 dev_err(&adapter->pdev->dev,
2103                         "sliport error1 0x%x\n", sliport_err1);
2104                 dev_err(&adapter->pdev->dev,
2105                         "sliport error2 0x%x\n", sliport_err2);
2106         }
2107 }
2108
2109 static void be_msix_disable(struct be_adapter *adapter)
2110 {
2111         if (msix_enabled(adapter)) {
2112                 pci_disable_msix(adapter->pdev);
2113                 adapter->num_msix_vec = 0;
2114         }
2115 }
2116
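/* Request one MSI-X vector per RX queue plus one shared TX/MCC vector; if
 * that fails, fall back to whatever count the system can grant as long as
 * it meets BE_MIN_MSIX_VECTORS.
 */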
2117 static void be_msix_enable(struct be_adapter *adapter)
2118 {
2119 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2120         int i, status, num_vec;
2121
2122         num_vec = be_num_rxqs_want(adapter) + 1;
2123
2124         for (i = 0; i < num_vec; i++)
2125                 adapter->msix_entries[i].entry = i;
2126
2127         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2128         if (status == 0) {
2129                 goto done;
2130         } else if (status >= BE_MIN_MSIX_VECTORS) {
2131                 num_vec = status;
2132                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2133                                 num_vec) == 0)
2134                         goto done;
2135         }
2136         return;
2137 done:
2138         adapter->num_msix_vec = num_vec;
2139         return;
2140 }
2141
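/* On a PF with num_vfs requested, enable SR-IOV with the VF count capped
 * at the TotalVFs value read from the SR-IOV capability, and allocate the
 * per-VF config array.
 */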
2142 static int be_sriov_enable(struct be_adapter *adapter)
2143 {
2144         be_check_sriov_fn_type(adapter);
2145
2146 #ifdef CONFIG_PCI_IOV
2147         if (be_physfn(adapter) && num_vfs) {
2148                 int status, pos;
2149                 u16 dev_vfs;
2150
2151                 pos = pci_find_ext_capability(adapter->pdev,
2152                                                 PCI_EXT_CAP_ID_SRIOV);
2153                 pci_read_config_word(adapter->pdev,
2154                                      pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
2155
2156                 adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
2157                 if (adapter->num_vfs != num_vfs)
2158                         dev_info(&adapter->pdev->dev,
2159                                  "Device supports %d VFs and not %d\n",
2160                                  adapter->num_vfs, num_vfs);
2161
2162                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2163                 if (status)
2164                         adapter->num_vfs = 0;
2165
2166                 if (adapter->num_vfs) {
                        adapter->vf_cfg = kcalloc(adapter->num_vfs,
2168                                                 sizeof(struct be_vf_cfg),
2169                                                 GFP_KERNEL);
2170                         if (!adapter->vf_cfg)
2171                                 return -ENOMEM;
2172                 }
2173         }
2174 #endif
2175         return 0;
2176 }
2177
2178 static void be_sriov_disable(struct be_adapter *adapter)
2179 {
2180 #ifdef CONFIG_PCI_IOV
2181         if (sriov_enabled(adapter)) {
2182                 pci_disable_sriov(adapter->pdev);
2183                 kfree(adapter->vf_cfg);
2184                 adapter->num_vfs = 0;
2185         }
2186 #endif
2187 }
2188
2189 static inline int be_msix_vec_get(struct be_adapter *adapter,
2190                                         struct be_eq_obj *eq_obj)
2191 {
2192         return adapter->msix_entries[eq_obj->eq_idx].vector;
2193 }
2194
2195 static int be_request_irq(struct be_adapter *adapter,
2196                 struct be_eq_obj *eq_obj,
2197                 void *handler, char *desc, void *context)
2198 {
2199         struct net_device *netdev = adapter->netdev;
2200         int vec;
2201
2202         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2203         vec = be_msix_vec_get(adapter, eq_obj);
2204         return request_irq(vec, handler, 0, eq_obj->desc, context);
2205 }
2206
2207 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2208                         void *context)
2209 {
2210         int vec = be_msix_vec_get(adapter, eq_obj);
2211         free_irq(vec, context);
2212 }
2213
2214 static int be_msix_register(struct be_adapter *adapter)
2215 {
2216         struct be_rx_obj *rxo;
2217         int status, i;
2218         char qname[10];
2219
2220         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2221                                 adapter);
2222         if (status)
2223                 goto err;
2224
2225         for_all_rx_queues(adapter, rxo, i) {
2226                 sprintf(qname, "rxq%d", i);
2227                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2228                                 qname, rxo);
2229                 if (status)
2230                         goto err_msix;
2231         }
2232
2233         return 0;
2234
2235 err_msix:
2236         be_free_irq(adapter, &adapter->tx_eq, adapter);
2237
2238         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2239                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2240
2241 err:
2242         dev_warn(&adapter->pdev->dev,
2243                 "MSIX Request IRQ failed - err %d\n", status);
2244         be_msix_disable(adapter);
2245         return status;
2246 }
2247
2248 static int be_irq_register(struct be_adapter *adapter)
2249 {
2250         struct net_device *netdev = adapter->netdev;
2251         int status;
2252
2253         if (msix_enabled(adapter)) {
2254                 status = be_msix_register(adapter);
2255                 if (status == 0)
2256                         goto done;
2257                 /* INTx is not supported for VF */
2258                 if (!be_physfn(adapter))
2259                         return status;
2260         }
2261
2262         /* INTx */
2263         netdev->irq = adapter->pdev->irq;
2264         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2265                         adapter);
2266         if (status) {
2267                 dev_err(&adapter->pdev->dev,
2268                         "INTx request IRQ failed - err %d\n", status);
2269                 return status;
2270         }
2271 done:
2272         adapter->isr_registered = true;
2273         return 0;
2274 }
2275
2276 static void be_irq_unregister(struct be_adapter *adapter)
2277 {
2278         struct net_device *netdev = adapter->netdev;
2279         struct be_rx_obj *rxo;
2280         int i;
2281
2282         if (!adapter->isr_registered)
2283                 return;
2284
2285         /* INTx */
2286         if (!msix_enabled(adapter)) {
2287                 free_irq(netdev->irq, adapter);
2288                 goto done;
2289         }
2290
2291         /* MSIx */
2292         be_free_irq(adapter, &adapter->tx_eq, adapter);
2293
2294         for_all_rx_queues(adapter, rxo, i)
2295                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2296
2297 done:
2298         adapter->isr_registered = false;
2299 }
2300
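/* Destroy the RX rings, draining pending completions after a 1ms grace
 * period for in-flight DMA, and clear any residual RX EQ events.
 */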
2301 static void be_rx_queues_clear(struct be_adapter *adapter)
2302 {
2303         struct be_queue_info *q;
2304         struct be_rx_obj *rxo;
2305         int i;
2306
2307         for_all_rx_queues(adapter, rxo, i) {
2308                 q = &rxo->q;
2309                 if (q->created) {
2310                         be_cmd_rxq_destroy(adapter, q);
2311                         /* After the rxq is invalidated, wait for a grace time
2312                          * of 1ms for all dma to end and the flush compl to
2313                          * arrive
2314                          */
2315                         mdelay(1);
2316                         be_rx_q_clean(adapter, rxo);
2317                 }
2318
2319                 /* Clear any residual events */
2320                 q = &rxo->rx_eq.q;
2321                 if (q->created)
2322                         be_eq_clean(adapter, &rxo->rx_eq);
2323         }
2324 }
2325
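/* Teardown order: stop async MCC, disable interrupts, quiesce NAPI, sync
 * and free IRQs, then reap pending TX completions and clear the RX queues.
 */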
2326 static int be_close(struct net_device *netdev)
2327 {
2328         struct be_adapter *adapter = netdev_priv(netdev);
2329         struct be_rx_obj *rxo;
2330         struct be_tx_obj *txo;
2331         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2332         int vec, i;
2333
2334         be_async_mcc_disable(adapter);
2335
2336         if (!lancer_chip(adapter))
2337                 be_intr_set(adapter, false);
2338
2339         for_all_rx_queues(adapter, rxo, i)
2340                 napi_disable(&rxo->rx_eq.napi);
2341
2342         napi_disable(&tx_eq->napi);
2343
2344         if (lancer_chip(adapter)) {
2345                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
                for_all_rx_queues(adapter, rxo, i)
                        be_cq_notify(adapter, rxo->cq.id, false, 0);
                for_all_tx_queues(adapter, txo, i)
                        be_cq_notify(adapter, txo->cq.id, false, 0);
2350         }
2351
2352         if (msix_enabled(adapter)) {
2353                 vec = be_msix_vec_get(adapter, tx_eq);
2354                 synchronize_irq(vec);
2355
2356                 for_all_rx_queues(adapter, rxo, i) {
2357                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2358                         synchronize_irq(vec);
2359                 }
2360         } else {
2361                 synchronize_irq(netdev->irq);
2362         }
2363         be_irq_unregister(adapter);
2364
2365         /* Wait for all pending tx completions to arrive so that
2366          * all tx skbs are freed.
2367          */
2368         for_all_tx_queues(adapter, txo, i)
2369                 be_tx_compl_clean(adapter, txo);
2370
2371         be_rx_queues_clear(adapter);
2372         return 0;
2373 }
2374
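/* Create the RX rings; queues beyond the first are RSS queues, and the
 * 128-entry RSS indirection table is filled round-robin with their rss_ids
 * before the rings are seeded with buffers.
 */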
2375 static int be_rx_queues_setup(struct be_adapter *adapter)
2376 {
2377         struct be_rx_obj *rxo;
2378         int rc, i, j;
2379         u8 rsstable[128];
2380
2381         for_all_rx_queues(adapter, rxo, i) {
2382                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2383                         rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2384                         adapter->if_handle,
                        (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2386                 if (rc)
2387                         return rc;
2388         }
2389
2390         if (be_multi_rxq(adapter)) {
2391                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2392                         for_all_rss_queues(adapter, rxo, i) {
2393                                 if ((j + i) >= 128)
2394                                         break;
2395                                 rsstable[j + i] = rxo->rss_id;
2396                         }
2397                 }
2398                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2399
2400                 if (rc)
2401                         return rc;
2402         }
2403
2404         /* First time posting */
2405         for_all_rx_queues(adapter, rxo, i) {
2406                 be_post_rx_frags(rxo, GFP_KERNEL);
2407                 napi_enable(&rxo->rx_eq.napi);
2408         }
2409         return 0;
2410 }
2411
2412 static int be_open(struct net_device *netdev)
2413 {
2414         struct be_adapter *adapter = netdev_priv(netdev);
2415         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2416         struct be_rx_obj *rxo;
2417         u8 link_status;
2418         int status, i;
2419
2420         status = be_rx_queues_setup(adapter);
2421         if (status)
2422                 goto err;
2423
2424         napi_enable(&tx_eq->napi);
2425
2426         be_irq_register(adapter);
2427
2428         if (!lancer_chip(adapter))
2429                 be_intr_set(adapter, true);
2430
2431         /* The evt queues are created in unarmed state; arm them */
2432         for_all_rx_queues(adapter, rxo, i) {
2433                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2434                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2435         }
2436         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2437
2438         /* Now that interrupts are on we can process async mcc */
2439         be_async_mcc_enable(adapter);
2440
2441         status = be_cmd_link_status_query(adapter, NULL, NULL,
2442                                           &link_status, 0);
2443         if (!status)
2444                 be_link_status_update(adapter, link_status);
2445
2446         return 0;
2447 err:
2448         be_close(adapter->netdev);
2449         return -EIO;
2450 }
2451
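/* Program (enable) or clear (disable) the magic-packet WoL filter and set
 * the matching PCI wake state for D3hot/D3cold.
 */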
2452 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2453 {
2454         struct be_dma_mem cmd;
2455         int status = 0;
2456         u8 mac[ETH_ALEN];
2457
2458         memset(mac, 0, ETH_ALEN);
2459
2460         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2461         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2462                                     GFP_KERNEL);
2463         if (cmd.va == NULL)
2464                 return -1;
2465         memset(cmd.va, 0, cmd.size);
2466
2467         if (enable) {
2468                 status = pci_write_config_dword(adapter->pdev,
2469                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2470                 if (status) {
2471                         dev_err(&adapter->pdev->dev,
2472                                 "Could not enable Wake-on-lan\n");
2473                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2474                                           cmd.dma);
2475                         return status;
2476                 }
2477                 status = be_cmd_enable_magic_wol(adapter,
2478                                 adapter->netdev->dev_addr, &cmd);
2479                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2480                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2481         } else {
2482                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2483                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2484                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2485         }
2486
2487         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2488         return status;
2489 }
2490
2491 /*
2492  * Generate a seed MAC address from the PF MAC Address using jhash.
2493  * MAC Address for VFs are assigned incrementally starting from the seed.
2494  * These addresses are programmed in the ASIC by the PF and the VF driver
2495  * queries for the MAC address during its probe.
2496  */
2497 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2498 {
2499         u32 vf;
2500         int status = 0;
2501         u8 mac[ETH_ALEN];
2502         struct be_vf_cfg *vf_cfg;
2503
2504         be_vf_eth_addr_generate(adapter, mac);
2505
2506         for_all_vfs(adapter, vf_cfg, vf) {
2507                 if (lancer_chip(adapter)) {
                        status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2509                 } else {
2510                         status = be_cmd_pmac_add(adapter, mac,
2511                                                  vf_cfg->if_handle,
2512                                                  &vf_cfg->pmac_id, vf + 1);
2513                 }
2514
2515                 if (status)
2516                         dev_err(&adapter->pdev->dev,
2517                         "Mac address assignment failed for VF %d\n", vf);
2518                 else
2519                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2520
2521                 mac[5] += 1;
2522         }
2523         return status;
2524 }
2525
2526 static void be_vf_clear(struct be_adapter *adapter)
2527 {
2528         struct be_vf_cfg *vf_cfg;
2529         u32 vf;
2530
2531         for_all_vfs(adapter, vf_cfg, vf) {
2532                 if (lancer_chip(adapter))
2533                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2534                 else
2535                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2536                                         vf_cfg->pmac_id, vf + 1);
2537
2538                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2539         }
2540 }
2541
2542 static int be_clear(struct be_adapter *adapter)
2543 {
2544         if (sriov_enabled(adapter))
2545                 be_vf_clear(adapter);
2546
        be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2548
2549         be_mcc_queues_destroy(adapter);
2550         be_rx_queues_destroy(adapter);
2551         be_tx_queues_destroy(adapter);
2552
2553         /* tell fw we're done with firing cmds */
2554         be_cmd_fw_clean(adapter);
2555         return 0;
2556 }
2557
2558 static void be_vf_setup_init(struct be_adapter *adapter)
2559 {
2560         struct be_vf_cfg *vf_cfg;
2561         int vf;
2562
2563         for_all_vfs(adapter, vf_cfg, vf) {
2564                 vf_cfg->if_handle = -1;
2565                 vf_cfg->pmac_id = -1;
2566         }
2567 }
2568
2569 static int be_vf_setup(struct be_adapter *adapter)
2570 {
2571         struct be_vf_cfg *vf_cfg;
2572         u32 cap_flags, en_flags, vf;
2573         u16 lnk_speed;
2574         int status;
2575
2576         be_vf_setup_init(adapter);
2577
2578         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2579                                 BE_IF_FLAGS_MULTICAST;
2580         for_all_vfs(adapter, vf_cfg, vf) {
2581                 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2582                                           &vf_cfg->if_handle, NULL, vf + 1);
2583                 if (status)
2584                         goto err;
2585         }
2586
2587         status = be_vf_eth_addr_config(adapter);
2588         if (status)
2589                 goto err;
2590
2591         for_all_vfs(adapter, vf_cfg, vf) {
2592                 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2593                                                   NULL, vf + 1);
2594                 if (status)
2595                         goto err;
2596                 vf_cfg->tx_rate = lnk_speed * 10;
2597         }
2598         return 0;
2599 err:
2600         return status;
2601 }
2602
2603 static void be_setup_init(struct be_adapter *adapter)
2604 {
2605         adapter->vlan_prio_bmap = 0xff;
2606         adapter->link_speed = -1;
2607         adapter->if_handle = -1;
2608         adapter->be3_native = false;
2609         adapter->promiscuous = false;
2610         adapter->eq_next_idx = 0;
2611 }
2612
2613 static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2614 {
2615         u32 pmac_id;
2616         int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2617         if (status != 0)
2618                 goto do_none;
2619         status = be_cmd_mac_addr_query(adapter, mac,
2620                         MAC_ADDRESS_TYPE_NETWORK,
2621                         false, adapter->if_handle, pmac_id);
2622         if (status != 0)
2623                 goto do_none;
2624         status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2625                         &adapter->pmac_id, 0);
2626 do_none:
2627         return status;
2628 }
2629
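/* Bring-up sequence: create the TX, RX and MCC queues, create the
 * interface with the permanent MAC, then configure vlans, rx-mode,
 * flow-control and, when enabled, the VFs.
 */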
2630 static int be_setup(struct be_adapter *adapter)
2631 {
2632         struct net_device *netdev = adapter->netdev;
2633         u32 cap_flags, en_flags;
2634         u32 tx_fc, rx_fc;
2635         int status, i;
2636         u8 mac[ETH_ALEN];
2637         struct be_tx_obj *txo;
2638
2639         be_setup_init(adapter);
2640
2641         be_cmd_req_native_mode(adapter);
2642
2643         status = be_tx_queues_create(adapter);
2644         if (status != 0)
2645                 goto err;
2646
2647         status = be_rx_queues_create(adapter);
2648         if (status != 0)
2649                 goto err;
2650
2651         status = be_mcc_queues_create(adapter);
2652         if (status != 0)
2653                 goto err;
2654
2655         memset(mac, 0, ETH_ALEN);
2656         status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2657                         true /*permanent */, 0, 0);
2658         if (status)
2659                 return status;
2660         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2661         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2662
2663         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2664                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2665         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2666                         BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2667
2668         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2669                 cap_flags |= BE_IF_FLAGS_RSS;
2670                 en_flags |= BE_IF_FLAGS_RSS;
2671         }
2672         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2673                         netdev->dev_addr, &adapter->if_handle,
2674                         &adapter->pmac_id, 0);
2675         if (status != 0)
2676                 goto err;
2677
        for_all_tx_queues(adapter, txo, i) {
2679                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2680                 if (status)
2681                         goto err;
2682         }
2683
        /* The VF's permanent mac queried from the card is incorrect.
         * For BEx: Query the mac configured by the PF using if_handle.
         * For Lancer: Get and use mac_list to obtain the mac address.
         */
2688         if (!be_physfn(adapter)) {
2689                 if (lancer_chip(adapter))
2690                         status = be_configure_mac_from_list(adapter, mac);
2691                 else
2692                         status = be_cmd_mac_addr_query(adapter, mac,
2693                                         MAC_ADDRESS_TYPE_NETWORK, false,
2694                                         adapter->if_handle, 0);
2695                 if (!status) {
2696                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2697                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2698                 }
2699         }
2700
2701         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2702
2703         status = be_vid_config(adapter, false, 0);
2704         if (status)
2705                 goto err;
2706
2707         be_set_rx_mode(adapter->netdev);
2708
2709         status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2710         /* For Lancer: It is legal for this cmd to fail on VF */
2711         if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2712                 goto err;
2713
2714         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2715                 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2716                                         adapter->rx_fc);
2717                 /* For Lancer: It is legal for this cmd to fail on VF */
2718                 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2719                         goto err;
2720         }
2721
2722         pcie_set_readrq(adapter->pdev, 4096);
2723
2724         if (sriov_enabled(adapter)) {
2725                 status = be_vf_setup(adapter);
2726                 if (status)
2727                         goto err;
2728         }
2729
2730         return 0;
2731 err:
2732         be_clear(adapter);
2733         return status;
2734 }
2735
2736 #ifdef CONFIG_NET_POLL_CONTROLLER
2737 static void be_netpoll(struct net_device *netdev)
2738 {
2739         struct be_adapter *adapter = netdev_priv(netdev);
2740         struct be_rx_obj *rxo;
2741         int i;
2742
2743         event_handle(adapter, &adapter->tx_eq, false);
2744         for_all_rx_queues(adapter, rxo, i)
2745                 event_handle(adapter, &rxo->rx_eq, true);
2746 }
2747 #endif
2748
2749 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2750 static bool be_flash_redboot(struct be_adapter *adapter,
2751                         const u8 *p, u32 img_start, int image_size,
2752                         int hdr_size)
2753 {
2754         u32 crc_offset;
2755         u8 flashed_crc[4];
2756         int status;
2757
2758         crc_offset = hdr_size + img_start + image_size - 4;
2759
2760         p += crc_offset;
2761
2762         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2763                         (image_size - 4));
2764         if (status) {
2765                 dev_err(&adapter->pdev->dev,
2766                 "could not get crc from flash, not flashing redboot\n");
2767                 return false;
2768         }
2769
        /* update redboot only if the CRC does not match */
        return memcmp(flashed_crc, p, 4) != 0;
2775 }
2776
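/* PHY firmware needs flashing only for the TN_8022 10GBase-T PHY */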
2777 static bool phy_flashing_required(struct be_adapter *adapter)
2778 {
2779         int status = 0;
2780         struct be_phy_info phy_info;
2781
2782         status = be_cmd_get_phy_info(adapter, &phy_info);
2783         if (status)
2784                 return false;
2785         if ((phy_info.phy_type == TN_8022) &&
2786                 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2787                 return true;
2788         }
2789         return false;
2790 }
2791
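/* Flash each UFI component into its region: NCSI fw is skipped on older
 * firmware, PHY fw when not required, and redboot when its CRC already
 * matches; images are written in 32KB chunks, using a SAVE op for every
 * chunk except the last.
 */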
2792 static int be_flash_data(struct be_adapter *adapter,
2793                         const struct firmware *fw,
2794                         struct be_dma_mem *flash_cmd, int num_of_images)
2795
2796 {
2797         int status = 0, i, filehdr_size = 0;
2798         u32 total_bytes = 0, flash_op;
2799         int num_bytes;
2800         const u8 *p = fw->data;
2801         struct be_cmd_write_flashrom *req = flash_cmd->va;
2802         const struct flash_comp *pflashcomp;
2803         int num_comp;
2804
2805         static const struct flash_comp gen3_flash_types[10] = {
2806                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2807                         FLASH_IMAGE_MAX_SIZE_g3},
2808                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2809                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2810                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2811                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2812                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2813                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2814                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2815                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2816                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2817                         FLASH_IMAGE_MAX_SIZE_g3},
2818                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2819                         FLASH_IMAGE_MAX_SIZE_g3},
2820                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2821                         FLASH_IMAGE_MAX_SIZE_g3},
2822                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2823                         FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2824                 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2825                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2826         };
2827         static const struct flash_comp gen2_flash_types[8] = {
2828                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2829                         FLASH_IMAGE_MAX_SIZE_g2},
2830                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2831                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2832                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2833                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2834                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2835                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2836                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2837                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2838                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2839                         FLASH_IMAGE_MAX_SIZE_g2},
2840                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2841                         FLASH_IMAGE_MAX_SIZE_g2},
2842                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2843                          FLASH_IMAGE_MAX_SIZE_g2}
2844         };
2845
2846         if (adapter->generation == BE_GEN3) {
2847                 pflashcomp = gen3_flash_types;
2848                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2849                 num_comp = ARRAY_SIZE(gen3_flash_types);
2850         } else {
2851                 pflashcomp = gen2_flash_types;
2852                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2853                 num_comp = ARRAY_SIZE(gen2_flash_types);
2854         }
2855         for (i = 0; i < num_comp; i++) {
2856                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2857                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2858                         continue;
2859                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2860                         if (!phy_flashing_required(adapter))
2861                                 continue;
2862                 }
2863                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2864                         (!be_flash_redboot(adapter, fw->data,
2865                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2866                         (num_of_images * sizeof(struct image_hdr)))))
2867                         continue;
2868                 p = fw->data;
2869                 p += filehdr_size + pflashcomp[i].offset
2870                         + (num_of_images * sizeof(struct image_hdr));
2871                 if (p + pflashcomp[i].size > fw->data + fw->size)
2872                         return -EINVAL;
2873                 total_bytes = pflashcomp[i].size;
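                /* Write this component out in 32KB chunks: intermediate
                 * chunks go down with a SAVE op and the final chunk with a
                 * FLASH op, at which point the adapter commits the image.
                 */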
2874                 while (total_bytes) {
2875                         if (total_bytes > 32*1024)
2876                                 num_bytes = 32*1024;
2877                         else
2878                                 num_bytes = total_bytes;
2879                         total_bytes -= num_bytes;
2880                         if (!total_bytes) {
2881                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2882                                         flash_op = FLASHROM_OPER_PHY_FLASH;
2883                                 else
2884                                         flash_op = FLASHROM_OPER_FLASH;
2885                         } else {
2886                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2887                                         flash_op = FLASHROM_OPER_PHY_SAVE;
2888                                 else
2889                                         flash_op = FLASHROM_OPER_SAVE;
2890                         }
2891                         memcpy(req->params.data_buf, p, num_bytes);
2892                         p += num_bytes;
2893                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2894                                 pflashcomp[i].optype, flash_op, num_bytes);
2895                         if (status) {
2896                                 if ((status == ILLEGAL_IOCTL_REQ) &&
2897                                         (pflashcomp[i].optype ==
2898                                                 IMG_TYPE_PHY_FW))
2899                                         break;
2900                                 dev_err(&adapter->pdev->dev,
2901                                         "cmd to write to flash ROM failed\n");
2902                                 return -EIO;
2903                         }
2904                 }
2905         }
2906         return 0;
2907 }
2908
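/* Infer the UFI image generation from the first character of the build
 * string in the flash file header: '3' => BE3-class, '2' => BE2-class.
 */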
2909 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2910 {
2911         if (fhdr == NULL)
2912                 return 0;
2913         if (fhdr->build[0] == '3')
2914                 return BE_GEN3;
2915         else if (fhdr->build[0] == '2')
2916                 return BE_GEN2;
2917         else
2918                 return 0;
2919 }
2920
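/* Firmware download for Lancer chips: the image is copied to the adapter
 * in 32KB WRITE_OBJECT chunks and then committed with a final zero-length
 * write to the same flash object.
 */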
2921 static int lancer_fw_download(struct be_adapter *adapter,
2922                                 const struct firmware *fw)
2923 {
2924 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2925 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2926         struct be_dma_mem flash_cmd;
2927         const u8 *data_ptr = NULL;
2928         u8 *dest_image_ptr = NULL;
2929         size_t image_size = 0;
2930         u32 chunk_size = 0;
2931         u32 data_written = 0;
2932         u32 offset = 0;
2933         int status = 0;
2934         u8 add_status = 0;
2935
2936         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2937                 dev_err(&adapter->pdev->dev,
2938                         "FW image not properly aligned. "
2939                         "Length must be 4-byte aligned.\n");
2940                 status = -EINVAL;
2941                 goto lancer_fw_exit;
2942         }
2943
2944         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2945                                 + LANCER_FW_DOWNLOAD_CHUNK;
2946         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2947                                                 &flash_cmd.dma, GFP_KERNEL);
2948         if (!flash_cmd.va) {
2949                 status = -ENOMEM;
2950                 dev_err(&adapter->pdev->dev,
2951                         "Memory allocation failure while flashing\n");
2952                 goto lancer_fw_exit;
2953         }
2954
2955         dest_image_ptr = flash_cmd.va +
2956                                 sizeof(struct lancer_cmd_req_write_object);
2957         image_size = fw->size;
2958         data_ptr = fw->data;
2959
2960         while (image_size) {
2961                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2962
2963                 /* Copy the image chunk content. */
2964                 memcpy(dest_image_ptr, data_ptr, chunk_size);
2965
2966                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2967                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2968                                 &data_written, &add_status);
2969
2970                 if (status)
2971                         break;
2972
2973                 offset += data_written;
2974                 data_ptr += data_written;
2975                 image_size -= data_written;
2976         }
2977
2978         if (!status) {
2979                 /* Commit the FW written */
2980                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2981                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2982                                         &data_written, &add_status);
2983         }
2984
2985         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2986                                 flash_cmd.dma);
2987         if (status) {
2988                 dev_err(&adapter->pdev->dev,
2989                         "Firmware load error. "
2990                         "Status code: 0x%x Additional Status: 0x%x\n",
2991                         status, add_status);
2992                 goto lancer_fw_exit;
2993         }
2994
2995         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2996 lancer_fw_exit:
2997         return status;
2998 }
2999
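/* Firmware download for BE2/BE3 UFI images. The UFI generation must match
 * the adapter generation; a gen3 UFI may carry several images, of which
 * only those with imageid 1 are flashed.
 */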
3000 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3001 {
3002         struct flash_file_hdr_g2 *fhdr;
3003         struct flash_file_hdr_g3 *fhdr3;
3004         struct image_hdr *img_hdr_ptr = NULL;
3005         struct be_dma_mem flash_cmd;
3006         const u8 *p;
3007         int status = 0, i = 0, num_imgs = 0;
3008
3009         p = fw->data;
3010         fhdr = (struct flash_file_hdr_g2 *) p;
3011
3012         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3013         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3014                                           &flash_cmd.dma, GFP_KERNEL);
3015         if (!flash_cmd.va) {
3016                 status = -ENOMEM;
3017                 dev_err(&adapter->pdev->dev,
3018                         "Memory allocation failure while flashing\n");
3019                 goto be_fw_exit;
3020         }
3021
3022         if ((adapter->generation == BE_GEN3) &&
3023                         (get_ufigen_type(fhdr) == BE_GEN3)) {
3024                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3025                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3026                 for (i = 0; i < num_imgs; i++) {
3027                         img_hdr_ptr = (struct image_hdr *) (fw->data +
3028                                         (sizeof(struct flash_file_hdr_g3) +
3029                                          i * sizeof(struct image_hdr)));
3030                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3031                                 status = be_flash_data(adapter, fw, &flash_cmd,
3032                                                         num_imgs);
3033                 }
3034         } else if ((adapter->generation == BE_GEN2) &&
3035                         (get_ufigen_type(fhdr) == BE_GEN2)) {
3036                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3037         } else {
3038                 dev_err(&adapter->pdev->dev,
3039                         "UFI and Interface are not compatible for flashing\n");
3040                 status = -EINVAL;
3041         }
3042
3043         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3044                           flash_cmd.dma);
3045         if (status) {
3046                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3047                 goto be_fw_exit;
3048         }
3049
3050         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3051
3052 be_fw_exit:
3053         return status;
3054 }
3055
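/* Entry point for user-initiated flashing (typically reached via
 * ethtool's flash-device operation). Fetches the image with
 * request_firmware() and dispatches to the Lancer or BE download path.
 */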
3056 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3057 {
3058         const struct firmware *fw;
3059         int status;
3060
3061         if (!netif_running(adapter->netdev)) {
3062                 dev_err(&adapter->pdev->dev,
3063                         "Firmware load not allowed (interface is down)\n");
3064                 return -ENETDOWN;
3065         }
3066
3067         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3068         if (status)
3069                 goto fw_exit;
3070
3071         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3072
3073         if (lancer_chip(adapter))
3074                 status = lancer_fw_download(adapter, fw);
3075         else
3076                 status = be_fw_download(adapter, fw);
3077
3078 fw_exit:
3079         release_firmware(fw);
3080         return status;
3081 }
3082
3083 static const struct net_device_ops be_netdev_ops = {
3084         .ndo_open               = be_open,
3085         .ndo_stop               = be_close,
3086         .ndo_start_xmit         = be_xmit,
3087         .ndo_set_rx_mode        = be_set_rx_mode,
3088         .ndo_set_mac_address    = be_mac_addr_set,
3089         .ndo_change_mtu         = be_change_mtu,
3090         .ndo_get_stats64        = be_get_stats64,
3091         .ndo_validate_addr      = eth_validate_addr,
3092         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3093         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3094         .ndo_set_vf_mac         = be_set_vf_mac,
3095         .ndo_set_vf_vlan        = be_set_vf_vlan,
3096         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3097         .ndo_get_vf_config      = be_get_vf_config,
3098 #ifdef CONFIG_NET_POLL_CONTROLLER
3099         .ndo_poll_controller    = be_netpoll,
3100 #endif
3101 };
3102
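/* Set up netdev feature flags (checksum offload, TSO, VLAN acceleration
 * and, with multiple RX queues, RX hashing), install the netdev and
 * ethtool ops, and register one NAPI context per RX event queue plus one
 * for the combined TX/MCC event queue.
 */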
3103 static void be_netdev_init(struct net_device *netdev)
3104 {
3105         struct be_adapter *adapter = netdev_priv(netdev);
3106         struct be_rx_obj *rxo;
3107         int i;
3108
3109         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3110                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3111                 NETIF_F_HW_VLAN_TX;
3112         if (be_multi_rxq(adapter))
3113                 netdev->hw_features |= NETIF_F_RXHASH;
3114
3115         netdev->features |= netdev->hw_features |
3116                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3117
3118         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3119                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3120
3121         netdev->flags |= IFF_MULTICAST;
3122
3123         netif_set_gso_max_size(netdev, 65535);
3124
3125         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3126
3127         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3128
3129         for_all_rx_queues(adapter, rxo, i)
3130                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3131                                 BE_NAPI_WEIGHT);
3132
3133         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
3134                 BE_NAPI_WEIGHT);
3135 }
3136
3137 static void be_unmap_pci_bars(struct be_adapter *adapter)
3138 {
3139         if (adapter->csr)
3140                 iounmap(adapter->csr);
3141         if (adapter->db)
3142                 iounmap(adapter->db);
3143 }
3144
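/* Map the PCI BARs. Lancer exposes only a doorbell area in BAR 0; on
 * BE2/BE3 the PF also maps the CSR space from BAR 2, and the doorbells
 * live in BAR 4 (BAR 0 for BE3 virtual functions).
 */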
3145 static int be_map_pci_bars(struct be_adapter *adapter)
3146 {
3147         u8 __iomem *addr;
3148         int db_reg;
3149
3150         if (lancer_chip(adapter)) {
3151                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3152                         pci_resource_len(adapter->pdev, 0));
3153                 if (addr == NULL)
3154                         return -ENOMEM;
3155                 adapter->db = addr;
3156                 return 0;
3157         }
3158
3159         if (be_physfn(adapter)) {
3160                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3161                                 pci_resource_len(adapter->pdev, 2));
3162                 if (addr == NULL)
3163                         return -ENOMEM;
3164                 adapter->csr = addr;
3165         }
3166
3167         if (adapter->generation == BE_GEN2) {
3168                 db_reg = 4;
3169         } else {
3170                 if (be_physfn(adapter))
3171                         db_reg = 4;
3172                 else
3173                         db_reg = 0;
3174         }
3175         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3176                                 pci_resource_len(adapter->pdev, db_reg));
3177         if (addr == NULL)
3178                 goto pci_map_err;
3179         adapter->db = addr;
3180
3181         return 0;
3182 pci_map_err:
3183         be_unmap_pci_bars(adapter);
3184         return -ENOMEM;
3185 }
3186
3188 static void be_ctrl_cleanup(struct be_adapter *adapter)
3189 {
3190         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3191
3192         be_unmap_pci_bars(adapter);
3193
3194         if (mem->va)
3195                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3196                                   mem->dma);
3197
3198         mem = &adapter->rx_filter;
3199         if (mem->va)
3200                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3201                                   mem->dma);
3202 }
3203
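/* Map the BARs and allocate the DMA memory used by the control path:
 * the mailbox for bootstrap commands and the RX filter command buffer.
 */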
3204 static int be_ctrl_init(struct be_adapter *adapter)
3205 {
3206         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3207         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3208         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3209         int status;
3210
3211         status = be_map_pci_bars(adapter);
3212         if (status)
3213                 goto done;
3214
3215         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3216         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3217                                                 mbox_mem_alloc->size,
3218                                                 &mbox_mem_alloc->dma,
3219                                                 GFP_KERNEL);
3220         if (!mbox_mem_alloc->va) {
3221                 status = -ENOMEM;
3222                 goto unmap_pci_bars;
3223         }
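        /* Carve a 16-byte-aligned mailbox out of the over-allocated
         * buffer; the extra 16 bytes allocated above guarantee that both
         * the virtual and DMA addresses can be rounded up to the
         * alignment the mailbox requires.
         */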
3224         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3225         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3226         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3227         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3228
3229         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3230         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3231                                         &rx_filter->dma, GFP_KERNEL);
3232         if (rx_filter->va == NULL) {
3233                 status = -ENOMEM;
3234                 goto free_mbox;
3235         }
3236         memset(rx_filter->va, 0, rx_filter->size);
3237
3238         mutex_init(&adapter->mbox_lock);
3239         spin_lock_init(&adapter->mcc_lock);
3240         spin_lock_init(&adapter->mcc_cq_lock);
3241
3242         init_completion(&adapter->flash_compl);
3243         pci_save_state(adapter->pdev);
3244         return 0;
3245
3246 free_mbox:
3247         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3248                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3249
3250 unmap_pci_bars:
3251         be_unmap_pci_bars(adapter);
3252
3253 done:
3254         return status;
3255 }
3256
3257 static void be_stats_cleanup(struct be_adapter *adapter)
3258 {
3259         struct be_dma_mem *cmd = &adapter->stats_cmd;
3260
3261         if (cmd->va)
3262                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3263                                   cmd->va, cmd->dma);
3264 }
3265
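/* Allocate the DMA buffer used for stats queries. Its size depends on
 * the command version: v0 for BE2, v1 for BE3, and the PPORT stats
 * command for Lancer.
 */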
3266 static int be_stats_init(struct be_adapter *adapter)
3267 {
3268         struct be_dma_mem *cmd = &adapter->stats_cmd;
3269
3270         if (adapter->generation == BE_GEN2) {
3271                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3272         } else {
3273                 if (lancer_chip(adapter))
3274                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3275                 else
3276                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3277         }
3278         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3279                                      GFP_KERNEL);
3280         if (cmd->va == NULL)
3281                 return -ENOMEM;
3282         memset(cmd->va, 0, cmd->size);
3283         return 0;
3284 }
3285
3286 static void __devexit be_remove(struct pci_dev *pdev)
3287 {
3288         struct be_adapter *adapter = pci_get_drvdata(pdev);
3289
3290         if (!adapter)
3291                 return;
3292
3293         cancel_delayed_work_sync(&adapter->work);
3294
3295         unregister_netdev(adapter->netdev);
3296
3297         be_clear(adapter);
3298
3299         be_stats_cleanup(adapter);
3300
3301         be_ctrl_cleanup(adapter);
3302
3303         be_sriov_disable(adapter);
3304
3305         be_msix_disable(adapter);
3306
3307         pci_set_drvdata(pdev, NULL);
3308         pci_release_regions(pdev);
3309         pci_disable_device(pdev);
3310
3311         free_netdev(adapter->netdev);
3312 }
3313
3314 static int be_get_config(struct be_adapter *adapter)
3315 {
3316         int status;
3317
3318         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3319                         &adapter->function_mode, &adapter->function_caps);
3320         if (status)
3321                 return status;
3322
3323         if (adapter->function_mode & FLEX10_MODE)
3324                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3325         else
3326                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3327
3328         status = be_cmd_get_cntl_attributes(adapter);
3329         if (status)
3330                 return status;
3331
3332         return 0;
3333 }
3334
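/* Derive the adapter generation from the PCI device ID. Lancer parts
 * (OC_DEVICE_ID3/4) are validated via the SLI_INTF register, which also
 * yields the SLI family.
 */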
3335 static int be_dev_family_check(struct be_adapter *adapter)
3336 {
3337         struct pci_dev *pdev = adapter->pdev;
3338         u32 sli_intf = 0, if_type;
3339
3340         switch (pdev->device) {
3341         case BE_DEVICE_ID1:
3342         case OC_DEVICE_ID1:
3343                 adapter->generation = BE_GEN2;
3344                 break;
3345         case BE_DEVICE_ID2:
3346         case OC_DEVICE_ID2:
3347         case OC_DEVICE_ID5:
3348                 adapter->generation = BE_GEN3;
3349                 break;
3350         case OC_DEVICE_ID3:
3351         case OC_DEVICE_ID4:
3352                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3353                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3354                                                 SLI_INTF_IF_TYPE_SHIFT;
3355
3356                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3357                         if_type != 0x02) {
3358                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3359                         return -EINVAL;
3360                 }
3361                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3362                                          SLI_INTF_FAMILY_SHIFT);
3363                 adapter->generation = BE_GEN3;
3364                 break;
3365         default:
3366                 adapter->generation = 0;
3367         }
3368         return 0;
3369 }
3370
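/* Poll the SLIPORT status register until firmware reports ready, for at
 * most SLIPORT_READY_TIMEOUT seconds.
 */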
3371 static int lancer_wait_ready(struct be_adapter *adapter)
3372 {
3373 #define SLIPORT_READY_TIMEOUT 30
3374         u32 sliport_status;
3375         int status = 0, i;
3376
3377         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3378                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3379                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3380                         break;
3381
3382                 msleep(1000);
3383         }
3384
3385         if (i == SLIPORT_READY_TIMEOUT)
3386                 status = -1;
3387
3388         return status;
3389 }
3390
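/* If the port reports an error that firmware marks as recoverable by a
 * reset, trigger one through SLIPORT_CONTROL and re-poll for the ready
 * state; any other error condition is treated as fatal.
 */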
3391 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3392 {
3393         int status;
3394         u32 sliport_status, err, reset_needed;
3395         status = lancer_wait_ready(adapter);
3396         if (!status) {
3397                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3398                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3399                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3400                 if (err && reset_needed) {
3401                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3402                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3403
3404                         /* check whether the adapter has corrected the error */
3405                         status = lancer_wait_ready(adapter);
3406                         sliport_status = ioread32(adapter->db +
3407                                                         SLIPORT_STATUS_OFFSET);
3408                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3409                                                 SLIPORT_STATUS_RN_MASK);
3410                         if (status || sliport_status)
3411                                 status = -1;
3412                 } else if (err || reset_needed) {
3413                         status = -1;
3414                 }
3415         }
3416         return status;
3417 }
3418
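/* Worker-context error recovery for Lancer: when the SLIPORT status
 * shows an error, reset the function and rebuild the queues, interrupts
 * and netdev state from scratch.
 */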
3419 static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3420 {
3421         int status;
3422         u32 sliport_status;
3423
3424         if (adapter->eeh_err || adapter->ue_detected)
3425                 return;
3426
3427         sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3428
3429         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3430                 dev_err(&adapter->pdev->dev,
3431                                 "Adapter in error state. "
3432                                 "Trying to recover.\n");
3433
3434                 status = lancer_test_and_set_rdy_state(adapter);
3435                 if (status)
3436                         goto err;
3437
3438                 netif_device_detach(adapter->netdev);
3439
3440                 if (netif_running(adapter->netdev))
3441                         be_close(adapter->netdev);
3442
3443                 be_clear(adapter);
3444
3445                 adapter->fw_timeout = false;
3446
3447                 status = be_setup(adapter);
3448                 if (status)
3449                         goto err;
3450
3451                 if (netif_running(adapter->netdev)) {
3452                         status = be_open(adapter->netdev);
3453                         if (status)
3454                                 goto err;
3455                 }
3456
3457                 netif_device_attach(adapter->netdev);
3458
3459                 dev_err(&adapter->pdev->dev,
3460                                 "Adapter error recovery succeeded\n");
3461         }
3462         return;
3463 err:
3464         dev_err(&adapter->pdev->dev,
3465                         "Adapter error recovery failed\n");
3466 }
3467
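/* Housekeeping worker, rescheduled every second: runs Lancer error
 * recovery and UE detection, refreshes hardware stats, adapts the RX EQ
 * delay and replenishes RX queues that ran dry. While the interface is
 * down it only reaps pending MCC completions.
 */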
3468 static void be_worker(struct work_struct *work)
3469 {
3470         struct be_adapter *adapter =
3471                 container_of(work, struct be_adapter, work.work);
3472         struct be_rx_obj *rxo;
3473         int i;
3474
3475         if (lancer_chip(adapter))
3476                 lancer_test_and_recover_fn_err(adapter);
3477
3478         be_detect_dump_ue(adapter);
3479
3480         /* When interrupts are not yet enabled, just reap any pending
3481          * mcc completions */
3482         if (!netif_running(adapter->netdev)) {
3483                 int mcc_compl, status = 0;
3484
3485                 mcc_compl = be_process_mcc(adapter, &status);
3486
3487                 if (mcc_compl) {
3488                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
3489                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
3490                 }
3491
3492                 goto reschedule;
3493         }
3494
3495         if (!adapter->stats_cmd_sent) {
3496                 if (lancer_chip(adapter))
3497                         lancer_cmd_get_pport_stats(adapter,
3498                                                 &adapter->stats_cmd);
3499                 else
3500                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
3501         }
3502
3503         for_all_rx_queues(adapter, rxo, i) {
3504                 be_rx_eqd_update(adapter, rxo);
3505
3506                 if (rxo->rx_post_starved) {
3507                         rxo->rx_post_starved = false;
3508                         be_post_rx_frags(rxo, GFP_KERNEL);
3509                 }
3510         }
3511
3512 reschedule:
3513         adapter->work_counter++;
3514         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3515 }
3516
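/* PCI probe: enable the device, map its BARs, bring firmware to the
 * ready state, allocate control and stats DMA memory, create the queues
 * and interrupts, and finally register the net device.
 */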
3517 static int __devinit be_probe(struct pci_dev *pdev,
3518                         const struct pci_device_id *pdev_id)
3519 {
3520         int status = 0;
3521         struct be_adapter *adapter;
3522         struct net_device *netdev;
3523
3524         status = pci_enable_device(pdev);
3525         if (status)
3526                 goto do_none;
3527
3528         status = pci_request_regions(pdev, DRV_NAME);
3529         if (status)
3530                 goto disable_dev;
3531         pci_set_master(pdev);
3532
3533         netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3534         if (netdev == NULL) {
3535                 status = -ENOMEM;
3536                 goto rel_reg;
3537         }
3538         adapter = netdev_priv(netdev);
3539         adapter->pdev = pdev;
3540         pci_set_drvdata(pdev, adapter);
3541
3542         status = be_dev_family_check(adapter);
3543         if (status)
3544                 goto free_netdev;
3545
3546         adapter->netdev = netdev;
3547         SET_NETDEV_DEV(netdev, &pdev->dev);
3548
3549         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3550         if (!status) {
3551                 netdev->features |= NETIF_F_HIGHDMA;
3552         } else {
3553                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3554                 if (status) {
3555                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3556                         goto free_netdev;
3557                 }
3558         }
3559
3560         status = be_sriov_enable(adapter);
3561         if (status)
3562                 goto free_netdev;
3563
3564         status = be_ctrl_init(adapter);
3565         if (status)
3566                 goto disable_sriov;
3567
3568         if (lancer_chip(adapter)) {
3569                 status = lancer_wait_ready(adapter);
3570                 if (!status) {
3571                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3572                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3573                         status = lancer_test_and_set_rdy_state(adapter);
3574                 }
3575                 if (status) {
3576                         dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3577                         goto ctrl_clean;
3578                 }
3579         }
3580
3581         /* sync up with fw's ready state */
3582         if (be_physfn(adapter)) {
3583                 status = be_cmd_POST(adapter);
3584                 if (status)
3585                         goto ctrl_clean;
3586         }
3587
3588         /* tell fw we're ready to fire cmds */
3589         status = be_cmd_fw_init(adapter);
3590         if (status)
3591                 goto ctrl_clean;
3592
3593         status = be_cmd_reset_function(adapter);
3594         if (status)
3595                 goto ctrl_clean;
3596
3597         status = be_stats_init(adapter);
3598         if (status)
3599                 goto ctrl_clean;
3600
3601         status = be_get_config(adapter);
3602         if (status)
3603                 goto stats_clean;
3604
3605         /* The INTR bit may be set in the card when probed by a kdump kernel
3606          * after a crash.
3607          */
3608         if (!lancer_chip(adapter))
3609                 be_intr_set(adapter, false);
3610
3611         be_msix_enable(adapter);
3612
3613         INIT_DELAYED_WORK(&adapter->work, be_worker);
3614         adapter->rx_fc = adapter->tx_fc = true;
3615
3616         status = be_setup(adapter);
3617         if (status)
3618                 goto msix_disable;
3619
3620         be_netdev_init(netdev);
3621         status = register_netdev(netdev);
3622         if (status != 0)
3623                 goto unsetup;
3624
3625         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3626
3627         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3628         return 0;
3629
3630 unsetup:
3631         be_clear(adapter);
3632 msix_disable:
3633         be_msix_disable(adapter);
3634 stats_clean:
3635         be_stats_cleanup(adapter);
3636 ctrl_clean:
3637         be_ctrl_cleanup(adapter);
3638 disable_sriov:
3639         be_sriov_disable(adapter);
3640 free_netdev:
3641         free_netdev(netdev);
3642         pci_set_drvdata(pdev, NULL);
3643 rel_reg:
3644         pci_release_regions(pdev);
3645 disable_dev:
3646         pci_disable_device(pdev);
3647 do_none:
3648         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3649         return status;
3650 }
3651
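/* Power management: suspend tears the adapter down (arming WoL first if
 * it is enabled) and be_resume() rebuilds it after restoring PCI state.
 */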
3652 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3653 {
3654         struct be_adapter *adapter = pci_get_drvdata(pdev);
3655         struct net_device *netdev =  adapter->netdev;
3656
3657         cancel_delayed_work_sync(&adapter->work);
3658         if (adapter->wol)
3659                 be_setup_wol(adapter, true);
3660
3661         netif_device_detach(netdev);
3662         if (netif_running(netdev)) {
3663                 rtnl_lock();
3664                 be_close(netdev);
3665                 rtnl_unlock();
3666         }
3667         be_clear(adapter);
3668
3669         be_msix_disable(adapter);
3670         pci_save_state(pdev);
3671         pci_disable_device(pdev);
3672         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3673         return 0;
3674 }
3675
3676 static int be_resume(struct pci_dev *pdev)
3677 {
3678         int status = 0;
3679         struct be_adapter *adapter = pci_get_drvdata(pdev);
3680         struct net_device *netdev =  adapter->netdev;
3681
3682         netif_device_detach(netdev);
3683
3684         status = pci_enable_device(pdev);
3685         if (status)
3686                 return status;
3687
3688         pci_set_power_state(pdev, PCI_D0);
3689         pci_restore_state(pdev);
3690
3691         be_msix_enable(adapter);
3692         /* tell fw we're ready to fire cmds */
3693         status = be_cmd_fw_init(adapter);
3694         if (status)
3695                 return status;
3696
3697         be_setup(adapter);
3698         if (netif_running(netdev)) {
3699                 rtnl_lock();
3700                 be_open(netdev);
3701                 rtnl_unlock();
3702         }
3703         netif_device_attach(netdev);
3704
3705         if (adapter->wol)
3706                 be_setup_wol(adapter, false);
3707
3708         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3709         return 0;
3710 }
3711
3712 /*
3713  * A function-level reset (FLR) will stop BE from DMAing any data.
3714  */
3715 static void be_shutdown(struct pci_dev *pdev)
3716 {
3717         struct be_adapter *adapter = pci_get_drvdata(pdev);
3718
3719         if (!adapter)
3720                 return;
3721
3722         cancel_delayed_work_sync(&adapter->work);
3723
3724         netif_device_detach(adapter->netdev);
3725
3726         if (adapter->wol)
3727                 be_setup_wol(adapter, true);
3728
3729         be_cmd_reset_function(adapter);
3730
3731         pci_disable_device(pdev);
3732 }
3733
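/* EEH/AER error handlers: on a detected error the adapter is torn down
 * and the device disabled; slot_reset re-enables it and waits for POST,
 * and resume rebuilds the setup and re-attaches the netdev.
 */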
3734 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3735                                 pci_channel_state_t state)
3736 {
3737         struct be_adapter *adapter = pci_get_drvdata(pdev);
3738         struct net_device *netdev =  adapter->netdev;
3739
3740         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3741
3742         adapter->eeh_err = true;
3743
3744         netif_device_detach(netdev);
3745
3746         if (netif_running(netdev)) {
3747                 rtnl_lock();
3748                 be_close(netdev);
3749                 rtnl_unlock();
3750         }
3751         be_clear(adapter);
3752
3753         if (state == pci_channel_io_perm_failure)
3754                 return PCI_ERS_RESULT_DISCONNECT;
3755
3756         pci_disable_device(pdev);
3757
3758         return PCI_ERS_RESULT_NEED_RESET;
3759 }
3760
3761 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3762 {
3763         struct be_adapter *adapter = pci_get_drvdata(pdev);
3764         int status;
3765
3766         dev_info(&adapter->pdev->dev, "EEH reset\n");
3767         adapter->eeh_err = false;
3768         adapter->ue_detected = false;
3769         adapter->fw_timeout = false;
3770
3771         status = pci_enable_device(pdev);
3772         if (status)
3773                 return PCI_ERS_RESULT_DISCONNECT;
3774
3775         pci_set_master(pdev);
3776         pci_set_power_state(pdev, PCI_D0);
3777         pci_restore_state(pdev);
3778
3779         /* Check if card is ok and fw is ready */
3780         status = be_cmd_POST(adapter);
3781         if (status)
3782                 return PCI_ERS_RESULT_DISCONNECT;
3783
3784         return PCI_ERS_RESULT_RECOVERED;
3785 }
3786
3787 static void be_eeh_resume(struct pci_dev *pdev)
3788 {
3789         int status = 0;
3790         struct be_adapter *adapter = pci_get_drvdata(pdev);
3791         struct net_device *netdev =  adapter->netdev;
3792
3793         dev_info(&adapter->pdev->dev, "EEH resume\n");
3794
3795         pci_save_state(pdev);
3796
3797         /* tell fw we're ready to fire cmds */
3798         status = be_cmd_fw_init(adapter);
3799         if (status)
3800                 goto err;
3801
3802         status = be_setup(adapter);
3803         if (status)
3804                 goto err;
3805
3806         if (netif_running(netdev)) {
3807                 status = be_open(netdev);
3808                 if (status)
3809                         goto err;
3810         }
3811         netif_device_attach(netdev);
3812         return;
3813 err:
3814         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3815 }
3816
3817 static struct pci_error_handlers be_eeh_handlers = {
3818         .error_detected = be_eeh_err_detected,
3819         .slot_reset = be_eeh_reset,
3820         .resume = be_eeh_resume,
3821 };
3822
3823 static struct pci_driver be_driver = {
3824         .name = DRV_NAME,
3825         .id_table = be_dev_ids,
3826         .probe = be_probe,
3827         .remove = __devexit_p(be_remove),
3828         .suspend = be_suspend,
3829         .resume = be_resume,
3830         .shutdown = be_shutdown,
3831         .err_handler = &be_eeh_handlers
3832 };
3833
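/* Module init: validate rx_frag_size (only 2048-, 4096- or 8192-byte
 * fragments are supported; anything else falls back to 2048) before
 * registering the PCI driver. Example invocation (module name assumed
 * to be be2net):
 *
 *   modprobe be2net rx_frag_size=4096 num_vfs=2
 */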
3834 static int __init be_init_module(void)
3835 {
3836         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3837             rx_frag_size != 2048) {
3838                 printk(KERN_WARNING DRV_NAME
3839                         " : Module param rx_frag_size must be 2048/4096/8192."
3840                         " Using 2048\n");
3841                 rx_frag_size = 2048;
3842         }
3843
3844         return pci_register_driver(&be_driver);
3845 }
3846 module_init(be_init_module);
3847
3848 static void __exit be_exit_module(void)
3849 {
3850         pci_unregister_driver(&be_driver);
3851 }
3852 module_exit(be_exit_module);