/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
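
/*
 * Editor's note (illustrative, not part of the driver): both parameters
 * are read-only at runtime (S_IRUGO) and must therefore be given at
 * module load time, e.g.:
 *
 *   modprobe be2net rx_frag_size=4096 num_vfs=2
 *
 * rx_frag_size decides how each big page is carved into rx buffers (see
 * be_post_rx_frags() below); num_vfs is honoured only on SR-IOV capable
 * physical functions.
 */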

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_err)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
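
/*
 * Editor's note (illustrative usage, not part of the driver): completion
 * paths typically acknowledge events without re-arming while NAPI polling
 * is still in progress, and re-arm only when polling is done, e.g.:
 *
 *   be_cq_notify(adapter, cq->id, false, work_done);   (ack only)
 *   ...
 *   be_cq_notify(adapter, cq->id, true, 0);            (re-arm)
 *
 * Acking without arming keeps the CQ from raising further events while
 * the queue is still being drained in softirq context.
 */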

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        status = be_cmd_mac_addr_query(adapter, current_mac,
                                MAC_ADDRESS_TYPE_NETWORK, false,
                                adapter->if_handle, 0);
        if (status)
                goto err;

        if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
                status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
                if (status)
                        goto err;

                be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
        }
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
        return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}
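
/*
 * Worked example (editor's note): the erx counter in hardware is only
 * 16 bits wide, so the driver keeps a 32-bit accumulator whose low half
 * mirrors the hardware value.  If *acc == 0x0001FFF0 and the hardware
 * now reports val == 0x0005, then val < lo(*acc) signals a wrap, giving
 * newacc = 0x00010000 + 0x0005 + 65536 = 0x00020005.
 */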

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }

        /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
        for_all_rx_queues(adapter, rxo, i) {
                /* below erx HW counter can actually wrap around after
                 * 65535. Driver accumulates a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                (u16)erx->rx_drops_no_fragments[rxo->q.id]);
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}
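
/*
 * Editor's note: the u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh()
 * loops above re-read each queue's counters until a consistent snapshot is
 * observed; on 32-bit hosts this protects the 64-bit packet and byte
 * counters against torn reads from a concurrent writer.
 */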

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
        struct net_device *netdev = adapter->netdev;

        /* when link status changes, link speed must be re-queried from card */
        adapter->link_speed = -1;
        if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
                netif_carrier_on(netdev);
                dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
        } else {
                netif_carrier_off(netdev);
                dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
        }
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}
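
/*
 * Worked example (editor's note): an skb with linear data and two page
 * frags needs 1 + 2 data WRBs plus the header WRB, i.e. 4, which is
 * already even, so no dummy WRB is added.  With a single frag the count
 * would be 3; on BE2/BE3 (but not Lancer) a dummy WRB is then appended to
 * make it 4, since those chips expect an even number of WRBs per request.
 */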

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
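
/*
 * Editor's note: the 64-bit DMA address is split across two 32-bit WRB
 * fields; e.g. addr == 0x0000000123456789 yields frag_pa_hi == 0x1 and
 * frag_pa_lo == 0x23456789.  frag_len is masked down to the width the
 * hardware supports (ETH_WRB_FRAG_LEN_MASK).
 */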

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}
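
/*
 * Editor's note: assuming the usual definitions in be.h (a BE_MIN_MTU of
 * 256 and a BE_MAX_JUMBO_FRAME_SIZE of 9018), the accepted range works
 * out to 256..9000 bytes, i.e. the jumbo frame size less 14 bytes of
 * Ethernet header and 4 bytes of FCS.
 */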

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter, false, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
                        netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter,
                                adapter->vf_cfg[vf].vf_if_handle,
                                adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = rx_stats(rxo);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = stats->rx_pps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd) {
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
                rx_eq->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}
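
/*
 * Editor's note: for a TCP/IPv4 frame this requires tcpf, l4_csum and
 * ip_csum all set before CHECKSUM_UNNECESSARY is reported; for IPv6 the
 * ip_csum result is ignored (IPv6 has no header checksum), so a valid L4
 * checksum alone suffices.
 */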

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}
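
/*
 * Worked example (editor's note): with rx_frag_size == 2048, a 3000-byte
 * frame arrives as two rx frags in one completion.  Up to BE_HDR_LEN
 * bytes of the first frag are copied into skb->data and the remainder is
 * attached as frags[0]; the trailing 952 bytes become frags[1], or are
 * coalesced into frags[0] when both frags sit in the same physical page.
 */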

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
                }
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        napi_gro_frags(&eq_obj->napi);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is OK to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * Ignore it if vtm is not set */
                if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}
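
/*
 * Editor's note: with 4K pages, a request of (say) 8192 bytes gives
 * get_order() == 1, so __GFP_COMP is set and a compound page is
 * allocated; a request that fits in a single page keeps order 0 and the
 * flag stays clear.
 */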

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rx_stats(rxo)->rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
                                                    0, adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}
1395
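/* Return the next valid TX completion, or NULL if none is pending.
 * Converts the entry to CPU endianness and resets its valid bit before
 * advancing the CQ tail.
 */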
1396 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1397 {
1398         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1399
1400         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1401                 return NULL;
1402
1403         rmb();
1404         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1405
1406         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1407
1408         queue_tail_inc(tx_cq);
1409         return txcp;
1410 }
1411
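/* Unmap the wrbs of the skb at the TX queue tail (whose wrbs end at
 * last_index), free the skb and return the number of wrbs reclaimed,
 * including the header wrb.
 */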
1412 static u16 be_tx_compl_process(struct be_adapter *adapter,
1413                 struct be_tx_obj *txo, u16 last_index)
1414 {
1415         struct be_queue_info *txq = &txo->q;
1416         struct be_eth_wrb *wrb;
1417         struct sk_buff **sent_skbs = txo->sent_skb_list;
1418         struct sk_buff *sent_skb;
1419         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1420         bool unmap_skb_hdr = true;
1421
1422         sent_skb = sent_skbs[txq->tail];
1423         BUG_ON(!sent_skb);
1424         sent_skbs[txq->tail] = NULL;
1425
1426         /* skip header wrb */
1427         queue_tail_inc(txq);
1428
1429         do {
1430                 cur_index = txq->tail;
1431                 wrb = queue_tail_node(txq);
1432                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1433                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1434                 unmap_skb_hdr = false;
1435
1436                 num_wrbs++;
1437                 queue_tail_inc(txq);
1438         } while (cur_index != last_index);
1439
1440         kfree_skb(sent_skb);
1441         return num_wrbs;
1442 }
1443
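/* Return the next posted event queue entry, or NULL if the EQ is empty. */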
1444 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1445 {
1446         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1447
1448         if (!eqe->evt)
1449                 return NULL;
1450
1451         rmb();
1452         eqe->evt = le32_to_cpu(eqe->evt);
1453         queue_tail_inc(&eq_obj->q);
1454         return eqe;
1455 }
1456
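/* Drain all pending events and notify the EQ of the number consumed;
 * schedules napi if any events were found. A spurious (zero-event)
 * interrupt just re-arms the EQ.
 */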
1457 static int event_handle(struct be_adapter *adapter,
1458                         struct be_eq_obj *eq_obj,
1459                         bool rearm)
1460 {
1461         struct be_eq_entry *eqe;
1462         u16 num = 0;
1463
1464         while ((eqe = event_get(eq_obj)) != NULL) {
1465                 eqe->evt = 0;
1466                 num++;
1467         }
1468
1469         /* Deal with any spurious interrupts that come
1470          * without events
1471          */
1472         if (!num)
1473                 rearm = true;
1474
1475         be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1476         if (num)
1477                 napi_schedule(&eq_obj->napi);
1478
1479         return num;
1480 }
1481
1482 /* Just read and notify events without processing them.
1483  * Used at the time of destroying event queues */
1484 static void be_eq_clean(struct be_adapter *adapter,
1485                         struct be_eq_obj *eq_obj)
1486 {
1487         struct be_eq_entry *eqe;
1488         u16 num = 0;
1489
1490         while ((eqe = event_get(eq_obj)) != NULL) {
1491                 eqe->evt = 0;
1492                 num++;
1493         }
1494
1495         if (num)
1496                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1497 }
1498
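/* Discard any pending RX completions, then drop the page references of
 * posted-but-unused RX buffers and reset the queue indices.
 */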
1499 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1500 {
1501         struct be_rx_page_info *page_info;
1502         struct be_queue_info *rxq = &rxo->q;
1503         struct be_queue_info *rx_cq = &rxo->cq;
1504         struct be_rx_compl_info *rxcp;
1505         u16 tail;
1506
1507         /* First cleanup pending rx completions */
1508         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1509                 be_rx_compl_discard(adapter, rxo, rxcp);
1510                 be_cq_notify(adapter, rx_cq->id, false, 1);
1511         }
1512
1513         /* Then free posted rx buffers that were not used */
1514         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1515         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1516                 page_info = get_rx_page_info(adapter, rxo, tail);
1517                 put_page(page_info->page);
1518                 memset(page_info, 0, sizeof(*page_info));
1519         }
1520         BUG_ON(atomic_read(&rxq->used));
1521         rxq->tail = rxq->head = 0;
1522 }
1523
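/* Reclaim all in-flight TX wrbs: wait up to 200ms for completions to
 * arrive, then forcibly free the skbs whose completions never came.
 */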
1524 static void be_tx_compl_clean(struct be_adapter *adapter,
1525                                 struct be_tx_obj *txo)
1526 {
1527         struct be_queue_info *tx_cq = &txo->cq;
1528         struct be_queue_info *txq = &txo->q;
1529         struct be_eth_tx_compl *txcp;
1530         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1531         struct sk_buff **sent_skbs = txo->sent_skb_list;
1532         struct sk_buff *sent_skb;
1533         bool dummy_wrb;
1534
1535         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1536         do {
1537                 while ((txcp = be_tx_compl_get(tx_cq))) {
1538                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1539                                         wrb_index, txcp);
1540                         num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1541                         cmpl++;
1542                 }
1543                 if (cmpl) {
1544                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1545                         atomic_sub(num_wrbs, &txq->used);
1546                         cmpl = 0;
1547                         num_wrbs = 0;
1548                 }
1549
1550                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1551                         break;
1552
1553                 mdelay(1);
1554         } while (true);
1555
1556         if (atomic_read(&txq->used))
1557                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1558                         atomic_read(&txq->used));
1559
1560         /* free posted tx for which compls will never arrive */
1561         while (atomic_read(&txq->used)) {
1562                 sent_skb = sent_skbs[txq->tail];
1563                 end_idx = txq->tail;
1564                 index_adv(&end_idx,
1565                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1566                         txq->len);
1567                 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1568                 atomic_sub(num_wrbs, &txq->used);
1569         }
1570 }
1571
1572 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1573 {
1574         struct be_queue_info *q;
1575
1576         q = &adapter->mcc_obj.q;
1577         if (q->created)
1578                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1579         be_queue_free(adapter, q);
1580
1581         q = &adapter->mcc_obj.cq;
1582         if (q->created)
1583                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1584         be_queue_free(adapter, q);
1585 }
1586
1587 /* Must be called only after TX qs are created, as the MCC shares the TX EQ */
1588 static int be_mcc_queues_create(struct be_adapter *adapter)
1589 {
1590         struct be_queue_info *q, *cq;
1591
1592         /* Alloc MCC compl queue */
1593         cq = &adapter->mcc_obj.cq;
1594         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1595                         sizeof(struct be_mcc_compl)))
1596                 goto err;
1597
1598         /* Ask BE to create MCC compl queue; share TX's eq */
1599         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1600                 goto mcc_cq_free;
1601
1602         /* Alloc MCC queue */
1603         q = &adapter->mcc_obj.q;
1604         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1605                 goto mcc_cq_destroy;
1606
1607         /* Ask BE to create MCC queue */
1608         if (be_cmd_mccq_create(adapter, q, cq))
1609                 goto mcc_q_free;
1610
1611         return 0;
1612
1613 mcc_q_free:
1614         be_queue_free(adapter, q);
1615 mcc_cq_destroy:
1616         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1617 mcc_cq_free:
1618         be_queue_free(adapter, cq);
1619 err:
1620         return -1;
1621 }
1622
1623 static void be_tx_queues_destroy(struct be_adapter *adapter)
1624 {
1625         struct be_queue_info *q;
1626         struct be_tx_obj *txo;
1627         u8 i;
1628
1629         for_all_tx_queues(adapter, txo, i) {
1630                 q = &txo->q;
1631                 if (q->created)
1632                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1633                 be_queue_free(adapter, q);
1634
1635                 q = &txo->cq;
1636                 if (q->created)
1637                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1638                 be_queue_free(adapter, q);
1639         }
1640
1641         /* Clear any residual events */
1642         be_eq_clean(adapter, &adapter->tx_eq);
1643
1644         q = &adapter->tx_eq.q;
1645         if (q->created)
1646                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1647         be_queue_free(adapter, q);
1648 }
1649
1650 static int be_num_txqs_want(struct be_adapter *adapter)
1651 {
1652         if ((num_vfs && adapter->sriov_enabled) ||
1653                 be_is_mc(adapter) ||
1654                 lancer_chip(adapter) || !be_physfn(adapter) ||
1655                 adapter->generation == BE_GEN2)
1656                 return 1;
1657         else
1658                 return MAX_TX_QS;
1659 }
1660
1661 /* One TX event queue is shared by all TX compl qs */
1662 static int be_tx_queues_create(struct be_adapter *adapter)
1663 {
1664         struct be_queue_info *eq, *q, *cq;
1665         struct be_tx_obj *txo;
1666         u8 i;
1667
1668         adapter->num_tx_qs = be_num_txqs_want(adapter);
1669         if (adapter->num_tx_qs != MAX_TX_QS) {
1670                 rtnl_lock();
1671                 netif_set_real_num_tx_queues(adapter->netdev,
1672                         adapter->num_tx_qs);
1673                 rtnl_unlock();
1674         }
1675
1676         adapter->tx_eq.max_eqd = 0;
1677         adapter->tx_eq.min_eqd = 0;
1678         adapter->tx_eq.cur_eqd = 96;
1679         adapter->tx_eq.enable_aic = false;
1680
1681         eq = &adapter->tx_eq.q;
1682         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1683                 sizeof(struct be_eq_entry)))
1684                 return -1;
1685
1686         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1687                 goto err;
1688         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1689
1690         for_all_tx_queues(adapter, txo, i) {
1691                 cq = &txo->cq;
1692                 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1693                         sizeof(struct be_eth_tx_compl)))
1694                         goto err;
1695
1696                 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1697                         goto err;
1698
1699                 q = &txo->q;
1700                 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1701                         sizeof(struct be_eth_wrb)))
1702                         goto err;
1703         }
1704         return 0;
1705
1706 err:
1707         be_tx_queues_destroy(adapter);
1708         return -1;
1709 }
1710
1711 static void be_rx_queues_destroy(struct be_adapter *adapter)
1712 {
1713         struct be_queue_info *q;
1714         struct be_rx_obj *rxo;
1715         int i;
1716
1717         for_all_rx_queues(adapter, rxo, i) {
1718                 be_queue_free(adapter, &rxo->q);
1719
1720                 q = &rxo->cq;
1721                 if (q->created)
1722                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1723                 be_queue_free(adapter, q);
1724
1725                 q = &rxo->rx_eq.q;
1726                 if (q->created)
1727                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1728                 be_queue_free(adapter, q);
1729         }
1730 }
1731
1732 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1733 {
1734         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1735                 !adapter->sriov_enabled && be_physfn(adapter) &&
1736                 !be_is_mc(adapter)) {
1737                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1738         } else {
1739                 dev_warn(&adapter->pdev->dev,
1740                         "No support for multiple RX queues\n");
1741                 return 1;
1742         }
1743 }
1744
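/* Create an EQ and CQ per RX queue; the RX queues themselves are only
 * allocated here and are created in HW later, from be_open().
 */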
1745 static int be_rx_queues_create(struct be_adapter *adapter)
1746 {
1747         struct be_queue_info *eq, *q, *cq;
1748         struct be_rx_obj *rxo;
1749         int rc, i;
1750
1751         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1752                                 msix_enabled(adapter) ?
1753                                         adapter->num_msix_vec - 1 : 1);
1754         if (adapter->num_rx_qs != MAX_RX_QS)
1755                 dev_warn(&adapter->pdev->dev,
1756                         "Can create only %d RX queues", adapter->num_rx_qs);
1757
1758         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1759         for_all_rx_queues(adapter, rxo, i) {
1760                 rxo->adapter = adapter;
1761                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1762                 rxo->rx_eq.enable_aic = true;
1763
1764                 /* EQ */
1765                 eq = &rxo->rx_eq.q;
1766                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1767                                         sizeof(struct be_eq_entry));
1768                 if (rc)
1769                         goto err;
1770
1771                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1772                 if (rc)
1773                         goto err;
1774
1775                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1776
1777                 /* CQ */
1778                 cq = &rxo->cq;
1779                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1780                                 sizeof(struct be_eth_rx_compl));
1781                 if (rc)
1782                         goto err;
1783
1784                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1785                 if (rc)
1786                         goto err;
1787
1788                 /* Rx Q - will be created in be_open() */
1789                 q = &rxo->q;
1790                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1791                                 sizeof(struct be_eth_rx_d));
1792                 if (rc)
1793                         goto err;
1794
1795         }
1796
1797         return 0;
1798 err:
1799         be_rx_queues_destroy(adapter);
1800         return -1;
1801 }
1802
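/* Non-destructively check whether an event is pending on the EQ. */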
1803 static bool event_peek(struct be_eq_obj *eq_obj)
1804 {
1805         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1806         if (!eqe->evt)
1807                 return false;
1808         else
1809                 return true;
1810 }
1811
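/* Legacy INTx handler. Lancer has no CEV ISR register, so events are
 * peeked directly; on BEx the ISR register identifies the EQs that
 * raised the interrupt. Returns IRQ_NONE when the (shared) interrupt
 * is not ours.
 */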
1812 static irqreturn_t be_intx(int irq, void *dev)
1813 {
1814         struct be_adapter *adapter = dev;
1815         struct be_rx_obj *rxo;
1816         int isr, i, tx = 0, rx = 0;
1817
1818         if (lancer_chip(adapter)) {
1819                 if (event_peek(&adapter->tx_eq))
1820                         tx = event_handle(adapter, &adapter->tx_eq, false);
1821                 for_all_rx_queues(adapter, rxo, i) {
1822                         if (event_peek(&rxo->rx_eq))
1823                                 rx |= event_handle(adapter, &rxo->rx_eq, true);
1824                 }
1825
1826                 if (!(tx || rx))
1827                         return IRQ_NONE;
1828
1829         } else {
1830                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1831                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1832                 if (!isr)
1833                         return IRQ_NONE;
1834
1835                 if ((1 << adapter->tx_eq.eq_idx & isr))
1836                         event_handle(adapter, &adapter->tx_eq, false);
1837
1838                 for_all_rx_queues(adapter, rxo, i) {
1839                         if ((1 << rxo->rx_eq.eq_idx & isr))
1840                                 event_handle(adapter, &rxo->rx_eq, true);
1841                 }
1842         }
1843
1844         return IRQ_HANDLED;
1845 }
1846
1847 static irqreturn_t be_msix_rx(int irq, void *dev)
1848 {
1849         struct be_rx_obj *rxo = dev;
1850         struct be_adapter *adapter = rxo->adapter;
1851
1852         event_handle(adapter, &rxo->rx_eq, true);
1853
1854         return IRQ_HANDLED;
1855 }
1856
1857 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1858 {
1859         struct be_adapter *adapter = dev;
1860
1861         event_handle(adapter, &adapter->tx_eq, false);
1862
1863         return IRQ_HANDLED;
1864 }
1865
1866 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1867 {
1868         return rxcp->tcpf && !rxcp->err;
1869 }
1870
1871 static int be_poll_rx(struct napi_struct *napi, int budget)
1872 {
1873         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1874         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1875         struct be_adapter *adapter = rxo->adapter;
1876         struct be_queue_info *rx_cq = &rxo->cq;
1877         struct be_rx_compl_info *rxcp;
1878         u32 work_done;
1879
1880         rx_stats(rxo)->rx_polls++;
1881         for (work_done = 0; work_done < budget; work_done++) {
1882                 rxcp = be_rx_compl_get(rxo);
1883                 if (!rxcp)
1884                         break;
1885
1886                 /* Is it a flush compl that has no data? */
1887                 if (unlikely(rxcp->num_rcvd == 0))
1888                         goto loop_continue;
1889
1890                 /* Discard compl with partial DMA Lancer B0 */
1891                 if (unlikely(!rxcp->pkt_size)) {
1892                         be_rx_compl_discard(adapter, rxo, rxcp);
1893                         goto loop_continue;
1894                 }
1895
1896                 /* On BE, drop pkts that arrive due to imperfect filtering in
1897                  * promiscuous mode on some SKUs
1898                  */
1899                 if (unlikely(rxcp->port != adapter->port_num &&
1900                                 !lancer_chip(adapter))) {
1901                         be_rx_compl_discard(adapter, rxo, rxcp);
1902                         goto loop_continue;
1903                 }
1904
1905                 if (do_gro(rxcp))
1906                         be_rx_compl_process_gro(adapter, rxo, rxcp);
1907                 else
1908                         be_rx_compl_process(adapter, rxo, rxcp);
1909 loop_continue:
1910                 be_rx_stats_update(rxo, rxcp);
1911         }
1912
1913         be_cq_notify(adapter, rx_cq->id, false, work_done);
1914
1915         /* Refill the queue */
1916         if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1917                 be_post_rx_frags(rxo, GFP_ATOMIC);
1918
1919         /* All consumed */
1920         if (work_done < budget) {
1921                 napi_complete(napi);
1922                 /* Arm CQ */
1923                 be_cq_notify(adapter, rx_cq->id, true, 0);
1924         }
1925         return work_done;
1926 }
1927
1928 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1929  * For TX/MCC we don't honour budget; consume everything
1930  */
1931 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1932 {
1933         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1934         struct be_adapter *adapter =
1935                 container_of(tx_eq, struct be_adapter, tx_eq);
1936         struct be_tx_obj *txo;
1937         struct be_eth_tx_compl *txcp;
1938         int tx_compl, mcc_compl, status = 0;
1939         u8 i;
1940         u16 num_wrbs;
1941
1942         for_all_tx_queues(adapter, txo, i) {
1943                 tx_compl = 0;
1944                 num_wrbs = 0;
1945                 while ((txcp = be_tx_compl_get(&txo->cq))) {
1946                         num_wrbs += be_tx_compl_process(adapter, txo,
1947                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
1948                                         wrb_index, txcp));
1949                         tx_compl++;
1950                 }
1951                 if (tx_compl) {
1952                         be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1953
1954                         atomic_sub(num_wrbs, &txo->q.used);
1955
1956                         /* As Tx wrbs have been freed up, wake up netdev queue
1957                          * if it was stopped due to lack of tx wrbs.  */
1958                         if (__netif_subqueue_stopped(adapter->netdev, i) &&
1959                                 atomic_read(&txo->q.used) < txo->q.len / 2) {
1960                                 netif_wake_subqueue(adapter->netdev, i);
1961                         }
1962
1963                         u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1964                         tx_stats(txo)->tx_compl += tx_compl;
1965                         u64_stats_update_end(&tx_stats(txo)->sync_compl);
1966                 }
1967         }
1968
1969         mcc_compl = be_process_mcc(adapter, &status);
1970
1971         if (mcc_compl) {
1972                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1973                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1974         }
1975
1976         napi_complete(napi);
1977
1978         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1979         adapter->drv_stats.tx_events++;
1980         return 1;
1981 }
1982
1983 void be_detect_dump_ue(struct be_adapter *adapter)
1984 {
1985         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
1986         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
1987         u32 i;
1988
1989         if (adapter->eeh_err || adapter->ue_detected)
1990                 return;
1991
1992         if (lancer_chip(adapter)) {
1993                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
1994                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1995                         sliport_err1 = ioread32(adapter->db +
1996                                         SLIPORT_ERROR1_OFFSET);
1997                         sliport_err2 = ioread32(adapter->db +
1998                                         SLIPORT_ERROR2_OFFSET);
1999                 }
2000         } else {
2001                 pci_read_config_dword(adapter->pdev,
2002                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2003                 pci_read_config_dword(adapter->pdev,
2004                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2005                 pci_read_config_dword(adapter->pdev,
2006                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2007                 pci_read_config_dword(adapter->pdev,
2008                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2009
2010                 ue_lo = (ue_lo & (~ue_lo_mask));
2011                 ue_hi = (ue_hi & (~ue_hi_mask));
2012         }
2013
2014         if (ue_lo || ue_hi ||
2015                 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2016                 adapter->ue_detected = true;
2017                 adapter->eeh_err = true;
2018                 dev_err(&adapter->pdev->dev,
2019                         "Unrecoverable error in the card\n");
2020         }
2021
2022         if (ue_lo) {
2023                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2024                         if (ue_lo & 1)
2025                                 dev_err(&adapter->pdev->dev,
2026                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2027                 }
2028         }
2029         if (ue_hi) {
2030                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2031                         if (ue_hi & 1)
2032                                 dev_err(&adapter->pdev->dev,
2033                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2034                 }
2035         }
2036
2037         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2038                 dev_err(&adapter->pdev->dev,
2039                         "sliport status 0x%x\n", sliport_status);
2040                 dev_err(&adapter->pdev->dev,
2041                         "sliport error1 0x%x\n", sliport_err1);
2042                 dev_err(&adapter->pdev->dev,
2043                         "sliport error2 0x%x\n", sliport_err2);
2044         }
2045 }
2046
2047 static void be_msix_disable(struct be_adapter *adapter)
2048 {
2049         if (msix_enabled(adapter)) {
2050                 pci_disable_msix(adapter->pdev);
2051                 adapter->num_msix_vec = 0;
2052         }
2053 }
2054
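/* Request one MSI-X vector per desired RX queue plus one shared TX/MCC
 * vector. If the full request fails, retry with the vector count that
 * pci_enable_msix() reports as available.
 */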
2055 static void be_msix_enable(struct be_adapter *adapter)
2056 {
2057 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2058         int i, status, num_vec;
2059
2060         num_vec = be_num_rxqs_want(adapter) + 1;
2061
2062         for (i = 0; i < num_vec; i++)
2063                 adapter->msix_entries[i].entry = i;
2064
2065         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2066         if (status == 0) {
2067                 goto done;
2068         } else if (status >= BE_MIN_MSIX_VECTORS) {
2069                 num_vec = status;
2070                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2071                                 num_vec) == 0)
2072                         goto done;
2073         }
2074         return;
2075 done:
2076         adapter->num_msix_vec = num_vec;
2077         return;
2078 }
2079
2080 static int be_sriov_enable(struct be_adapter *adapter)
2081 {
2082         be_check_sriov_fn_type(adapter);
2083 #ifdef CONFIG_PCI_IOV
2084         if (be_physfn(adapter) && num_vfs) {
2085                 int status, pos;
2086                 u16 nvfs;
2087
2088                 pos = pci_find_ext_capability(adapter->pdev,
2089                                                 PCI_EXT_CAP_ID_SRIOV);
2090                 pci_read_config_word(adapter->pdev,
2091                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2092
2093                 if (num_vfs > nvfs) {
2094                         dev_info(&adapter->pdev->dev,
2095                                         "Device supports %d VFs and not %d\n",
2096                                         nvfs, num_vfs);
2097                         num_vfs = nvfs;
2098                 }
2099
2100                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2101         adapter->sriov_enabled = !status;
2102
2103                 if (adapter->sriov_enabled) {
2104                         adapter->vf_cfg = kcalloc(num_vfs,
2105                                                 sizeof(struct be_vf_cfg),
2106                                                 GFP_KERNEL);
2107                         if (!adapter->vf_cfg)
2108                                 return -ENOMEM;
2109                 }
2110         }
2111 #endif
2112         return 0;
2113 }
2114
2115 static void be_sriov_disable(struct be_adapter *adapter)
2116 {
2117 #ifdef CONFIG_PCI_IOV
2118         if (adapter->sriov_enabled) {
2119                 pci_disable_sriov(adapter->pdev);
2120                 kfree(adapter->vf_cfg);
2121                 adapter->sriov_enabled = false;
2122         }
2123 #endif
2124 }
2125
2126 static inline int be_msix_vec_get(struct be_adapter *adapter,
2127                                         struct be_eq_obj *eq_obj)
2128 {
2129         return adapter->msix_entries[eq_obj->eq_idx].vector;
2130 }
2131
2132 static int be_request_irq(struct be_adapter *adapter,
2133                 struct be_eq_obj *eq_obj,
2134                 void *handler, char *desc, void *context)
2135 {
2136         struct net_device *netdev = adapter->netdev;
2137         int vec;
2138
2139         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2140         vec = be_msix_vec_get(adapter, eq_obj);
2141         return request_irq(vec, handler, 0, eq_obj->desc, context);
2142 }
2143
2144 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2145                         void *context)
2146 {
2147         int vec = be_msix_vec_get(adapter, eq_obj);
2148         free_irq(vec, context);
2149 }
2150
2151 static int be_msix_register(struct be_adapter *adapter)
2152 {
2153         struct be_rx_obj *rxo;
2154         int status, i;
2155         char qname[10];
2156
2157         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2158                                 adapter);
2159         if (status)
2160                 goto err;
2161
2162         for_all_rx_queues(adapter, rxo, i) {
2163                 sprintf(qname, "rxq%d", i);
2164                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2165                                 qname, rxo);
2166                 if (status)
2167                         goto err_msix;
2168         }
2169
2170         return 0;
2171
2172 err_msix:
2173         be_free_irq(adapter, &adapter->tx_eq, adapter);
2174
2175         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2176                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2177
2178 err:
2179         dev_warn(&adapter->pdev->dev,
2180                 "MSIX Request IRQ failed - err %d\n", status);
2181         be_msix_disable(adapter);
2182         return status;
2183 }
2184
2185 static int be_irq_register(struct be_adapter *adapter)
2186 {
2187         struct net_device *netdev = adapter->netdev;
2188         int status;
2189
2190         if (msix_enabled(adapter)) {
2191                 status = be_msix_register(adapter);
2192                 if (status == 0)
2193                         goto done;
2194                 /* INTx is not supported for VF */
2195                 if (!be_physfn(adapter))
2196                         return status;
2197         }
2198
2199         /* INTx */
2200         netdev->irq = adapter->pdev->irq;
2201         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2202                         adapter);
2203         if (status) {
2204                 dev_err(&adapter->pdev->dev,
2205                         "INTx request IRQ failed - err %d\n", status);
2206                 return status;
2207         }
2208 done:
2209         adapter->isr_registered = true;
2210         return 0;
2211 }
2212
2213 static void be_irq_unregister(struct be_adapter *adapter)
2214 {
2215         struct net_device *netdev = adapter->netdev;
2216         struct be_rx_obj *rxo;
2217         int i;
2218
2219         if (!adapter->isr_registered)
2220                 return;
2221
2222         /* INTx */
2223         if (!msix_enabled(adapter)) {
2224                 free_irq(netdev->irq, adapter);
2225                 goto done;
2226         }
2227
2228         /* MSIx */
2229         be_free_irq(adapter, &adapter->tx_eq, adapter);
2230
2231         for_all_rx_queues(adapter, rxo, i)
2232                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2233
2234 done:
2235         adapter->isr_registered = false;
2236 }
2237
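/* Destroy the HW RX queues, free their posted buffers and clear any
 * residual events on the RX EQs.
 */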
2238 static void be_rx_queues_clear(struct be_adapter *adapter)
2239 {
2240         struct be_queue_info *q;
2241         struct be_rx_obj *rxo;
2242         int i;
2243
2244         for_all_rx_queues(adapter, rxo, i) {
2245                 q = &rxo->q;
2246                 if (q->created) {
2247                         be_cmd_rxq_destroy(adapter, q);
2248                         /* After the rxq is invalidated, wait for a grace time
2249                          * of 1ms for all DMA to end and the flush compl to
2250                          * arrive
2251                          */
2252                         mdelay(1);
2253                         be_rx_q_clean(adapter, rxo);
2254                 }
2255
2256                 /* Clear any residual events */
2257                 q = &rxo->rx_eq.q;
2258                 if (q->created)
2259                         be_eq_clean(adapter, &rxo->rx_eq);
2260         }
2261 }
2262
2263 static int be_close(struct net_device *netdev)
2264 {
2265         struct be_adapter *adapter = netdev_priv(netdev);
2266         struct be_rx_obj *rxo;
2267         struct be_tx_obj *txo;
2268         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2269         int vec, i;
2270
2271         be_async_mcc_disable(adapter);
2272
2273         if (!lancer_chip(adapter))
2274                 be_intr_set(adapter, false);
2275
2276         for_all_rx_queues(adapter, rxo, i)
2277                 napi_disable(&rxo->rx_eq.napi);
2278
2279         napi_disable(&tx_eq->napi);
2280
2281         if (lancer_chip(adapter)) {
2282                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2283                 for_all_rx_queues(adapter, rxo, i)
2284                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2285                 for_all_tx_queues(adapter, txo, i)
2286                         be_cq_notify(adapter, txo->cq.id, false, 0);
2287         }
2288
2289         if (msix_enabled(adapter)) {
2290                 vec = be_msix_vec_get(adapter, tx_eq);
2291                 synchronize_irq(vec);
2292
2293                 for_all_rx_queues(adapter, rxo, i) {
2294                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2295                         synchronize_irq(vec);
2296                 }
2297         } else {
2298                 synchronize_irq(netdev->irq);
2299         }
2300         be_irq_unregister(adapter);
2301
2302         /* Wait for all pending tx completions to arrive so that
2303          * all tx skbs are freed.
2304          */
2305         for_all_tx_queues(adapter, txo, i)
2306                 be_tx_compl_clean(adapter, txo);
2307
2308         be_rx_queues_clear(adapter);
2309         return 0;
2310 }
2311
2312 static int be_rx_queues_setup(struct be_adapter *adapter)
2313 {
2314         struct be_rx_obj *rxo;
2315         int rc, i, j;
2316         u8 rsstable[128];
2317
2318         for_all_rx_queues(adapter, rxo, i) {
2319                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2320                         rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2321                         adapter->if_handle,
2322                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2323                 if (rc)
2324                         return rc;
2325         }
2326
2327         if (be_multi_rxq(adapter)) {
2328                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2329                         for_all_rss_queues(adapter, rxo, i) {
2330                                 if ((j + i) >= 128)
2331                                         break;
2332                                 rsstable[j + i] = rxo->rss_id;
2333                         }
2334                 }
2335                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2336
2337                 if (rc)
2338                         return rc;
2339         }
2340
2341         /* First time posting */
2342         for_all_rx_queues(adapter, rxo, i) {
2343                 be_post_rx_frags(rxo, GFP_KERNEL);
2344                 napi_enable(&rxo->rx_eq.napi);
2345         }
2346         return 0;
2347 }
2348
2349 static int be_open(struct net_device *netdev)
2350 {
2351         struct be_adapter *adapter = netdev_priv(netdev);
2352         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2353         struct be_rx_obj *rxo;
2354         int status, i;
2355
2356         status = be_rx_queues_setup(adapter);
2357         if (status)
2358                 goto err;
2359
2360         napi_enable(&tx_eq->napi);
2361
2362         be_irq_register(adapter);
2363
2364         if (!lancer_chip(adapter))
2365                 be_intr_set(adapter, true);
2366
2367         /* The evt queues are created in unarmed state; arm them */
2368         for_all_rx_queues(adapter, rxo, i) {
2369                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2370                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2371         }
2372         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2373
2374         /* Now that interrupts are on we can process async mcc */
2375         be_async_mcc_enable(adapter);
2376
2377         return 0;
2378 err:
2379         be_close(adapter->netdev);
2380         return -EIO;
2381 }
2382
2383 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2384 {
2385         struct be_dma_mem cmd;
2386         int status = 0;
2387         u8 mac[ETH_ALEN];
2388
2389         memset(mac, 0, ETH_ALEN);
2390
2391         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2392         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2393                                     GFP_KERNEL);
2394         if (cmd.va == NULL)
2395                 return -1;
2396         memset(cmd.va, 0, cmd.size);
2397
2398         if (enable) {
2399                 status = pci_write_config_dword(adapter->pdev,
2400                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2401                 if (status) {
2402                         dev_err(&adapter->pdev->dev,
2403                                 "Could not enable Wake-on-LAN\n");
2404                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2405                                           cmd.dma);
2406                         return status;
2407                 }
2408                 status = be_cmd_enable_magic_wol(adapter,
2409                                 adapter->netdev->dev_addr, &cmd);
2410                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2411                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2412         } else {
2413                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2414                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2415                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2416         }
2417
2418         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2419         return status;
2420 }
2421
2422 /*
2423  * Generate a seed MAC address from the PF MAC Address using jhash.
2424  * MAC addresses for the VFs are assigned incrementally starting from the seed.
2425  * These addresses are programmed in the ASIC by the PF and the VF driver
2426  * queries for the MAC address during its probe.
2427  */
2428 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2429 {
2430         u32 vf;
2431         int status = 0;
2432         u8 mac[ETH_ALEN];
2433
2434         be_vf_eth_addr_generate(adapter, mac);
2435
2436         for (vf = 0; vf < num_vfs; vf++) {
2437                 if (lancer_chip(adapter)) {
2438                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2439                 } else {
2440                         status = be_cmd_pmac_add(adapter, mac,
2441                                         adapter->vf_cfg[vf].vf_if_handle,
2442                                         &adapter->vf_cfg[vf].vf_pmac_id,
2443                                         vf + 1);
2444                 }
2445
2446                 if (status)
2447                         dev_err(&adapter->pdev->dev,
2448                         "MAC address assignment failed for VF %d\n", vf);
2449                 else
2450                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2451
2452                 mac[5] += 1;
2453         }
2454         return status;
2455 }
2456
2457 static void be_vf_clear(struct be_adapter *adapter)
2458 {
2459         u32 vf;
2460
2461         for (vf = 0; vf < num_vfs; vf++) {
2462                 if (lancer_chip(adapter))
2463                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2464                 else
2465                         be_cmd_pmac_del(adapter,
2466                                         adapter->vf_cfg[vf].vf_if_handle,
2467                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2468         }
2469
2470         for (vf = 0; vf < num_vfs; vf++)
2471                 be_cmd_if_destroy(adapter, adapter->vf_cfg[vf].vf_if_handle,
2472                                 vf + 1);
2473 }
2474
2475 static int be_clear(struct be_adapter *adapter)
2476 {
2477         if (be_physfn(adapter) && adapter->sriov_enabled)
2478                 be_vf_clear(adapter);
2479
2480         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2481
2482         be_mcc_queues_destroy(adapter);
2483         be_rx_queues_destroy(adapter);
2484         be_tx_queues_destroy(adapter);
2485
2486         /* tell fw we're done with firing cmds */
2487         be_cmd_fw_clean(adapter);
2488         return 0;
2489 }
2490
2491 static void be_vf_setup_init(struct be_adapter *adapter)
2492 {
2493         int vf;
2494
2495         for (vf = 0; vf < num_vfs; vf++) {
2496                 adapter->vf_cfg[vf].vf_if_handle = -1;
2497                 adapter->vf_cfg[vf].vf_pmac_id = -1;
2498         }
2499 }
2500
2501 static int be_vf_setup(struct be_adapter *adapter)
2502 {
2503         u32 cap_flags, en_flags, vf;
2504         u16 lnk_speed;
2505         int status;
2506
2507         be_vf_setup_init(adapter);
2508
2509         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2510                                 BE_IF_FLAGS_MULTICAST;
2511
2512         for (vf = 0; vf < num_vfs; vf++) {
2513                 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2514                                         &adapter->vf_cfg[vf].vf_if_handle,
2515                                         NULL, vf+1);
2516                 if (status)
2517                         goto err;
2518         }
2519
2520         status = be_vf_eth_addr_config(adapter);
2521         if (status)
2522                 goto err;
2523
2524         for (vf = 0; vf < num_vfs; vf++) {
2525                 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2526                                 vf + 1);
2527                 if (status)
2528                         goto err;
2529                 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
2530         }
2531         return 0;
2532 err:
2533         return status;
2534 }
2535
2536 static void be_setup_init(struct be_adapter *adapter)
2537 {
2538         adapter->vlan_prio_bmap = 0xff;
2539         adapter->link_speed = -1;
2540         adapter->if_handle = -1;
2541         adapter->be3_native = false;
2542         adapter->promiscuous = false;
2543         adapter->eq_next_idx = 0;
2544 }
2545
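/* Used on Lancer VFs (see be_setup()): look up a pmac_id from the
 * adapter's MAC list, query the corresponding MAC address and add it
 * as the active pmac of this interface.
 */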
2546 static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2547 {
2548         u32 pmac_id;
2549         int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2550         if (status != 0)
2551                 goto do_none;
2552         status = be_cmd_mac_addr_query(adapter, mac,
2553                         MAC_ADDRESS_TYPE_NETWORK,
2554                         false, adapter->if_handle, pmac_id);
2555         if (status != 0)
2556                 goto do_none;
2557         status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2558                         &adapter->pmac_id, 0);
2559 do_none:
2560         return status;
2561 }
2562
2563 static int be_setup(struct be_adapter *adapter)
2564 {
2565         struct net_device *netdev = adapter->netdev;
2566         u32 cap_flags, en_flags;
2567         u32 tx_fc, rx_fc;
2568         int status, i;
2569         u8 mac[ETH_ALEN];
2570         struct be_tx_obj *txo;
2571
2572         be_setup_init(adapter);
2573
2574         be_cmd_req_native_mode(adapter);
2575
2576         status = be_tx_queues_create(adapter);
2577         if (status != 0)
2578                 goto err;
2579
2580         status = be_rx_queues_create(adapter);
2581         if (status != 0)
2582                 goto err;
2583
2584         status = be_mcc_queues_create(adapter);
2585         if (status != 0)
2586                 goto err;
2587
2588         memset(mac, 0, ETH_ALEN);
2589         status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2590                         true /*permanent */, 0, 0);
2591         if (status)
2592                 return status;
2593         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2594         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2595
2596         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2597                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2598         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2599                         BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2600
2601         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2602                 cap_flags |= BE_IF_FLAGS_RSS;
2603                 en_flags |= BE_IF_FLAGS_RSS;
2604         }
2605         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2606                         netdev->dev_addr, &adapter->if_handle,
2607                         &adapter->pmac_id, 0);
2608         if (status != 0)
2609                 goto err;
2610
2611         for_all_tx_queues(adapter, txo, i) {
2612                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2613                 if (status)
2614                         goto err;
2615         }
2616
2617         /* The VF's permanent MAC queried from the card is incorrect.
2618          * For BEx: Query the MAC configured by the PF using the if_handle.
2619          * For Lancer: Get and use the mac_list to obtain the MAC address.
2620          */
2621         if (!be_physfn(adapter)) {
2622                 if (lancer_chip(adapter))
2623                         status = be_configure_mac_from_list(adapter, mac);
2624                 else
2625                         status = be_cmd_mac_addr_query(adapter, mac,
2626                                         MAC_ADDRESS_TYPE_NETWORK, false,
2627                                         adapter->if_handle, 0);
2628                 if (!status) {
2629                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2630                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2631                 }
2632         }
2633
2634         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2635
2636         status = be_vid_config(adapter, false, 0);
2637         if (status)
2638                 goto err;
2639
2640         be_set_rx_mode(adapter->netdev);
2641
2642         status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2643         /* For Lancer: It is legal for this cmd to fail on VF */
2644         if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2645                 goto err;
2646
2647         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2648                 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2649                                         adapter->rx_fc);
2650                 /* For Lancer: It is legal for this cmd to fail on VF */
2651                 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2652                         goto err;
2653         }
2654
2655         pcie_set_readrq(adapter->pdev, 4096);
2656
2657         if (be_physfn(adapter) && adapter->sriov_enabled) {
2658                 status = be_vf_setup(adapter);
2659                 if (status)
2660                         goto err;
2661         }
2662
2663         return 0;
2664 err:
2665         be_clear(adapter);
2666         return status;
2667 }
2668
2669 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2670 static bool be_flash_redboot(struct be_adapter *adapter,
2671                         const u8 *p, u32 img_start, int image_size,
2672                         int hdr_size)
2673 {
2674         u32 crc_offset;
2675         u8 flashed_crc[4];
2676         int status;
2677
2678         crc_offset = hdr_size + img_start + image_size - 4;
2679
2680         p += crc_offset;
2681
2682         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2683                         (image_size - 4));
2684         if (status) {
2685                 dev_err(&adapter->pdev->dev,
2686                 "could not get crc from flash, not flashing redboot\n");
2687                 return false;
2688         }
2689
2690         /* Update redboot only if the CRC does not match */
2691         if (!memcmp(flashed_crc, p, 4))
2692                 return false;
2693         else
2694                 return true;
2695 }
2696
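/* PHY firmware is flashed only for the TN_8022 PHY with a 10GBase-T
 * interface.
 */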
2697 static bool phy_flashing_required(struct be_adapter *adapter)
2698 {
2699         int status = 0;
2700         struct be_phy_info phy_info;
2701
2702         status = be_cmd_get_phy_info(adapter, &phy_info);
2703         if (status)
2704                 return false;
2705         if ((phy_info.phy_type == TN_8022) &&
2706                 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2707                 return true;
2708         }
2709         return false;
2710 }
2711
2712 static int be_flash_data(struct be_adapter *adapter,
2713                         const struct firmware *fw,
2714                         struct be_dma_mem *flash_cmd, int num_of_images)
2716 {
2717         int status = 0, i, filehdr_size = 0;
2718         u32 total_bytes = 0, flash_op;
2719         int num_bytes;
2720         const u8 *p = fw->data;
2721         struct be_cmd_write_flashrom *req = flash_cmd->va;
2722         const struct flash_comp *pflashcomp;
2723         int num_comp;
2724
2725         static const struct flash_comp gen3_flash_types[10] = {
2726                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2727                         FLASH_IMAGE_MAX_SIZE_g3},
2728                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2729                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2730                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2731                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2732                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2733                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2734                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2735                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2736                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2737                         FLASH_IMAGE_MAX_SIZE_g3},
2738                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2739                         FLASH_IMAGE_MAX_SIZE_g3},
2740                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2741                         FLASH_IMAGE_MAX_SIZE_g3},
2742                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2743                         FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2744                 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2745                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2746         };
2747         static const struct flash_comp gen2_flash_types[8] = {
2748                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2749                         FLASH_IMAGE_MAX_SIZE_g2},
2750                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2751                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2752                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2753                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2754                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2755                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2756                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2757                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2758                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2759                         FLASH_IMAGE_MAX_SIZE_g2},
2760                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2761                         FLASH_IMAGE_MAX_SIZE_g2},
2762                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2763                          FLASH_IMAGE_MAX_SIZE_g2}
2764         };
2765
2766         if (adapter->generation == BE_GEN3) {
2767                 pflashcomp = gen3_flash_types;
2768                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2769                 num_comp = ARRAY_SIZE(gen3_flash_types);
2770         } else {
2771                 pflashcomp = gen2_flash_types;
2772                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2773                 num_comp = ARRAY_SIZE(gen2_flash_types);
2774         }
2775         for (i = 0; i < num_comp; i++) {
2776                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2777                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2778                         continue;
2779                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2780                         if (!phy_flashing_required(adapter))
2781                                 continue;
2782                 }
2783                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2784                         (!be_flash_redboot(adapter, fw->data,
2785                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2786                         (num_of_images * sizeof(struct image_hdr)))))
2787                         continue;
2788                 p = fw->data;
2789                 p += filehdr_size + pflashcomp[i].offset
2790                         + (num_of_images * sizeof(struct image_hdr));
2791                 if (p + pflashcomp[i].size > fw->data + fw->size)
2792                         return -1;
2793                 total_bytes = pflashcomp[i].size;
2794                 while (total_bytes) {
2795                         if (total_bytes > 32*1024)
2796                                 num_bytes = 32*1024;
2797                         else
2798                                 num_bytes = total_bytes;
2799                         total_bytes -= num_bytes;
2800                         if (!total_bytes) {
2801                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2802                                         flash_op = FLASHROM_OPER_PHY_FLASH;
2803                                 else
2804                                         flash_op = FLASHROM_OPER_FLASH;
2805                         } else {
2806                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2807                                         flash_op = FLASHROM_OPER_PHY_SAVE;
2808                                 else
2809                                         flash_op = FLASHROM_OPER_SAVE;
2810                         }
2811                         memcpy(req->params.data_buf, p, num_bytes);
2812                         p += num_bytes;
2813                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2814                                 pflashcomp[i].optype, flash_op, num_bytes);
2815                         if (status) {
2816                                 if ((status == ILLEGAL_IOCTL_REQ) &&
2817                                         (pflashcomp[i].optype ==
2818                                                 IMG_TYPE_PHY_FW))
2819                                         break;
2820                                 dev_err(&adapter->pdev->dev,
2821                                         "cmd to write to flash rom failed.\n");
2822                                 return -1;
2823                         }
2824                 }
2825         }
2826         return 0;
2827 }
2828
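/* Infer the UFI image generation from the first character of the build
 * string in the flash file header.
 */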
2829 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2830 {
2831         if (fhdr == NULL)
2832                 return 0;
2833         if (fhdr->build[0] == '3')
2834                 return BE_GEN3;
2835         else if (fhdr->build[0] == '2')
2836                 return BE_GEN2;
2837         else
2838                 return 0;
2839 }
2840
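/* Download a Lancer firmware image: stream it to the "/prg" object in
 * 32KB chunks via write_object commands, then issue a zero-length write
 * to commit the downloaded image.
 */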
2841 static int lancer_fw_download(struct be_adapter *adapter,
2842                                 const struct firmware *fw)
2843 {
2844 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2845 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2846         struct be_dma_mem flash_cmd;
2847         const u8 *data_ptr = NULL;
2848         u8 *dest_image_ptr = NULL;
2849         size_t image_size = 0;
2850         u32 chunk_size = 0;
2851         u32 data_written = 0;
2852         u32 offset = 0;
2853         int status = 0;
2854         u8 add_status = 0;
2855
2856         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2857                 dev_err(&adapter->pdev->dev,
2858                         "FW image size must be 4-byte aligned\n");
2860                 status = -EINVAL;
2861                 goto lancer_fw_exit;
2862         }
2863
2864         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2865                                 + LANCER_FW_DOWNLOAD_CHUNK;
2866         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2867                                                 &flash_cmd.dma, GFP_KERNEL);
2868         if (!flash_cmd.va) {
2869                 status = -ENOMEM;
2870                 dev_err(&adapter->pdev->dev,
2871                         "Memory allocation failure while flashing\n");
2872                 goto lancer_fw_exit;
2873         }
2874
2875         dest_image_ptr = flash_cmd.va +
2876                                 sizeof(struct lancer_cmd_req_write_object);
2877         image_size = fw->size;
2878         data_ptr = fw->data;
2879
2880         while (image_size) {
2881                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2882
2883                 /* Copy the image chunk content. */
2884                 memcpy(dest_image_ptr, data_ptr, chunk_size);
2885
2886                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2887                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2888                                 &data_written, &add_status);
2889
2890                 if (status)
2891                         break;
2892
2893                 offset += data_written;
2894                 data_ptr += data_written;
2895                 image_size -= data_written;
2896         }
2897
2898         if (!status) {
2899                 /* Commit the FW written */
2900                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2901                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2902                                         &data_written, &add_status);
2903         }
2904
2905         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2906                                 flash_cmd.dma);
2907         if (status) {
2908                 dev_err(&adapter->pdev->dev,
2909                         "Firmware load error. Status code: 0x%x Additional Status: 0x%x\n",
2911                         status, add_status);
2912                 goto lancer_fw_exit;
2913         }
2914
2915         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2916 lancer_fw_exit:
2917         return status;
2918 }
2919
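/* Download firmware to a BE2/BE3 adapter after verifying that the UFI
 * image generation matches the adapter generation.
 */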
2920 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2921 {
2922         struct flash_file_hdr_g2 *fhdr;
2923         struct flash_file_hdr_g3 *fhdr3;
2924         struct image_hdr *img_hdr_ptr = NULL;
2925         struct be_dma_mem flash_cmd;
2926         const u8 *p;
2927         int status = 0, i = 0, num_imgs = 0;
2928
2929         p = fw->data;
2930         fhdr = (struct flash_file_hdr_g2 *) p;
2931
2932         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2933         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2934                                           &flash_cmd.dma, GFP_KERNEL);
2935         if (!flash_cmd.va) {
2936                 status = -ENOMEM;
2937                 dev_err(&adapter->pdev->dev,
2938                         "Memory allocation failure while flashing\n");
2939                 goto be_fw_exit;
2940         }
2941
2942         if ((adapter->generation == BE_GEN3) &&
2943                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2944                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2945                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2946                 for (i = 0; i < num_imgs; i++) {
2947                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2948                                         (sizeof(struct flash_file_hdr_g3) +
2949                                          i * sizeof(struct image_hdr)));
2950                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2951                                 status = be_flash_data(adapter, fw, &flash_cmd,
2952                                                         num_imgs);
2953                 }
2954         } else if ((adapter->generation == BE_GEN2) &&
2955                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2956                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2957         } else {
2958                 dev_err(&adapter->pdev->dev,
2959                         "UFI and Interface are not compatible for flashing\n");
2960                 status = -EINVAL;
2961         }
2962
2963         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2964                           flash_cmd.dma);
2965         if (status) {
2966                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2967                 goto be_fw_exit;
2968         }
2969
2970         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2971
2972 be_fw_exit:
2973         return status;
2974 }
2975
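/* Fetch the named firmware image from userspace via request_firmware()
 * and hand it to the chip-specific download routine.
 */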
2976 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2977 {
2978         const struct firmware *fw;
2979         int status;
2980
2981         if (!netif_running(adapter->netdev)) {
2982                 dev_err(&adapter->pdev->dev,
2983                         "Firmware load not allowed (interface is down)\n");
2984                 return -ENETDOWN;
2985         }
2986
2987         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2988         if (status)
2989                 goto fw_exit;
2990
2991         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2992
2993         if (lancer_chip(adapter))
2994                 status = lancer_fw_download(adapter, fw);
2995         else
2996                 status = be_fw_download(adapter, fw);
2997
2998 fw_exit:
2999         release_firmware(fw);
3000         return status;
3001 }
3002
3003 static const struct net_device_ops be_netdev_ops = {
3004         .ndo_open               = be_open,
3005         .ndo_stop               = be_close,
3006         .ndo_start_xmit         = be_xmit,
3007         .ndo_set_rx_mode        = be_set_rx_mode,
3008         .ndo_set_mac_address    = be_mac_addr_set,
3009         .ndo_change_mtu         = be_change_mtu,
3010         .ndo_get_stats64        = be_get_stats64,
3011         .ndo_validate_addr      = eth_validate_addr,
3012         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3013         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3014         .ndo_set_vf_mac         = be_set_vf_mac,
3015         .ndo_set_vf_vlan        = be_set_vf_vlan,
3016         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3017         .ndo_get_vf_config      = be_get_vf_config,
3018 };
3019
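/* Publish netdev features/ops and register per-queue NAPI contexts */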
3020 static void be_netdev_init(struct net_device *netdev)
3021 {
3022         struct be_adapter *adapter = netdev_priv(netdev);
3023         struct be_rx_obj *rxo;
3024         int i;
3025
3026         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3027                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3028                 NETIF_F_HW_VLAN_TX;
3029         if (be_multi_rxq(adapter))
3030                 netdev->hw_features |= NETIF_F_RXHASH;
3031
3032         netdev->features |= netdev->hw_features |
3033                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3034
3035         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3036                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3037
3038         netdev->flags |= IFF_MULTICAST;
3039
3040         netif_set_gso_max_size(netdev, 65535);
3041
3042         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3043
3044         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3045
3046         for_all_rx_queues(adapter, rxo, i)
3047                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3048                                 BE_NAPI_WEIGHT);
3049
3050         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
3051                 BE_NAPI_WEIGHT);
3052 }
3053
3054 static void be_unmap_pci_bars(struct be_adapter *adapter)
3055 {
3056         if (adapter->csr)
3057                 iounmap(adapter->csr);
3058         if (adapter->db)
3059                 iounmap(adapter->db);
3060 }
3061
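/* Map the PCI BARs: Lancer only needs the doorbell BAR (BAR 0); on
 * BE2/BE3 the PF additionally maps the CSR BAR, and the doorbell BAR
 * number depends on the chip generation and function type.
 */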
3062 static int be_map_pci_bars(struct be_adapter *adapter)
3063 {
3064         u8 __iomem *addr;
3065         int db_reg;
3066
3067         if (lancer_chip(adapter)) {
3068                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3069                         pci_resource_len(adapter->pdev, 0));
3070                 if (addr == NULL)
3071                         return -ENOMEM;
3072                 adapter->db = addr;
3073                 return 0;
3074         }
3075
3076         if (be_physfn(adapter)) {
3077                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3078                                 pci_resource_len(adapter->pdev, 2));
3079                 if (addr == NULL)
3080                         return -ENOMEM;
3081                 adapter->csr = addr;
3082         }
3083
3084         if (adapter->generation == BE_GEN2 || be_physfn(adapter))
3085                 db_reg = 4;
3086         else
3087                 db_reg = 0;
3092         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3093                                 pci_resource_len(adapter->pdev, db_reg));
3094         if (addr == NULL)
3095                 goto pci_map_err;
3096         adapter->db = addr;
3097
3098         return 0;
3099 pci_map_err:
3100         be_unmap_pci_bars(adapter);
3101         return -ENOMEM;
3102 }
3103
3105 static void be_ctrl_cleanup(struct be_adapter *adapter)
3106 {
3107         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3108
3109         be_unmap_pci_bars(adapter);
3110
3111         if (mem->va)
3112                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3113                                   mem->dma);
3114
3115         mem = &adapter->rx_filter;
3116         if (mem->va)
3117                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3118                                   mem->dma);
3119 }
3120
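/* Map BARs and allocate the DMA memory used for the mailbox and RX
 * filter commands.
 */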
3121 static int be_ctrl_init(struct be_adapter *adapter)
3122 {
3123         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3124         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3125         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3126         int status;
3127
3128         status = be_map_pci_bars(adapter);
3129         if (status)
3130                 goto done;
3131
3132         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3133         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3134                                                 mbox_mem_alloc->size,
3135                                                 &mbox_mem_alloc->dma,
3136                                                 GFP_KERNEL);
3137         if (!mbox_mem_alloc->va) {
3138                 status = -ENOMEM;
3139                 goto unmap_pci_bars;
3140         }
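        /* The mailbox must be 16-byte aligned, hence the extra 16 bytes
         * allocated above; carve an aligned view out of that allocation.
         */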
3141         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3142         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3143         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3144         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3145
3146         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3147         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3148                                         &rx_filter->dma, GFP_KERNEL);
3149         if (rx_filter->va == NULL) {
3150                 status = -ENOMEM;
3151                 goto free_mbox;
3152         }
3153         memset(rx_filter->va, 0, rx_filter->size);
3154
3155         mutex_init(&adapter->mbox_lock);
3156         spin_lock_init(&adapter->mcc_lock);
3157         spin_lock_init(&adapter->mcc_cq_lock);
3158
3159         init_completion(&adapter->flash_compl);
3160         pci_save_state(adapter->pdev);
3161         return 0;
3162
3163 free_mbox:
3164         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3165                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3166
3167 unmap_pci_bars:
3168         be_unmap_pci_bars(adapter);
3169
3170 done:
3171         return status;
3172 }
3173
3174 static void be_stats_cleanup(struct be_adapter *adapter)
3175 {
3176         struct be_dma_mem *cmd = &adapter->stats_cmd;
3177
3178         if (cmd->va)
3179                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3180                                   cmd->va, cmd->dma);
3181 }
3182
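/* Allocate the DMA buffer for stats commands; its size depends on which
 * stats command version the chip uses.
 */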
3183 static int be_stats_init(struct be_adapter *adapter)
3184 {
3185         struct be_dma_mem *cmd = &adapter->stats_cmd;
3186
3187         if (adapter->generation == BE_GEN2) {
3188                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3189         } else {
3190                 if (lancer_chip(adapter))
3191                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3192                 else
3193                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3194         }
3195         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3196                                      GFP_KERNEL);
3197         if (cmd->va == NULL)
3198                 return -ENOMEM;
3199         memset(cmd->va, 0, cmd->size);
3200         return 0;
3201 }
3202
3203 static void __devexit be_remove(struct pci_dev *pdev)
3204 {
3205         struct be_adapter *adapter = pci_get_drvdata(pdev);
3206
3207         if (!adapter)
3208                 return;
3209
3210         cancel_delayed_work_sync(&adapter->work);
3211
3212         unregister_netdev(adapter->netdev);
3213
3214         be_clear(adapter);
3215
3216         be_stats_cleanup(adapter);
3217
3218         be_ctrl_cleanup(adapter);
3219
3220         be_sriov_disable(adapter);
3221
3222         be_msix_disable(adapter);
3223
3224         pci_set_drvdata(pdev, NULL);
3225         pci_release_regions(pdev);
3226         pci_disable_device(pdev);
3227
3228         free_netdev(adapter->netdev);
3229 }
3230
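/* Cache FW configuration: port number, function mode/caps, the VLAN
 * table size (smaller in FLEX10 mode) and controller attributes.
 */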
3231 static int be_get_config(struct be_adapter *adapter)
3232 {
3233         int status;
3234
3235         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3236                         &adapter->function_mode, &adapter->function_caps);
3237         if (status)
3238                 return status;
3239
3240         if (adapter->function_mode & FLEX10_MODE)
3241                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3242         else
3243                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3244
3245         status = be_cmd_get_cntl_attributes(adapter);
3246         if (status)
3247                 return status;
3248
3249         return 0;
3250 }
3251
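/* Derive the adapter generation from the PCI device ID; Lancer devices
 * are additionally validated via the SLI_INTF register and have their
 * SLI family recorded.
 */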
3252 static int be_dev_family_check(struct be_adapter *adapter)
3253 {
3254         struct pci_dev *pdev = adapter->pdev;
3255         u32 sli_intf = 0, if_type;
3256
3257         switch (pdev->device) {
3258         case BE_DEVICE_ID1:
3259         case OC_DEVICE_ID1:
3260                 adapter->generation = BE_GEN2;
3261                 break;
3262         case BE_DEVICE_ID2:
3263         case OC_DEVICE_ID2:
3264                 adapter->generation = BE_GEN3;
3265                 break;
3266         case OC_DEVICE_ID3:
3267         case OC_DEVICE_ID4:
3268                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3269                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3270                                                 SLI_INTF_IF_TYPE_SHIFT;
3271
3272                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3273                         if_type != 0x02) {
3274                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3275                         return -EINVAL;
3276                 }
3277                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3278                                          SLI_INTF_FAMILY_SHIFT);
3279                 adapter->generation = BE_GEN3;
3280                 break;
3281         default:
3282                 adapter->generation = 0;
3283         }
3284         return 0;
3285 }
3286
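/* Poll the SLIPORT status register, for up to 30 seconds, until FW
 * reports ready.
 */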
3287 static int lancer_wait_ready(struct be_adapter *adapter)
3288 {
3289 #define SLIPORT_READY_TIMEOUT 30
3290         u32 sliport_status;
3291         int status = 0, i;
3292
3293         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3294                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3295                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3296                         break;
3297
3298                 msleep(1000);
3299         }
3300
3301         if (i == SLIPORT_READY_TIMEOUT)
3302                 status = -ETIMEDOUT;
3303
3304         return status;
3305 }
3306
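/* If the port is in an error state that FW marks as reset-needed, kick a
 * reset through SLIPORT_CONTROL and wait for the port to come back ready.
 */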
3307 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3308 {
3309         int status;
3310         u32 sliport_status, err, reset_needed;
3311         status = lancer_wait_ready(adapter);
3312         if (!status) {
3313                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3314                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3315                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3316                 if (err && reset_needed) {
3317                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3318                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3319
3320                         /* check adapter has corrected the error */
3321                         status = lancer_wait_ready(adapter);
3322                         sliport_status = ioread32(adapter->db +
3323                                                         SLIPORT_STATUS_OFFSET);
3324                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3325                                                 SLIPORT_STATUS_RN_MASK);
3326                         if (status || sliport_status)
3327                                 status = -EIO;
3328                 } else if (err || reset_needed) {
3329                         status = -EIO;
3330                 }
3331         }
3332         return status;
3333 }
3334
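/* Worker-context check for a Lancer port error; on error, attempt a full
 * reset and re-setup of the function.
 */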
3335 static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3336 {
3337         int status;
3338         u32 sliport_status;
3339
3340         if (adapter->eeh_err || adapter->ue_detected)
3341                 return;
3342
3343         sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3344
3345         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3346                 dev_err(&adapter->pdev->dev,
3347                                 "Adapter in error state. Trying to recover.\n");
3349
3350                 status = lancer_test_and_set_rdy_state(adapter);
3351                 if (status)
3352                         goto err;
3353
3354                 netif_device_detach(adapter->netdev);
3355
3356                 if (netif_running(adapter->netdev))
3357                         be_close(adapter->netdev);
3358
3359                 be_clear(adapter);
3360
3361                 adapter->fw_timeout = false;
3362
3363                 status = be_setup(adapter);
3364                 if (status)
3365                         goto err;
3366
3367                 if (netif_running(adapter->netdev)) {
3368                         status = be_open(adapter->netdev);
3369                         if (status)
3370                                 goto err;
3371                 }
3372
3373                 netif_device_attach(adapter->netdev);
3374
3375                 dev_info(&adapter->pdev->dev,
3376                                 "Adapter error recovery succeeded\n");
3377         }
3378         return;
3379 err:
3380         dev_err(&adapter->pdev->dev,
3381                         "Adapter error recovery failed\n");
3382 }
3383
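/* Periodic (1 second) housekeeping: error detection/recovery, stats
 * refresh, EQ delay tuning and replenishing of starved RX queues.
 */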
3384 static void be_worker(struct work_struct *work)
3385 {
3386         struct be_adapter *adapter =
3387                 container_of(work, struct be_adapter, work.work);
3388         struct be_rx_obj *rxo;
3389         int i;
3390
3391         if (lancer_chip(adapter))
3392                 lancer_test_and_recover_fn_err(adapter);
3393
3394         be_detect_dump_ue(adapter);
3395
3396         /* when interrupts are not yet enabled, just reap any pending
3397          * mcc completions */
3398         if (!netif_running(adapter->netdev)) {
3399                 int mcc_compl, status = 0;
3400
3401                 mcc_compl = be_process_mcc(adapter, &status);
3402
3403                 if (mcc_compl) {
3404                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
3405                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
3406                 }
3407
3408                 goto reschedule;
3409         }
3410
3411         if (!adapter->stats_cmd_sent) {
3412                 if (lancer_chip(adapter))
3413                         lancer_cmd_get_pport_stats(adapter,
3414                                                 &adapter->stats_cmd);
3415                 else
3416                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
3417         }
3418
3419         for_all_rx_queues(adapter, rxo, i) {
3420                 be_rx_eqd_update(adapter, rxo);
3421
3422                 if (rxo->rx_post_starved) {
3423                         rxo->rx_post_starved = false;
3424                         be_post_rx_frags(rxo, GFP_KERNEL);
3425                 }
3426         }
3427
3428 reschedule:
3429         adapter->work_counter++;
3430         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3431 }
3432
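/* PCI probe: bring up the device, sync with FW, allocate resources and
 * register the net device.
 */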
3433 static int __devinit be_probe(struct pci_dev *pdev,
3434                         const struct pci_device_id *pdev_id)
3435 {
3436         int status = 0;
3437         struct be_adapter *adapter;
3438         struct net_device *netdev;
3439
3440         status = pci_enable_device(pdev);
3441         if (status)
3442                 goto do_none;
3443
3444         status = pci_request_regions(pdev, DRV_NAME);
3445         if (status)
3446                 goto disable_dev;
3447         pci_set_master(pdev);
3448
3449         netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3450         if (netdev == NULL) {
3451                 status = -ENOMEM;
3452                 goto rel_reg;
3453         }
3454         adapter = netdev_priv(netdev);
3455         adapter->pdev = pdev;
3456         pci_set_drvdata(pdev, adapter);
3457
3458         status = be_dev_family_check(adapter);
3459         if (status)
3460                 goto free_netdev;
3461
3462         adapter->netdev = netdev;
3463         SET_NETDEV_DEV(netdev, &pdev->dev);
3464
3465         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3466         if (!status) {
3467                 netdev->features |= NETIF_F_HIGHDMA;
3468         } else {
3469                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3470                 if (status) {
3471                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3472                         goto free_netdev;
3473                 }
3474         }
3475
3476         status = be_sriov_enable(adapter);
3477         if (status)
3478                 goto free_netdev;
3479
3480         status = be_ctrl_init(adapter);
3481         if (status)
3482                 goto disable_sriov;
3483
3484         if (lancer_chip(adapter)) {
3485                 status = lancer_wait_ready(adapter);
3486                 if (!status) {
3487                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3488                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3489                         status = lancer_test_and_set_rdy_state(adapter);
3490                 }
3491                 if (status) {
3492                         dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3493                         goto ctrl_clean;
3494                 }
3495         }
3496
3497         /* sync up with fw's ready state */
3498         if (be_physfn(adapter)) {
3499                 status = be_cmd_POST(adapter);
3500                 if (status)
3501                         goto ctrl_clean;
3502         }
3503
3504         /* tell fw we're ready to fire cmds */
3505         status = be_cmd_fw_init(adapter);
3506         if (status)
3507                 goto ctrl_clean;
3508
3509         status = be_cmd_reset_function(adapter);
3510         if (status)
3511                 goto ctrl_clean;
3512
3513         status = be_stats_init(adapter);
3514         if (status)
3515                 goto ctrl_clean;
3516
3517         status = be_get_config(adapter);
3518         if (status)
3519                 goto stats_clean;
3520
3521         /* The INTR bit may be set in the card when probed by a kdump kernel
3522          * after a crash; clear it so interrupts stay disabled until the
3523          * driver enables them.
3523          */
3524         if (!lancer_chip(adapter))
3525                 be_intr_set(adapter, false);
3526
3527         be_msix_enable(adapter);
3528
3529         INIT_DELAYED_WORK(&adapter->work, be_worker);
3530         adapter->rx_fc = adapter->tx_fc = true;
3531
3532         status = be_setup(adapter);
3533         if (status)
3534                 goto msix_disable;
3535
3536         be_netdev_init(netdev);
3537         status = register_netdev(netdev);
3538         if (status != 0)
3539                 goto unsetup;
3540
3541         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3542
3543         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3544         return 0;
3545
3546 unsetup:
3547         be_clear(adapter);
3548 msix_disable:
3549         be_msix_disable(adapter);
3550 stats_clean:
3551         be_stats_cleanup(adapter);
3552 ctrl_clean:
3553         be_ctrl_cleanup(adapter);
3554 disable_sriov:
3555         be_sriov_disable(adapter);
3556 free_netdev:
3557         free_netdev(netdev);
3558         pci_set_drvdata(pdev, NULL);
3559 rel_reg:
3560         pci_release_regions(pdev);
3561 disable_dev:
3562         pci_disable_device(pdev);
3563 do_none:
3564         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3565         return status;
3566 }
3567
3568 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3569 {
3570         struct be_adapter *adapter = pci_get_drvdata(pdev);
3571         struct net_device *netdev =  adapter->netdev;
3572
3573         cancel_delayed_work_sync(&adapter->work);
3574         if (adapter->wol)
3575                 be_setup_wol(adapter, true);
3576
3577         netif_device_detach(netdev);
3578         if (netif_running(netdev)) {
3579                 rtnl_lock();
3580                 be_close(netdev);
3581                 rtnl_unlock();
3582         }
3583         be_clear(adapter);
3584
3585         be_msix_disable(adapter);
3586         pci_save_state(pdev);
3587         pci_disable_device(pdev);
3588         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3589         return 0;
3590 }
3591
3592 static int be_resume(struct pci_dev *pdev)
3593 {
3594         int status = 0;
3595         struct be_adapter *adapter = pci_get_drvdata(pdev);
3596         struct net_device *netdev =  adapter->netdev;
3597
3598         netif_device_detach(netdev);
3599
3600         status = pci_enable_device(pdev);
3601         if (status)
3602                 return status;
3603
3604         pci_set_power_state(pdev, PCI_D0);
3605         pci_restore_state(pdev);
3606
3607         be_msix_enable(adapter);
3608         /* tell fw we're ready to fire cmds */
3609         status = be_cmd_fw_init(adapter);
3610         if (status)
3611                 return status;
3612
3613         be_setup(adapter);
3614         if (netif_running(netdev)) {
3615                 rtnl_lock();
3616                 be_open(netdev);
3617                 rtnl_unlock();
3618         }
3619         netif_device_attach(netdev);
3620
3621         if (adapter->wol)
3622                 be_setup_wol(adapter, false);
3623
3624         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3625         return 0;
3626 }
3627
3628 /*
3629  * An FLR will stop BE from DMAing any data.
3630  */
3631 static void be_shutdown(struct pci_dev *pdev)
3632 {
3633         struct be_adapter *adapter = pci_get_drvdata(pdev);
3634
3635         if (!adapter)
3636                 return;
3637
3638         cancel_delayed_work_sync(&adapter->work);
3639
3640         netif_device_detach(adapter->netdev);
3641
3642         if (adapter->wol)
3643                 be_setup_wol(adapter, true);
3644
3645         be_cmd_reset_function(adapter);
3646
3647         pci_disable_device(pdev);
3648 }
3649
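/* EEH/AER callbacks: quiesce the function on error, re-init after the
 * slot reset, and rebuild queues/filters on resume.
 */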
3650 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3651                                 pci_channel_state_t state)
3652 {
3653         struct be_adapter *adapter = pci_get_drvdata(pdev);
3654         struct net_device *netdev =  adapter->netdev;
3655
3656         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3657
3658         adapter->eeh_err = true;
3659
3660         netif_device_detach(netdev);
3661
3662         if (netif_running(netdev)) {
3663                 rtnl_lock();
3664                 be_close(netdev);
3665                 rtnl_unlock();
3666         }
3667         be_clear(adapter);
3668
3669         if (state == pci_channel_io_perm_failure)
3670                 return PCI_ERS_RESULT_DISCONNECT;
3671
3672         pci_disable_device(pdev);
3673
3674         return PCI_ERS_RESULT_NEED_RESET;
3675 }
3676
3677 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3678 {
3679         struct be_adapter *adapter = pci_get_drvdata(pdev);
3680         int status;
3681
3682         dev_info(&adapter->pdev->dev, "EEH reset\n");
3683         adapter->eeh_err = false;
3684         adapter->ue_detected = false;
3685         adapter->fw_timeout = false;
3686
3687         status = pci_enable_device(pdev);
3688         if (status)
3689                 return PCI_ERS_RESULT_DISCONNECT;
3690
3691         pci_set_master(pdev);
3692         pci_set_power_state(pdev, PCI_D0);
3693         pci_restore_state(pdev);
3694
3695         /* Check if card is ok and fw is ready */
3696         status = be_cmd_POST(adapter);
3697         if (status)
3698                 return PCI_ERS_RESULT_DISCONNECT;
3699
3700         return PCI_ERS_RESULT_RECOVERED;
3701 }
3702
3703 static void be_eeh_resume(struct pci_dev *pdev)
3704 {
3705         int status = 0;
3706         struct be_adapter *adapter = pci_get_drvdata(pdev);
3707         struct net_device *netdev =  adapter->netdev;
3708
3709         dev_info(&adapter->pdev->dev, "EEH resume\n");
3710
3711         pci_save_state(pdev);
3712
3713         /* tell fw we're ready to fire cmds */
3714         status = be_cmd_fw_init(adapter);
3715         if (status)
3716                 goto err;
3717
3718         status = be_setup(adapter);
3719         if (status)
3720                 goto err;
3721
3722         if (netif_running(netdev)) {
3723                 status = be_open(netdev);
3724                 if (status)
3725                         goto err;
3726         }
3727         netif_device_attach(netdev);
3728         return;
3729 err:
3730         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3731 }
3732
3733 static struct pci_error_handlers be_eeh_handlers = {
3734         .error_detected = be_eeh_err_detected,
3735         .slot_reset = be_eeh_reset,
3736         .resume = be_eeh_resume,
3737 };
3738
3739 static struct pci_driver be_driver = {
3740         .name = DRV_NAME,
3741         .id_table = be_dev_ids,
3742         .probe = be_probe,
3743         .remove = be_remove,
3744         .suspend = be_suspend,
3745         .resume = be_resume,
3746         .shutdown = be_shutdown,
3747         .err_handler = &be_eeh_handlers,
3748 };
3749
3750 static int __init be_init_module(void)
3751 {
3752         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3753             rx_frag_size != 2048) {
3754                 pr_warn(DRV_NAME ": Module param rx_frag_size must be 2048/4096/8192. Using 2048\n");
3757                 rx_frag_size = 2048;
3758         }
3759
3760         return pci_register_driver(&be_driver);
3761 }
3762 module_init(be_init_module);
3763
3764 static void __exit be_exit_module(void)
3765 {
3766         pci_unregister_driver(&be_driver);
3767 }
3768 module_exit(be_exit_module);