drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds received data.");

static const struct pci_device_id be_dev_ids[] = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

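/* Free the DMA-coherent memory backing a queue's ring, if allocated */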
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                          u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                      GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

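/* Enable/disable host interrupts by toggling the HOSTINTR bit of the
 * MEMBAR interrupt-control register in PCI config space.
 */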
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                              &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                               PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On Lancer, interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

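/* Doorbells that tell HW how many entries were posted to the RX/TX rings */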
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;

        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;

        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

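/* EQ/CQ doorbells: optionally re-arm the queue and acknowledge the number
 * of entries the driver has processed ("popped").
 */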
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                         bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) << DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

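/* Program a new MAC address via the PMAC_ADD cmd and confirm that it took
 * effect by querying the currently active MAC from the FW.
 */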
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        struct sockaddr *addr = p;
        int status;
        u8 mac[ETH_ALEN];
        u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* Proceed further only if the user-provided MAC is different
         * from the active MAC
         */
        if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
                return 0;

        /* The PMAC_ADD cmd may fail if the VF doesn't have the FILTMGMT
         * privilege or if the PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
         * FILTMGMT privilege. This failure is OK only if the PF programmed
         * the MAC for the VF.
         */
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle, &adapter->pmac_id[0], 0);
        if (!status) {
                curr_pmac_id = adapter->pmac_id[0];

                /* Delete the old programmed MAC. This call may fail if the
                 * old MAC was already deleted by the PF driver.
                 */
                if (adapter->pmac_id[0] != old_pmac_id)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        old_pmac_id, 0);
        }

        /* Decide if the new MAC was successfully activated only after
         * querying the FW
         */
        status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
                                       adapter->if_handle, true, 0);
        if (status)
                goto err;

        /* The MAC change did not happen, either due to lack of privilege
         * or because the PF didn't pre-provision it.
         */
        if (!ether_addr_equal(addr->sa_data, mac)) {
                status = -EPERM;
                goto err;
        }

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        dev_info(dev, "MAC address changed to %pM\n", mac);
        return 0;
err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else if (BE3_chip(adapter)) {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else if (BE3_chip(adapter)) {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered = port_stats->rx_address_filtered +
                                    port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v2 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
        if (be_roce_supported(adapter)) {
                drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
                drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
                drvs->rx_roce_frames = port_stats->roce_frames_received;
                drvs->roce_drops_crc = port_stats->roce_drops_crc;
                drvs->roce_drops_payload_len =
                        port_stats->roce_drops_payload_len;
        }
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered = pport_stats->rx_address_filtered +
                                    pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

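/* Accumulate a 16-bit HW counter into a 32-bit SW counter, compensating
 * for wrap-around of the 16-bit value.
 */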
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   ((x) & 0xFFFF)
#define hi(x)                   ((x) & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
                               struct be_rx_obj *rxo, u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* This erx HW counter wraps around after 65535; the driver
                 * accumulates it into a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else if (BE3_chip(adapter))
                        populate_be_v1_stats(adapter);
                else
                        populate_be_v2_stats(adapter);

                /* erx_v2 is longer than v0, v1; use the v2 layout for
                 * v0/v1 access as well
                 */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

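/* Aggregate the per-queue SW counters and the FW-reported error counters
 * into rtnl_link_stats64 for the stack.
 */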
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                                struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);

                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);

                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                               u32 wrb_cnt, u32 copied, u32 gso_segs,
                               bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                           bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                     struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If the vlan priority provided by the OS is NOT in the available
         * bmap, use the recommended priority
         */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                         struct sk_buff *skb, u32 wrb_cnt, u32 len,
                         bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                              hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                          bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

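/* DMA-map the skb header and frags and fill one WRB per fragment, preceded
 * by the header WRB. Returns the number of bytes mapped, or 0 on a mapping
 * error (in which case all previous mappings are undone).
 */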
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                        struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                        bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);

                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

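/* Inline the VLAN tag (and the outer QnQ tag, if configured) into the packet
 * data so that HW VLAN tagging can be skipped.
 */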
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                /* F/W workaround: setting skip_hw_vlan informs the F/W to
                 * skip VLAN insertion
                 */
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *)(skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
                                struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
                                                  struct sk_buff *skb,
                                                  bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* For padded packets, BE HW modifies the tot_len field in the IP
         * header incorrectly when a VLAN tag is inserted by HW.
         * For padded packets, Lancer computes an incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If the vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in pvid-tagging mode
         */
        if (be_pvid_tagging_enabled(adapter) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto err;
        }

        /* HW may lockup when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
                     (adapter->pvid || adapter->qnq_vid) &&
                     !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto err;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
err:
        return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        /* Lancer and SH-R ASICs have a bug wherein packets that are 32 bytes
         * or less may cause a transmit stall on that port. The work-around is
         * to pad such packets (<= 32 bytes) to a 36-byte length.
         */
        if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        return NULL;
                skb->len = 36;
        }

        if (BEx_chip(adapter) || lancer_chip(adapter)) {
                skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
                if (!skb)
                        return NULL;
        }

        return skb;
}

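/* Transmit entry point: apply the HW workarounds, build the WRBs, stop the
 * queue if it can't hold another max-sized skb, and ring the TX doorbell.
 */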
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb) {
                tx_stats(txo)->tx_drv_drops++;
                return NETDEV_TX_OK;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                tx_stats(txo)->tx_drv_drops++;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
            new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                         "MTU must be between %d and %d bytes\n",
                         BE_MIN_MTU,
                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                 netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > be_max_vlans(adapter))
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 0);

        if (status) {
                /* Set to VLAN promisc mode as setting VLAN filter failed */
                if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
                        goto set_vlan_promisc;
                dev_err(&adapter->pdev->dev,
                        "Setting HW VLAN filtering failed.\n");
        } else {
                if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
                        /* hw VLAN filtering re-enabled */
                        status = be_cmd_rx_filter(adapter,
                                                  BE_FLAGS_VLAN_PROMISC, OFF);
                        if (!status) {
                                dev_info(&adapter->pdev->dev,
                                         "Disabling VLAN Promiscuous mode.\n");
                                adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
                        }
                }
        }

        return status;

set_vlan_promisc:
        if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
                return 0;

        status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
        if (!status) {
                dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
                adapter->flags |= BE_FLAGS_VLAN_PROMISC;
        } else {
                dev_err(&adapter->pdev->dev,
                        "Failed to enable VLAN Promiscuous mode.\n");
        }
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        adapter->vlans_added++;

        status = be_vid_config(adapter);
        if (status) {
                adapter->vlans_added--;
                adapter->vlan_tag[vid] = 0;
        }
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        status = be_vid_config(adapter);
        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

static void be_clear_promisc(struct be_adapter *adapter)
{
        adapter->promiscuous = false;
        adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;

        be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                be_clear_promisc(adapter);
                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > be_max_mc(adapter)) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev,
                         "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev,
                         "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (BEx_chip(adapter)) {
                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
                                vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        } else {
                status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
                                        vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                        mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                            struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
                return -EINVAL;

        if (vlan || qos) {
                vlan |= qos << VLAN_PRIO_SHIFT;
                if (vf_cfg->vlan_tag != vlan)
                        status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                                       vf_cfg->if_handle, 0);
        } else {
                /* Reset Transparent VLAN Tagging */
                status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
                                               vf + 1, vf_cfg->if_handle, 0);
        }

        if (!status)
                vf_cfg->vlan_tag = vlan;
        else
                dev_info(&adapter->pdev->dev,
                         "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                        "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

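/* Snapshot the pkt counts and timestamp used by the adaptive EQ-delay calc */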
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
                          ulong now)
{
        aic->rx_pkts_prev = rx_pkts;
        aic->tx_reqs_prev = tx_pkts;
        aic->jiffies = now;
}

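/* Adaptive interrupt coalescing: derive a new EQ delay from the observed
 * RX/TX packets-per-second and update the EQs whose delay has changed.
 */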
1366 static void be_eqd_update(struct be_adapter *adapter)
1367 {
1368         struct be_set_eqd set_eqd[MAX_EVT_QS];
1369         int eqd, i, num = 0, start;
1370         struct be_aic_obj *aic;
1371         struct be_eq_obj *eqo;
1372         struct be_rx_obj *rxo;
1373         struct be_tx_obj *txo;
1374         u64 rx_pkts, tx_pkts;
1375         ulong now;
1376         u32 pps, delta;
1377
1378         for_all_evt_queues(adapter, eqo, i) {
1379                 aic = &adapter->aic_obj[eqo->idx];
1380                 if (!aic->enable) {
1381                         if (aic->jiffies)
1382                                 aic->jiffies = 0;
1383                         eqd = aic->et_eqd;
1384                         goto modify_eqd;
1385                 }
1386
1387                 rxo = &adapter->rx_obj[eqo->idx];
1388                 do {
1389                         start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1390                         rx_pkts = rxo->stats.rx_pkts;
1391                 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
1392
1393                 txo = &adapter->tx_obj[eqo->idx];
1394                 do {
1395                         start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1396                         tx_pkts = txo->stats.tx_reqs;
1397                 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
1398
1399
1401         /* Skip if wrapped around or on the first calculation */
1402                 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1403                     rx_pkts < aic->rx_pkts_prev ||
1404                     tx_pkts < aic->tx_reqs_prev) {
1405                         be_aic_update(aic, rx_pkts, tx_pkts, now);
1406                         continue;
1407                 }
1408
1409                 delta = jiffies_to_msecs(now - aic->jiffies);
1410                 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1411                         (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1412                 eqd = (pps / 15000) << 2;
1413
1414                 if (eqd < 8)
1415                         eqd = 0;
1416                 eqd = min_t(u32, eqd, aic->max_eqd);
1417                 eqd = max_t(u32, eqd, aic->min_eqd);
1418
1419                 be_aic_update(aic, rx_pkts, tx_pkts, now);
1420 modify_eqd:
1421                 if (eqd != aic->prev_eqd) {
1422                         set_eqd[num].delay_multiplier = (eqd * 65) / 100;
1423                         set_eqd[num].eq_id = eqo->q.id;
1424                         aic->prev_eqd = eqd;
1425                         num++;
1426                 }
1427         }
1428
1429         if (num)
1430                 be_cmd_modify_eqd(adapter, set_eqd, num);
1431 }
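
/* Worked example of the adaptive EQD math above: if an EQ's queues saw
 * 45000 rx_pkts and 15000 tx_reqs over a delta of 1000 ms, then
 * pps = 45000 + 15000 = 60000 and eqd = (60000 / 15000) << 2 = 16
 * (values below 8 are zeroed, then clamped to [min_eqd, max_eqd]).
 * If 16 differs from prev_eqd, the EQ is reprogrammed with
 * delay_multiplier = (16 * 65) / 100 = 10.
 */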
1432
1433 static void be_rx_stats_update(struct be_rx_obj *rxo,
1434                 struct be_rx_compl_info *rxcp)
1435 {
1436         struct be_rx_stats *stats = rx_stats(rxo);
1437
1438         u64_stats_update_begin(&stats->sync);
1439         stats->rx_compl++;
1440         stats->rx_bytes += rxcp->pkt_size;
1441         stats->rx_pkts++;
1442         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1443                 stats->rx_mcast_pkts++;
1444         if (rxcp->err)
1445                 stats->rx_compl_err++;
1446         u64_stats_update_end(&stats->sync);
1447 }
1448
1449 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1450 {
1451         /* L4 checksum is not reliable for non-TCP/UDP packets.
1452          * Also ignore ipcksm for ipv6 pkts */
1453         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1454                                 (rxcp->ip_csum || rxcp->ipv6);
1455 }
1456
1457 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
1458 {
1459         struct be_adapter *adapter = rxo->adapter;
1460         struct be_rx_page_info *rx_page_info;
1461         struct be_queue_info *rxq = &rxo->q;
1462         u16 frag_idx = rxq->tail;
1463
1464         rx_page_info = &rxo->page_info_tbl[frag_idx];
1465         BUG_ON(!rx_page_info->page);
1466
1467         if (rx_page_info->last_frag) {
1468                 dma_unmap_page(&adapter->pdev->dev,
1469                                dma_unmap_addr(rx_page_info, bus),
1470                                adapter->big_page_size, DMA_FROM_DEVICE);
1471                 rx_page_info->last_frag = false;
1472         } else {
1473                 dma_sync_single_for_cpu(&adapter->pdev->dev,
1474                                         dma_unmap_addr(rx_page_info, bus),
1475                                         rx_frag_size, DMA_FROM_DEVICE);
1476         }
1477
1478         queue_tail_inc(rxq);
1479         atomic_dec(&rxq->used);
1480         return rx_page_info;
1481 }
1482
1483 /* Throw away the data in the Rx completion */
1484 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1485                                 struct be_rx_compl_info *rxcp)
1486 {
1487         struct be_rx_page_info *page_info;
1488         u16 i, num_rcvd = rxcp->num_rcvd;
1489
1490         for (i = 0; i < num_rcvd; i++) {
1491                 page_info = get_rx_page_info(rxo);
1492                 put_page(page_info->page);
1493                 memset(page_info, 0, sizeof(*page_info));
1494         }
1495 }
1496
1497 /*
1498  * skb_fill_rx_data forms a complete skb for an Ethernet frame
1499  * indicated by rxcp.
1500  */
1501 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1502                              struct be_rx_compl_info *rxcp)
1503 {
1504         struct be_rx_page_info *page_info;
1505         u16 i, j;
1506         u16 hdr_len, curr_frag_len, remaining;
1507         u8 *start;
1508
1509         page_info = get_rx_page_info(rxo);
1510         start = page_address(page_info->page) + page_info->page_offset;
1511         prefetch(start);
1512
1513         /* Copy data in the first descriptor of this completion */
1514         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1515
1516         skb->len = curr_frag_len;
1517         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1518                 memcpy(skb->data, start, curr_frag_len);
1519                 /* Complete packet has now been moved to data */
1520                 put_page(page_info->page);
1521                 skb->data_len = 0;
1522                 skb->tail += curr_frag_len;
1523         } else {
1524                 hdr_len = ETH_HLEN;
1525                 memcpy(skb->data, start, hdr_len);
1526                 skb_shinfo(skb)->nr_frags = 1;
1527                 skb_frag_set_page(skb, 0, page_info->page);
1528                 skb_shinfo(skb)->frags[0].page_offset =
1529                                         page_info->page_offset + hdr_len;
1530                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1531                 skb->data_len = curr_frag_len - hdr_len;
1532                 skb->truesize += rx_frag_size;
1533                 skb->tail += hdr_len;
1534         }
1535         page_info->page = NULL;
1536
1537         if (rxcp->pkt_size <= rx_frag_size) {
1538                 BUG_ON(rxcp->num_rcvd != 1);
1539                 return;
1540         }
1541
1542         /* More frags present for this completion */
1543         remaining = rxcp->pkt_size - curr_frag_len;
1544         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1545                 page_info = get_rx_page_info(rxo);
1546                 curr_frag_len = min(remaining, rx_frag_size);
1547
1548                 /* Coalesce all frags from the same physical page in one slot */
1549                 if (page_info->page_offset == 0) {
1550                         /* Fresh page */
1551                         j++;
1552                         skb_frag_set_page(skb, j, page_info->page);
1553                         skb_shinfo(skb)->frags[j].page_offset =
1554                                                         page_info->page_offset;
1555                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1556                         skb_shinfo(skb)->nr_frags++;
1557                 } else {
1558                         put_page(page_info->page);
1559                 }
1560
1561                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1562                 skb->len += curr_frag_len;
1563                 skb->data_len += curr_frag_len;
1564                 skb->truesize += rx_frag_size;
1565                 remaining -= curr_frag_len;
1566                 page_info->page = NULL;
1567         }
1568         BUG_ON(j > MAX_SKB_FRAGS);
1569 }
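
/* Example with the default rx_frag_size of 2048: a 5000-byte frame is
 * indicated with num_rcvd = 3.  ETH_HLEN (14) bytes are copied into the
 * skb's linear area, frag[0] keeps the remaining 2034 bytes of the first
 * fragment, and the loop above appends the other 2048 + 904 bytes,
 * coalescing fragments that share a physical page into a single slot.
 */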
1570
1571 /* Process the RX completion indicated by rxcp when GRO is disabled */
1572 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
1573                                 struct be_rx_compl_info *rxcp)
1574 {
1575         struct be_adapter *adapter = rxo->adapter;
1576         struct net_device *netdev = adapter->netdev;
1577         struct sk_buff *skb;
1578
1579         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1580         if (unlikely(!skb)) {
1581                 rx_stats(rxo)->rx_drops_no_skbs++;
1582                 be_rx_compl_discard(rxo, rxcp);
1583                 return;
1584         }
1585
1586         skb_fill_rx_data(rxo, skb, rxcp);
1587
1588         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1589                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1590         else
1591                 skb_checksum_none_assert(skb);
1592
1593         skb->protocol = eth_type_trans(skb, netdev);
1594         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1595         if (netdev->features & NETIF_F_RXHASH)
1596                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1597         skb_mark_napi_id(skb, napi);
1598
1599         if (rxcp->vlanf)
1600                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1601
1602         netif_receive_skb(skb);
1603 }
1604
1605 /* Process the RX completion indicated by rxcp when GRO is enabled */
1606 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1607                                     struct napi_struct *napi,
1608                                     struct be_rx_compl_info *rxcp)
1609 {
1610         struct be_adapter *adapter = rxo->adapter;
1611         struct be_rx_page_info *page_info;
1612         struct sk_buff *skb = NULL;
1613         u16 remaining, curr_frag_len;
1614         u16 i, j;
1615
1616         skb = napi_get_frags(napi);
1617         if (!skb) {
1618                 be_rx_compl_discard(rxo, rxcp);
1619                 return;
1620         }
1621
1622         remaining = rxcp->pkt_size;
1623         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1624                 page_info = get_rx_page_info(rxo);
1625
1626                 curr_frag_len = min(remaining, rx_frag_size);
1627
1628                 /* Coalesce all frags from the same physical page in one slot */
1629                 if (i == 0 || page_info->page_offset == 0) {
1630                         /* First frag or Fresh page */
1631                         j++;
1632                         skb_frag_set_page(skb, j, page_info->page);
1633                         skb_shinfo(skb)->frags[j].page_offset =
1634                                                         page_info->page_offset;
1635                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1636                 } else {
1637                         put_page(page_info->page);
1638                 }
1639                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1640                 skb->truesize += rx_frag_size;
1641                 remaining -= curr_frag_len;
1642                 memset(page_info, 0, sizeof(*page_info));
1643         }
1644         BUG_ON(j > MAX_SKB_FRAGS);
1645
1646         skb_shinfo(skb)->nr_frags = j + 1;
1647         skb->len = rxcp->pkt_size;
1648         skb->data_len = rxcp->pkt_size;
1649         skb->ip_summed = CHECKSUM_UNNECESSARY;
1650         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1651         if (adapter->netdev->features & NETIF_F_RXHASH)
1652                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1653         skb_mark_napi_id(skb, napi);
1654
1655         if (rxcp->vlanf)
1656                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1657
1658         napi_gro_frags(napi);
1659 }
1660
1661 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1662                                  struct be_rx_compl_info *rxcp)
1663 {
1664         rxcp->pkt_size =
1665                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1666         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1667         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1668         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1669         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1670         rxcp->ip_csum =
1671                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1672         rxcp->l4_csum =
1673                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1674         rxcp->ipv6 =
1675                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1676         rxcp->num_rcvd =
1677                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1678         rxcp->pkt_type =
1679                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1680         rxcp->rss_hash =
1681                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1682         if (rxcp->vlanf) {
1683                 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
1684                                           compl);
1685                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1686                                                compl);
1687         }
1688         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1689 }
1690
1691 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1692                                  struct be_rx_compl_info *rxcp)
1693 {
1694         rxcp->pkt_size =
1695                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1696         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1697         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1698         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1699         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1700         rxcp->ip_csum =
1701                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1702         rxcp->l4_csum =
1703                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1704         rxcp->ipv6 =
1705                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1706         rxcp->num_rcvd =
1707                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1708         rxcp->pkt_type =
1709                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1710         rxcp->rss_hash =
1711                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1712         if (rxcp->vlanf) {
1713                 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
1714                                           compl);
1715                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1716                                                compl);
1717         }
1718         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1719         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1720                                       ip_frag, compl);
1721 }
1722
1723 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1724 {
1725         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1726         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1727         struct be_adapter *adapter = rxo->adapter;
1728
1729         /* For checking the valid bit it is OK to use either definition, as the
1730          * valid bit is at the same position in both v0 and v1 Rx compl */
1731         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1732                 return NULL;
1733
1734         rmb();
1735         be_dws_le_to_cpu(compl, sizeof(*compl));
1736
1737         if (adapter->be3_native)
1738                 be_parse_rx_compl_v1(compl, rxcp);
1739         else
1740                 be_parse_rx_compl_v0(compl, rxcp);
1741
1742         if (rxcp->ip_frag)
1743                 rxcp->l4_csum = 0;
1744
1745         if (rxcp->vlanf) {
1746                 /* In QNQ modes, if qnq bit is not set, then the packet was
1747                  * tagged only with the transparent outer vlan-tag and must
1748                  * not be treated as a vlan packet by host
1749                  */
1750                 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
1751                         rxcp->vlanf = 0;
1752
1753                 if (!lancer_chip(adapter))
1754                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1755
1756                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1757                     !adapter->vlan_tag[rxcp->vlan_tag])
1758                         rxcp->vlanf = 0;
1759         }
1760
1761         /* As the compl has been parsed, reset it; we won't touch it again */
1762         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1763
1764         queue_tail_inc(&rxo->cq);
1765         return rxcp;
1766 }
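
/* The function above follows the usual completion-ring handshake: test
 * the valid bit, rmb() so the rest of the entry is not read before HW
 * finished writing it, parse, then clear the valid bit so a wrapped-around
 * entry is not mistaken for a new completion.  A minimal sketch of that
 * pattern (compiled out; entry_valid()/entry_clear() are hypothetical
 * placeholders, not driver symbols):
 */
#if 0
	while (entry_valid(queue_tail_node(cq))) {
		rmb();		/* read the entry only after the valid bit */
		/* ... parse the entry ... */
		entry_clear(queue_tail_node(cq));
		queue_tail_inc(cq);
	}
#endif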
1767
1768 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1769 {
1770         u32 order = get_order(size);
1771
1772         if (order > 0)
1773                 gfp |= __GFP_COMP;
1774         return alloc_pages(gfp, order);
1775 }
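
/* Example: with 4K pages and the default rx_frag_size of 2048,
 * get_order(2048) = 0 and big_page_size is a single 4K page holding two
 * fragments; __GFP_COMP only comes into play for order > 0, e.g.
 * rx_frag_size = 8192 yields an order-1 compound page.
 */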
1776
1777 /*
1778  * Allocate a page, split it to fragments of size rx_frag_size and post as
1779  * receive buffers to BE
1780  */
1781 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1782 {
1783         struct be_adapter *adapter = rxo->adapter;
1784         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1785         struct be_queue_info *rxq = &rxo->q;
1786         struct page *pagep = NULL;
1787         struct device *dev = &adapter->pdev->dev;
1788         struct be_eth_rx_d *rxd;
1789         u64 page_dmaaddr = 0, frag_dmaaddr;
1790         u32 posted, page_offset = 0;
1791
1792         page_info = &rxo->page_info_tbl[rxq->head];
1793         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1794                 if (!pagep) {
1795                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1796                         if (unlikely(!pagep)) {
1797                                 rx_stats(rxo)->rx_post_fail++;
1798                                 break;
1799                         }
1800                         page_dmaaddr = dma_map_page(dev, pagep, 0,
1801                                                     adapter->big_page_size,
1802                                                     DMA_FROM_DEVICE);
1803                         if (dma_mapping_error(dev, page_dmaaddr)) {
1804                                 put_page(pagep);
1805                                 pagep = NULL;
1806                                 rx_stats(rxo)->rx_post_fail++;
1807                                 break;
1808                         }
1809                         page_offset = 0;
1810                 } else {
1811                         get_page(pagep);
1812                         page_offset += rx_frag_size;
1813                 }
1814                 page_info->page_offset = page_offset;
1815                 page_info->page = pagep;
1816
1817                 rxd = queue_head_node(rxq);
1818                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1819                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1820                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1821
1822                 /* Any space left in the current big page for another frag? */
1823                 if ((page_offset + rx_frag_size + rx_frag_size) >
1824                                         adapter->big_page_size) {
1825                         pagep = NULL;
1826                         page_info->last_frag = true;
1827                         dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1828                 } else {
1829                         dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
1830                 }
1831
1832                 prev_page_info = page_info;
1833                 queue_head_inc(rxq);
1834                 page_info = &rxo->page_info_tbl[rxq->head];
1835         }
1836
1837         /* Mark the last frag of a page when we break out of the above loop
1838          * with no more slots available in the RXQ
1839          */
1840         if (pagep) {
1841                 prev_page_info->last_frag = true;
1842                 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1843         }
1844
1845         if (posted) {
1846                 atomic_add(posted, &rxq->used);
1847                 if (rxo->rx_post_starved)
1848                         rxo->rx_post_starved = false;
1849                 be_rxq_notify(adapter, rxq->id, posted);
1850         } else if (atomic_read(&rxq->used) == 0) {
1851                 /* Let be_worker replenish when memory is available */
1852                 rxo->rx_post_starved = true;
1853         }
1854 }
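
/* Continuing the default layout (4K big page, 2048-byte frags): each
 * mapped page yields two RX descriptors.  The first frag holds the
 * reference taken at allocation, the second takes one via get_page(),
 * and the "page_offset + 2 * rx_frag_size > big_page_size" test above
 * marks the second frag as last_frag so that get_rx_page_info() unmaps
 * the whole page exactly once.
 */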
1855
1856 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1857 {
1858         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1859
1860         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1861                 return NULL;
1862
1863         rmb();
1864         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1865
1866         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1867
1868         queue_tail_inc(tx_cq);
1869         return txcp;
1870 }
1871
1872 static u16 be_tx_compl_process(struct be_adapter *adapter,
1873                 struct be_tx_obj *txo, u16 last_index)
1874 {
1875         struct be_queue_info *txq = &txo->q;
1876         struct be_eth_wrb *wrb;
1877         struct sk_buff **sent_skbs = txo->sent_skb_list;
1878         struct sk_buff *sent_skb;
1879         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1880         bool unmap_skb_hdr = true;
1881
1882         sent_skb = sent_skbs[txq->tail];
1883         BUG_ON(!sent_skb);
1884         sent_skbs[txq->tail] = NULL;
1885
1886         /* skip header wrb */
1887         queue_tail_inc(txq);
1888
1889         do {
1890                 cur_index = txq->tail;
1891                 wrb = queue_tail_node(txq);
1892                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1893                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1894                 unmap_skb_hdr = false;
1895
1896                 num_wrbs++;
1897                 queue_tail_inc(txq);
1898         } while (cur_index != last_index);
1899
1900         kfree_skb(sent_skb);
1901         return num_wrbs;
1902 }
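
/* Example: an skb posted as one header wrb plus two data wrbs completes
 * with num_wrbs starting at 1 for the header; the loop unmaps the two
 * data wrbs and the caller subtracts the returned 3 from txq->used.
 */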
1903
1904 /* Return the number of events in the event queue */
1905 static inline int events_get(struct be_eq_obj *eqo)
1906 {
1907         struct be_eq_entry *eqe;
1908         int num = 0;
1909
1910         do {
1911                 eqe = queue_tail_node(&eqo->q);
1912                 if (eqe->evt == 0)
1913                         break;
1914
1915                 rmb();
1916                 eqe->evt = 0;
1917                 num++;
1918                 queue_tail_inc(&eqo->q);
1919         } while (true);
1920
1921         return num;
1922 }
1923
1924 /* Leaves the EQ in disarmed state */
1925 static void be_eq_clean(struct be_eq_obj *eqo)
1926 {
1927         int num = events_get(eqo);
1928
1929         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1930 }
1931
1932 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1933 {
1934         struct be_rx_page_info *page_info;
1935         struct be_queue_info *rxq = &rxo->q;
1936         struct be_queue_info *rx_cq = &rxo->cq;
1937         struct be_rx_compl_info *rxcp;
1938         struct be_adapter *adapter = rxo->adapter;
1939         int flush_wait = 0;
1940
1941         /* Consume pending rx completions.
1942          * Wait for the flush completion (identified by zero num_rcvd)
1943          * to arrive. Notify CQ even when there are no more CQ entries
1944          * for HW to flush partially coalesced CQ entries.
1945          * In Lancer, there is no need to wait for flush compl.
1946          */
1947         for (;;) {
1948                 rxcp = be_rx_compl_get(rxo);
1949                 if (rxcp == NULL) {
1950                         if (lancer_chip(adapter))
1951                                 break;
1952
1953                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1954                                 dev_warn(&adapter->pdev->dev,
1955                                          "did not receive flush compl\n");
1956                                 break;
1957                         }
1958                         be_cq_notify(adapter, rx_cq->id, true, 0);
1959                         mdelay(1);
1960                 } else {
1961                         be_rx_compl_discard(rxo, rxcp);
1962                         be_cq_notify(adapter, rx_cq->id, false, 1);
1963                         if (rxcp->num_rcvd == 0)
1964                                 break;
1965                 }
1966         }
1967
1968         /* After cleanup, leave the CQ in unarmed state */
1969         be_cq_notify(adapter, rx_cq->id, false, 0);
1970
1971         /* Then free posted rx buffers that were not used */
1972         while (atomic_read(&rxq->used) > 0) {
1973                 page_info = get_rx_page_info(rxo);
1974                 put_page(page_info->page);
1975                 memset(page_info, 0, sizeof(*page_info));
1976         }
1977         BUG_ON(atomic_read(&rxq->used));
1978         rxq->tail = rxq->head = 0;
1979 }
1980
1981 static void be_tx_compl_clean(struct be_adapter *adapter)
1982 {
1983         struct be_tx_obj *txo;
1984         struct be_queue_info *txq;
1985         struct be_eth_tx_compl *txcp;
1986         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1987         struct sk_buff *sent_skb;
1988         bool dummy_wrb;
1989         int i, pending_txqs;
1990
1991         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1992         do {
1993                 pending_txqs = adapter->num_tx_qs;
1994
1995                 for_all_tx_queues(adapter, txo, i) {
1996                         txq = &txo->q;
1997                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1998                                 end_idx =
1999                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
2000                                                       wrb_index, txcp);
2001                                 num_wrbs += be_tx_compl_process(adapter, txo,
2002                                                                 end_idx);
2003                                 cmpl++;
2004                         }
2005                         if (cmpl) {
2006                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2007                                 atomic_sub(num_wrbs, &txq->used);
2008                                 cmpl = 0;
2009                                 num_wrbs = 0;
2010                         }
2011                         if (atomic_read(&txq->used) == 0)
2012                                 pending_txqs--;
2013                 }
2014
2015                 if (pending_txqs == 0 || ++timeo > 200)
2016                         break;
2017
2018                 mdelay(1);
2019         } while (true);
2020
2021         for_all_tx_queues(adapter, txo, i) {
2022                 txq = &txo->q;
2023                 if (atomic_read(&txq->used))
2024                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2025                                 atomic_read(&txq->used));
2026
2027                 /* free posted tx for which compls will never arrive */
2028                 while (atomic_read(&txq->used)) {
2029                         sent_skb = txo->sent_skb_list[txq->tail];
2030                         end_idx = txq->tail;
2031                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2032                                                    &dummy_wrb);
2033                         index_adv(&end_idx, num_wrbs - 1, txq->len);
2034                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2035                         atomic_sub(num_wrbs, &txq->used);
2036                 }
2037         }
2038 }
2039
2040 static void be_evt_queues_destroy(struct be_adapter *adapter)
2041 {
2042         struct be_eq_obj *eqo;
2043         int i;
2044
2045         for_all_evt_queues(adapter, eqo, i) {
2046                 if (eqo->q.created) {
2047                         be_eq_clean(eqo);
2048                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2049                         napi_hash_del(&eqo->napi);
2050                         netif_napi_del(&eqo->napi);
2051                 }
2052                 be_queue_free(adapter, &eqo->q);
2053         }
2054 }
2055
2056 static int be_evt_queues_create(struct be_adapter *adapter)
2057 {
2058         struct be_queue_info *eq;
2059         struct be_eq_obj *eqo;
2060         struct be_aic_obj *aic;
2061         int i, rc;
2062
2063         adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2064                                     adapter->cfg_num_qs);
2065
2066         for_all_evt_queues(adapter, eqo, i) {
2067                 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2068                                BE_NAPI_WEIGHT);
2069                 napi_hash_add(&eqo->napi);
2070                 aic = &adapter->aic_obj[i];
2071                 eqo->adapter = adapter;
2072                 eqo->tx_budget = BE_TX_BUDGET;
2073                 eqo->idx = i;
2074                 aic->max_eqd = BE_MAX_EQD;
2075                 aic->enable = true;
2076
2077                 eq = &eqo->q;
2078                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2079                                         sizeof(struct be_eq_entry));
2080                 if (rc)
2081                         return rc;
2082
2083                 rc = be_cmd_eq_create(adapter, eqo);
2084                 if (rc)
2085                         return rc;
2086         }
2087         return 0;
2088 }
2089
2090 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2091 {
2092         struct be_queue_info *q;
2093
2094         q = &adapter->mcc_obj.q;
2095         if (q->created)
2096                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2097         be_queue_free(adapter, q);
2098
2099         q = &adapter->mcc_obj.cq;
2100         if (q->created)
2101                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2102         be_queue_free(adapter, q);
2103 }
2104
2105 /* Must be called only after TX qs are created as MCC shares TX EQ */
2106 static int be_mcc_queues_create(struct be_adapter *adapter)
2107 {
2108         struct be_queue_info *q, *cq;
2109
2110         cq = &adapter->mcc_obj.cq;
2111         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2112                         sizeof(struct be_mcc_compl)))
2113                 goto err;
2114
2115         /* Use the default EQ for MCC completions */
2116         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2117                 goto mcc_cq_free;
2118
2119         q = &adapter->mcc_obj.q;
2120         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2121                 goto mcc_cq_destroy;
2122
2123         if (be_cmd_mccq_create(adapter, q, cq))
2124                 goto mcc_q_free;
2125
2126         return 0;
2127
2128 mcc_q_free:
2129         be_queue_free(adapter, q);
2130 mcc_cq_destroy:
2131         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2132 mcc_cq_free:
2133         be_queue_free(adapter, cq);
2134 err:
2135         return -1;
2136 }
2137
2138 static void be_tx_queues_destroy(struct be_adapter *adapter)
2139 {
2140         struct be_queue_info *q;
2141         struct be_tx_obj *txo;
2142         u8 i;
2143
2144         for_all_tx_queues(adapter, txo, i) {
2145                 q = &txo->q;
2146                 if (q->created)
2147                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2148                 be_queue_free(adapter, q);
2149
2150                 q = &txo->cq;
2151                 if (q->created)
2152                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2153                 be_queue_free(adapter, q);
2154         }
2155 }
2156
2157 static int be_tx_qs_create(struct be_adapter *adapter)
2158 {
2159         struct be_queue_info *cq, *eq;
2160         struct be_tx_obj *txo;
2161         int status, i;
2162
2163         adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2164
2165         for_all_tx_queues(adapter, txo, i) {
2166                 cq = &txo->cq;
2167                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2168                                         sizeof(struct be_eth_tx_compl));
2169                 if (status)
2170                         return status;
2171
2172                 u64_stats_init(&txo->stats.sync);
2173                 u64_stats_init(&txo->stats.sync_compl);
2174
2175                 /* If num_evt_qs is less than num_tx_qs, then more than
2176                  * one txq shares an eq
2177                  */
2178                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2179                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2180                 if (status)
2181                         return status;
2182
2183                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2184                                         sizeof(struct be_eth_wrb));
2185                 if (status)
2186                         return status;
2187
2188                 status = be_cmd_txq_create(adapter, txo);
2189                 if (status)
2190                         return status;
2191         }
2192
2193         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2194                  adapter->num_tx_qs);
2195         return 0;
2196 }
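
/* Example of the "i % num_evt_qs" mapping above: with 8 TX queues on 4
 * EQs, txq0/txq4 share eq0, txq1/txq5 share eq1, and so on; when
 * num_evt_qs >= num_tx_qs every TXQ gets its own EQ.
 */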
2197
2198 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2199 {
2200         struct be_queue_info *q;
2201         struct be_rx_obj *rxo;
2202         int i;
2203
2204         for_all_rx_queues(adapter, rxo, i) {
2205                 q = &rxo->cq;
2206                 if (q->created)
2207                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2208                 be_queue_free(adapter, q);
2209         }
2210 }
2211
2212 static int be_rx_cqs_create(struct be_adapter *adapter)
2213 {
2214         struct be_queue_info *eq, *cq;
2215         struct be_rx_obj *rxo;
2216         int rc, i;
2217
2218         /* We can create as many RSS rings as there are EQs. */
2219         adapter->num_rx_qs = adapter->num_evt_qs;
2220
2221         /* We'll use RSS only if at least 2 RSS rings are supported.
2222          * When RSS is used, we'll need a default RXQ for non-IP traffic.
2223          */
2224         if (adapter->num_rx_qs > 1)
2225                 adapter->num_rx_qs++;
2226
2227         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2228         for_all_rx_queues(adapter, rxo, i) {
2229                 rxo->adapter = adapter;
2230                 cq = &rxo->cq;
2231                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2232                                 sizeof(struct be_eth_rx_compl));
2233                 if (rc)
2234                         return rc;
2235
2236                 u64_stats_init(&rxo->stats.sync);
2237                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2238                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2239                 if (rc)
2240                         return rc;
2241         }
2242
2243         dev_info(&adapter->pdev->dev,
2244                  "created %d RSS queue(s) and 1 default RX queue\n",
2245                  adapter->num_rx_qs - 1);
2246         return 0;
2247 }
2248
2249 static irqreturn_t be_intx(int irq, void *dev)
2250 {
2251         struct be_eq_obj *eqo = dev;
2252         struct be_adapter *adapter = eqo->adapter;
2253         int num_evts = 0;
2254
2255         /* IRQ is not expected when NAPI is scheduled as the EQ
2256          * will not be armed.
2257          * But, this can happen on Lancer INTx where it takes
2258          * a while to de-assert INTx or in BE2 where occasionally
2259          * an interrupt may be raised even when EQ is unarmed.
2260          * If NAPI is already scheduled, then counting & notifying
2261          * events will orphan them.
2262          */
2263         if (napi_schedule_prep(&eqo->napi)) {
2264                 num_evts = events_get(eqo);
2265                 __napi_schedule(&eqo->napi);
2266                 if (num_evts)
2267                         eqo->spurious_intr = 0;
2268         }
2269         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2270
2271         /* Return IRQ_HANDLED only for the first spurious intr
2272          * after a valid intr to stop the kernel from branding
2273          * this irq as a bad one!
2274          */
2275         if (num_evts || eqo->spurious_intr++ == 0)
2276                 return IRQ_HANDLED;
2277         else
2278                 return IRQ_NONE;
2279 }
2280
2281 static irqreturn_t be_msix(int irq, void *dev)
2282 {
2283         struct be_eq_obj *eqo = dev;
2284
2285         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2286         napi_schedule(&eqo->napi);
2287         return IRQ_HANDLED;
2288 }
2289
2290 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2291 {
2292         return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
2293 }
2294
2295 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2296                         int budget, int polling)
2297 {
2298         struct be_adapter *adapter = rxo->adapter;
2299         struct be_queue_info *rx_cq = &rxo->cq;
2300         struct be_rx_compl_info *rxcp;
2301         u32 work_done;
2302
2303         for (work_done = 0; work_done < budget; work_done++) {
2304                 rxcp = be_rx_compl_get(rxo);
2305                 if (!rxcp)
2306                         break;
2307
2308                 /* Is it a flush compl that has no data? */
2309                 if (unlikely(rxcp->num_rcvd == 0))
2310                         goto loop_continue;
2311
2312                 /* Discard compl with partial DMA Lancer B0 */
2313                 if (unlikely(!rxcp->pkt_size)) {
2314                         be_rx_compl_discard(rxo, rxcp);
2315                         goto loop_continue;
2316                 }
2317
2318                 /* On BE drop pkts that arrive due to imperfect filtering in
2319                  * promiscuous mode on some SKUs
2320                  */
2321                 if (unlikely(rxcp->port != adapter->port_num &&
2322                                 !lancer_chip(adapter))) {
2323                         be_rx_compl_discard(rxo, rxcp);
2324                         goto loop_continue;
2325                 }
2326
2327                 /* Don't do gro when we're busy_polling */
2328                 if (do_gro(rxcp) && polling != BUSY_POLLING)
2329                         be_rx_compl_process_gro(rxo, napi, rxcp);
2330                 else
2331                         be_rx_compl_process(rxo, napi, rxcp);
2332
2333 loop_continue:
2334                 be_rx_stats_update(rxo, rxcp);
2335         }
2336
2337         if (work_done) {
2338                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2339
2340                 /* When an rx-obj gets into post_starved state, just
2341                  * let be_worker do the posting.
2342                  */
2343                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2344                     !rxo->rx_post_starved)
2345                         be_post_rx_frags(rxo, GFP_ATOMIC);
2346         }
2347
2348         return work_done;
2349 }
2350
2351 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2352                           int budget, int idx)
2353 {
2354         struct be_eth_tx_compl *txcp;
2355         int num_wrbs = 0, work_done;
2356
2357         for (work_done = 0; work_done < budget; work_done++) {
2358                 txcp = be_tx_compl_get(&txo->cq);
2359                 if (!txcp)
2360                         break;
2361                 num_wrbs += be_tx_compl_process(adapter, txo,
2362                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2363                                         wrb_index, txcp));
2364         }
2365
2366         if (work_done) {
2367                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2368                 atomic_sub(num_wrbs, &txo->q.used);
2369
2370                 /* As Tx wrbs have been freed up, wake up netdev queue
2371                  * if it was stopped due to lack of tx wrbs.  */
2372                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2373                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2374                         netif_wake_subqueue(adapter->netdev, idx);
2375                 }
2376
2377                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2378                 tx_stats(txo)->tx_compl += work_done;
2379                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2380         }
2381         return (work_done < budget); /* Done */
2382 }
2383
2384 int be_poll(struct napi_struct *napi, int budget)
2385 {
2386         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2387         struct be_adapter *adapter = eqo->adapter;
2388         int max_work = 0, work, i, num_evts;
2389         struct be_rx_obj *rxo;
2390         bool tx_done;
2391
2392         num_evts = events_get(eqo);
2393
2394         /* Process all TXQs serviced by this EQ */
2395         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2396                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2397                                         eqo->tx_budget, i);
2398                 if (!tx_done)
2399                         max_work = budget;
2400         }
2401
2402         if (be_lock_napi(eqo)) {
2403                 /* This loop will iterate twice for EQ0 in which
2404          * completions of the last RXQ (default one) are also processed.
2405                  * For other EQs the loop iterates only once
2406                  */
2407                 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2408                         work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2409                         max_work = max(work, max_work);
2410                 }
2411                 be_unlock_napi(eqo);
2412         } else {
2413                 max_work = budget;
2414         }
2415
2416         if (is_mcc_eqo(eqo))
2417                 be_process_mcc(adapter);
2418
2419         if (max_work < budget) {
2420                 napi_complete(napi);
2421                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2422         } else {
2423                 /* As we'll continue in polling mode, count and clear events */
2424                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2425         }
2426         return max_work;
2427 }
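
/* The return value above implements the NAPI contract: max_work == budget
 * keeps this instance in polling mode (events counted, EQ left unarmed),
 * while max_work < budget completes NAPI and re-arms the EQ so the next
 * event raises an interrupt again.
 */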
2428
2429 #ifdef CONFIG_NET_RX_BUSY_POLL
2430 static int be_busy_poll(struct napi_struct *napi)
2431 {
2432         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2433         struct be_adapter *adapter = eqo->adapter;
2434         struct be_rx_obj *rxo;
2435         int i, work = 0;
2436
2437         if (!be_lock_busy_poll(eqo))
2438                 return LL_FLUSH_BUSY;
2439
2440         for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2441                 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2442                 if (work)
2443                         break;
2444         }
2445
2446         be_unlock_busy_poll(eqo);
2447         return work;
2448 }
2449 #endif
2450
2451 void be_detect_error(struct be_adapter *adapter)
2452 {
2453         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2454         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2455         u32 i;
2456         bool error_detected = false;
2457         struct device *dev = &adapter->pdev->dev;
2458         struct net_device *netdev = adapter->netdev;
2459
2460         if (be_hw_error(adapter))
2461                 return;
2462
2463         if (lancer_chip(adapter)) {
2464                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2465                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2466                         sliport_err1 = ioread32(adapter->db +
2467                                         SLIPORT_ERROR1_OFFSET);
2468                         sliport_err2 = ioread32(adapter->db +
2469                                         SLIPORT_ERROR2_OFFSET);
2470                         adapter->hw_error = true;
2471                         /* Do not log error messages if it's a FW reset */
2472                         if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2473                             sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2474                                 dev_info(dev, "Firmware update in progress\n");
2475                         } else {
2476                                 error_detected = true;
2477                                 dev_err(dev, "Error detected in the card\n");
2478                                 dev_err(dev, "ERR: sliport status 0x%x\n",
2479                                         sliport_status);
2480                                 dev_err(dev, "ERR: sliport error1 0x%x\n",
2481                                         sliport_err1);
2482                                 dev_err(dev, "ERR: sliport error2 0x%x\n",
2483                                         sliport_err2);
2484                         }
2485                 }
2486         } else {
2487                 pci_read_config_dword(adapter->pdev,
2488                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2489                 pci_read_config_dword(adapter->pdev,
2490                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2491                 pci_read_config_dword(adapter->pdev,
2492                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2493                 pci_read_config_dword(adapter->pdev,
2494                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2495
2496                 ue_lo = (ue_lo & ~ue_lo_mask);
2497                 ue_hi = (ue_hi & ~ue_hi_mask);
2498
2499                 /* On certain platforms BE hardware can indicate spurious UEs.
2500                  * Allow HW to stop working completely in case of a real UE.
2501                  * Hence hw_error is not set on UE detection, except on Skyhawk.
2502                  */
2503
2504                 if (ue_lo || ue_hi) {
2505                         error_detected = true;
2506                         dev_err(dev,
2507                                 "Unrecoverable Error detected in the adapter\n");
2508                         dev_err(dev, "Please reboot server to recover\n");
2509                         if (skyhawk_chip(adapter))
2510                                 adapter->hw_error = true;
2511                         for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2512                                 if (ue_lo & 1)
2513                                         dev_err(dev, "UE: %s bit set\n",
2514                                                 ue_status_low_desc[i]);
2515                         }
2516                         for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2517                                 if (ue_hi & 1)
2518                                         dev_err(dev, "UE: %s bit set\n",
2519                                                 ue_status_hi_desc[i]);
2520                         }
2521                 }
2522         }
2523         if (error_detected)
2524                 netif_carrier_off(netdev);
2525 }
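
/* Example of the UE decode above: a masked ue_lo of 0x9 has bits 0 and 3
 * set, so the shift-and-test loop logs the strings at indexes 0 and 3 of
 * ue_status_low_desc[].
 */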
2526
2527 static void be_msix_disable(struct be_adapter *adapter)
2528 {
2529         if (msix_enabled(adapter)) {
2530                 pci_disable_msix(adapter->pdev);
2531                 adapter->num_msix_vec = 0;
2532                 adapter->num_msix_roce_vec = 0;
2533         }
2534 }
2535
2536 static int be_msix_enable(struct be_adapter *adapter)
2537 {
2538         int i, num_vec;
2539         struct device *dev = &adapter->pdev->dev;
2540
2541         /* If RoCE is supported, program the max number of NIC vectors that
2542          * may be configured via set-channels, along with vectors needed for
2543          * RoCE. Else, just program the number we'll use initially.
2544          */
2545         if (be_roce_supported(adapter))
2546                 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2547                                 2 * num_online_cpus());
2548         else
2549                 num_vec = adapter->cfg_num_qs;
2550
2551         for (i = 0; i < num_vec; i++)
2552                 adapter->msix_entries[i].entry = i;
2553
2554         num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2555                                         MIN_MSIX_VECTORS, num_vec);
2556         if (num_vec < 0)
2557                 goto fail;
2558
2559         if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2560                 adapter->num_msix_roce_vec = num_vec / 2;
2561                 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2562                          adapter->num_msix_roce_vec);
2563         }
2564
2565         adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2566
2567         dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2568                  adapter->num_msix_vec);
2569         return 0;
2570
2571 fail:
2572         dev_warn(dev, "MSI-x enable failed\n");
2573
2574         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2575         if (!be_physfn(adapter))
2576                 return num_vec;
2577         return 0;
2578 }
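
/* Example: a RoCE-capable function with be_max_eqs() = 8 on a 16-CPU host
 * asks for min(16, 32) = 16 vectors.  If pci_enable_msix_range() grants
 * all 16, RoCE gets 16 / 2 = 8 and the NIC keeps the other 8; if fewer
 * than MIN_MSIX_VECTORS can be granted the call fails, and only a PF may
 * fall back to INTx.
 */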
2579
2580 static inline int be_msix_vec_get(struct be_adapter *adapter,
2581                                 struct be_eq_obj *eqo)
2582 {
2583         return adapter->msix_entries[eqo->msix_idx].vector;
2584 }
2585
2586 static int be_msix_register(struct be_adapter *adapter)
2587 {
2588         struct net_device *netdev = adapter->netdev;
2589         struct be_eq_obj *eqo;
2590         int status, i, vec;
2591
2592         for_all_evt_queues(adapter, eqo, i) {
2593                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2594                 vec = be_msix_vec_get(adapter, eqo);
2595                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2596                 if (status)
2597                         goto err_msix;
2598         }
2599
2600         return 0;
2601 err_msix:
2602         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2603                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2604         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2605                 status);
2606         be_msix_disable(adapter);
2607         return status;
2608 }
2609
2610 static int be_irq_register(struct be_adapter *adapter)
2611 {
2612         struct net_device *netdev = adapter->netdev;
2613         int status;
2614
2615         if (msix_enabled(adapter)) {
2616                 status = be_msix_register(adapter);
2617                 if (status == 0)
2618                         goto done;
2619                 /* INTx is not supported for VF */
2620                 if (!be_physfn(adapter))
2621                         return status;
2622         }
2623
2624         /* INTx: only the first EQ is used */
2625         netdev->irq = adapter->pdev->irq;
2626         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2627                              &adapter->eq_obj[0]);
2628         if (status) {
2629                 dev_err(&adapter->pdev->dev,
2630                         "INTx request IRQ failed - err %d\n", status);
2631                 return status;
2632         }
2633 done:
2634         adapter->isr_registered = true;
2635         return 0;
2636 }
2637
2638 static void be_irq_unregister(struct be_adapter *adapter)
2639 {
2640         struct net_device *netdev = adapter->netdev;
2641         struct be_eq_obj *eqo;
2642         int i;
2643
2644         if (!adapter->isr_registered)
2645                 return;
2646
2647         /* INTx */
2648         if (!msix_enabled(adapter)) {
2649                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2650                 goto done;
2651         }
2652
2653         /* MSIx */
2654         for_all_evt_queues(adapter, eqo, i)
2655                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2656
2657 done:
2658         adapter->isr_registered = false;
2659 }
2660
2661 static void be_rx_qs_destroy(struct be_adapter *adapter)
2662 {
2663         struct be_queue_info *q;
2664         struct be_rx_obj *rxo;
2665         int i;
2666
2667         for_all_rx_queues(adapter, rxo, i) {
2668                 q = &rxo->q;
2669                 if (q->created) {
2670                         be_cmd_rxq_destroy(adapter, q);
2671                         be_rx_cq_clean(rxo);
2672                 }
2673                 be_queue_free(adapter, q);
2674         }
2675 }
2676
2677 static int be_close(struct net_device *netdev)
2678 {
2679         struct be_adapter *adapter = netdev_priv(netdev);
2680         struct be_eq_obj *eqo;
2681         int i;
2682
2683         be_roce_dev_close(adapter);
2684
2685         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2686                 for_all_evt_queues(adapter, eqo, i) {
2687                         napi_disable(&eqo->napi);
2688                         be_disable_busy_poll(eqo);
2689                 }
2690                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2691         }
2692
2693         be_async_mcc_disable(adapter);
2694
2695         /* Wait for all pending tx completions to arrive so that
2696          * all tx skbs are freed.
2697          */
2698         netif_tx_disable(netdev);
2699         be_tx_compl_clean(adapter);
2700
2701         be_rx_qs_destroy(adapter);
2702
2703         for (i = 1; i < (adapter->uc_macs + 1); i++)
2704                 be_cmd_pmac_del(adapter, adapter->if_handle,
2705                                 adapter->pmac_id[i], 0);
2706         adapter->uc_macs = 0;
2707
2708         for_all_evt_queues(adapter, eqo, i) {
2709                 if (msix_enabled(adapter))
2710                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2711                 else
2712                         synchronize_irq(netdev->irq);
2713                 be_eq_clean(eqo);
2714         }
2715
2716         be_irq_unregister(adapter);
2717
2718         return 0;
2719 }
2720
2721 static int be_rx_qs_create(struct be_adapter *adapter)
2722 {
2723         struct be_rx_obj *rxo;
2724         int rc, i, j;
2725         u8 rsstable[128];
2726
2727         for_all_rx_queues(adapter, rxo, i) {
2728                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2729                                     sizeof(struct be_eth_rx_d));
2730                 if (rc)
2731                         return rc;
2732         }
2733
2734         /* The FW would like the default RXQ to be created first */
2735         rxo = default_rxo(adapter);
2736         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2737                                adapter->if_handle, false, &rxo->rss_id);
2738         if (rc)
2739                 return rc;
2740
2741         for_all_rss_queues(adapter, rxo, i) {
2742                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2743                                        rx_frag_size, adapter->if_handle,
2744                                        true, &rxo->rss_id);
2745                 if (rc)
2746                         return rc;
2747         }
2748
2749         if (be_multi_rxq(adapter)) {
2750                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2751                         for_all_rss_queues(adapter, rxo, i) {
2752                                 if ((j + i) >= 128)
2753                                         break;
2754                                 rsstable[j + i] = rxo->rss_id;
2755                         }
2756                 }
2757                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2758                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2759
2760                 if (!BEx_chip(adapter))
2761                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2762                                                 RSS_ENABLE_UDP_IPV6;
2763         } else {
2764                 /* Disable RSS, if only default RX Q is created */
2765                 adapter->rss_flags = RSS_ENABLE_NONE;
2766         }
2767
2768         rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2769                                128);
2770         if (rc) {
2771                 adapter->rss_flags = RSS_ENABLE_NONE;
2772                 return rc;
2773         }
2774
2775         /* First time posting */
2776         for_all_rx_queues(adapter, rxo, i)
2777                 be_post_rx_frags(rxo, GFP_KERNEL);
2778         return 0;
2779 }
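
     /* A hedged, illustrative sketch (not driver code): the 128-entry
      * indirection table built by be_rx_qs_create() above is filled
      * round-robin with the rss_ids of the RSS rings, so ids {a, b, c}
      * yield the pattern a, b, c, a, b, c, ... Conceptually, the hardware
      * then picks an RX ring from the low 7 bits of the per-flow RSS hash:
      */
     static inline u8 example_rss_pick_ring(const u8 *rsstable, u32 flow_hash)
     {
             return rsstable[flow_hash & 127];       /* 128 table slots */
     }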
2780
2781 static int be_open(struct net_device *netdev)
2782 {
2783         struct be_adapter *adapter = netdev_priv(netdev);
2784         struct be_eq_obj *eqo;
2785         struct be_rx_obj *rxo;
2786         struct be_tx_obj *txo;
2787         u8 link_status;
2788         int status, i;
2789
2790         status = be_rx_qs_create(adapter);
2791         if (status)
2792                 goto err;
2793
2794         status = be_irq_register(adapter);
2795         if (status)
2796                 goto err;
2797
2798         for_all_rx_queues(adapter, rxo, i)
2799                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2800
2801         for_all_tx_queues(adapter, txo, i)
2802                 be_cq_notify(adapter, txo->cq.id, true, 0);
2803
2804         be_async_mcc_enable(adapter);
2805
2806         for_all_evt_queues(adapter, eqo, i) {
2807                 napi_enable(&eqo->napi);
2808                 be_enable_busy_poll(eqo);
2809                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2810         }
2811         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2812
2813         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2814         if (!status)
2815                 be_link_status_update(adapter, link_status);
2816
2817         netif_tx_start_all_queues(netdev);
2818         be_roce_dev_open(adapter);
2819         return 0;
2820 err:
2821         be_close(adapter->netdev);
2822         return -EIO;
2823 }
2824
2825 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2826 {
2827         struct be_dma_mem cmd;
2828         int status = 0;
2829         u8 mac[ETH_ALEN];
2830
2831         memset(mac, 0, ETH_ALEN);
2832
2833         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2834         cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2835                                      GFP_KERNEL);
2836         if (!cmd.va)
2837                 return -ENOMEM;
2838
2839         if (enable) {
2840                 status = pci_write_config_dword(adapter->pdev,
2841                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2842                 if (status) {
2843                         dev_err(&adapter->pdev->dev,
2844                                 "Could not enable Wake-on-LAN\n");
2845                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2846                                           cmd.dma);
2847                         return status;
2848                 }
2849                 status = be_cmd_enable_magic_wol(adapter,
2850                                 adapter->netdev->dev_addr, &cmd);
2851                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2852                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2853         } else {
2854                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2855                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2856                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2857         }
2858
2859         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2860         return status;
2861 }
2862
2863 /*
2864  * Generate a seed MAC address from the PF MAC address using jhash.
2865  * MAC addresses for the VFs are assigned incrementally, starting from the seed.
2866  * These addresses are programmed in the ASIC by the PF and the VF driver
2867  * queries for the MAC address during its probe.
2868  */
2869 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2870 {
2871         u32 vf;
2872         int status = 0;
2873         u8 mac[ETH_ALEN];
2874         struct be_vf_cfg *vf_cfg;
2875
2876         be_vf_eth_addr_generate(adapter, mac);
2877
2878         for_all_vfs(adapter, vf_cfg, vf) {
2879                 if (BEx_chip(adapter))
2880                         status = be_cmd_pmac_add(adapter, mac,
2881                                                  vf_cfg->if_handle,
2882                                                  &vf_cfg->pmac_id, vf + 1);
2883                 else
2884                         status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2885                                                 vf + 1);
2886
2887                 if (status)
2888                         dev_err(&adapter->pdev->dev,
2889                                 "MAC address assignment failed for VF %d\n", vf);
2890                 else
2891                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2892
2893                 mac[5] += 1;
2894         }
2895         return status;
2896 }
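
     /* Hedged illustration of the scheme described above:
      * be_vf_eth_addr_generate() (not shown in this section) derives the
      * seed from the PF MAC via jhash; VF n then receives seed + n in the
      * last octet, which is what the mac[5] += 1 step in the loop
      * implements. A minimal sketch:
      */
     static inline void example_vf_mac_from_seed(u8 *mac, const u8 *seed,
                                                 int vf)
     {
             memcpy(mac, seed, ETH_ALEN);
             mac[5] += vf;           /* note: wraps silently past 0xff */
     }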
2897
2898 static int be_vfs_mac_query(struct be_adapter *adapter)
2899 {
2900         int status, vf;
2901         u8 mac[ETH_ALEN];
2902         struct be_vf_cfg *vf_cfg;
2903
2904         for_all_vfs(adapter, vf_cfg, vf) {
2905                 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2906                                                mac, vf_cfg->if_handle,
2907                                        false, vf + 1);
2908                 if (status)
2909                         return status;
2910                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2911         }
2912         return 0;
2913 }
2914
2915 static void be_vf_clear(struct be_adapter *adapter)
2916 {
2917         struct be_vf_cfg *vf_cfg;
2918         u32 vf;
2919
2920         if (pci_vfs_assigned(adapter->pdev)) {
2921                 dev_warn(&adapter->pdev->dev,
2922                          "VFs are assigned to VMs: not disabling VFs\n");
2923                 goto done;
2924         }
2925
2926         pci_disable_sriov(adapter->pdev);
2927
2928         for_all_vfs(adapter, vf_cfg, vf) {
2929                 if (BEx_chip(adapter))
2930                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2931                                         vf_cfg->pmac_id, vf + 1);
2932                 else
2933                         be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2934                                        vf + 1);
2935
2936                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2937         }
2938 done:
2939         kfree(adapter->vf_cfg);
2940         adapter->num_vfs = 0;
2941 }
2942
2943 static void be_clear_queues(struct be_adapter *adapter)
2944 {
2945         be_mcc_queues_destroy(adapter);
2946         be_rx_cqs_destroy(adapter);
2947         be_tx_queues_destroy(adapter);
2948         be_evt_queues_destroy(adapter);
2949 }
2950
2951 static void be_cancel_worker(struct be_adapter *adapter)
2952 {
2953         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2954                 cancel_delayed_work_sync(&adapter->work);
2955                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2956         }
2957 }
2958
2959 static void be_mac_clear(struct be_adapter *adapter)
2960 {
2961         int i;
2962
2963         if (adapter->pmac_id) {
2964                 for (i = 0; i < (adapter->uc_macs + 1); i++)
2965                         be_cmd_pmac_del(adapter, adapter->if_handle,
2966                                         adapter->pmac_id[i], 0);
2967                 adapter->uc_macs = 0;
2968
2969                 kfree(adapter->pmac_id);
2970                 adapter->pmac_id = NULL;
2971         }
2972 }
2973
2974 static int be_clear(struct be_adapter *adapter)
2975 {
2976         be_cancel_worker(adapter);
2977
2978         if (sriov_enabled(adapter))
2979                 be_vf_clear(adapter);
2980
2981         /* delete the primary mac along with the uc-mac list */
2982         be_mac_clear(adapter);
2983
2984         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2985
2986         be_clear_queues(adapter);
2987
2988         be_msix_disable(adapter);
2989         return 0;
2990 }
2991
2992 static int be_vfs_if_create(struct be_adapter *adapter)
2993 {
2994         struct be_resources res = {0};
2995         struct be_vf_cfg *vf_cfg;
2996         u32 cap_flags, en_flags, vf;
2997         int status = 0;
2998
2999         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3000                     BE_IF_FLAGS_MULTICAST;
3001
3002         for_all_vfs(adapter, vf_cfg, vf) {
3003                 if (!BE3_chip(adapter)) {
3004                         status = be_cmd_get_profile_config(adapter, &res,
3005                                                            vf + 1);
3006                         if (!status)
3007                                 cap_flags = res.if_cap_flags;
3008                 }
3009
3010                 /* If a FW profile exists, then cap_flags are updated */
3011                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3012                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
3013                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3014                                           &vf_cfg->if_handle, vf + 1);
3015                 if (status)
3016                         goto err;
3017         }
3018 err:
3019         return status;
3020 }
3021
3022 static int be_vf_setup_init(struct be_adapter *adapter)
3023 {
3024         struct be_vf_cfg *vf_cfg;
3025         int vf;
3026
3027         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3028                                   GFP_KERNEL);
3029         if (!adapter->vf_cfg)
3030                 return -ENOMEM;
3031
3032         for_all_vfs(adapter, vf_cfg, vf) {
3033                 vf_cfg->if_handle = -1;
3034                 vf_cfg->pmac_id = -1;
3035         }
3036         return 0;
3037 }
3038
3039 static int be_vf_setup(struct be_adapter *adapter)
3040 {
3041         struct device *dev = &adapter->pdev->dev;
3042         struct be_vf_cfg *vf_cfg;
3043         int status, old_vfs, vf;
3044         u32 privileges;
3045         u16 lnk_speed;
3046
3047         old_vfs = pci_num_vf(adapter->pdev);
3048         if (old_vfs) {
3049                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3050                 if (old_vfs != num_vfs)
3051                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3052                 adapter->num_vfs = old_vfs;
3053         } else {
3054                 if (num_vfs > be_max_vfs(adapter))
3055                         dev_info(dev, "Device supports %d VFs and not %d\n",
3056                                  be_max_vfs(adapter), num_vfs);
3057                 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3058                 if (!adapter->num_vfs)
3059                         return 0;
3060         }
3061
3062         status = be_vf_setup_init(adapter);
3063         if (status)
3064                 goto err;
3065
3066         if (old_vfs) {
3067                 for_all_vfs(adapter, vf_cfg, vf) {
3068                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3069                         if (status)
3070                                 goto err;
3071                 }
3072         } else {
3073                 status = be_vfs_if_create(adapter);
3074                 if (status)
3075                         goto err;
3076         }
3077
3078         if (old_vfs) {
3079                 status = be_vfs_mac_query(adapter);
3080                 if (status)
3081                         goto err;
3082         } else {
3083                 status = be_vf_eth_addr_config(adapter);
3084                 if (status)
3085                         goto err;
3086         }
3087
3088         for_all_vfs(adapter, vf_cfg, vf) {
3089                 /* Allow VFs to program MAC/VLAN filters */
3090                 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3091                 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3092                         status = be_cmd_set_fn_privileges(adapter,
3093                                                           privileges |
3094                                                           BE_PRIV_FILTMGMT,
3095                                                           vf + 1);
3096                         if (!status)
3097                                 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3098                                          vf);
3099                 }
3100
3101                 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
3102                  * Allow the full available bandwidth.
3103                  */
3104                 if (BE3_chip(adapter) && !old_vfs)
3105                         be_cmd_set_qos(adapter, 1000, vf + 1);
3106
3107                 status = be_cmd_link_status_query(adapter, &lnk_speed,
3108                                                   NULL, vf + 1);
3109                 if (!status)
3110                         vf_cfg->tx_rate = lnk_speed;
3111
3112                 if (!old_vfs)
3113                         be_cmd_enable_vf(adapter, vf + 1);
3114         }
3115
3116         if (!old_vfs) {
3117                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3118                 if (status) {
3119                         dev_err(dev, "SRIOV enable failed\n");
3120                         adapter->num_vfs = 0;
3121                         goto err;
3122                 }
3123         }
3124         return 0;
3125 err:
3126         dev_err(dev, "VF setup failed\n");
3127         be_vf_clear(adapter);
3128         return status;
3129 }
3130
3131 /* Converting function_mode bits on BE3 to SH mc_type enums */
3132
3133 static u8 be_convert_mc_type(u32 function_mode)
3134 {
3135         if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3136                 return vNIC1;
3137         else if (function_mode & FLEX10_MODE)
3138                 return FLEX10;
3139         else if (function_mode & VNIC_MODE)
3140                 return vNIC2;
3141         else if (function_mode & UMC_ENABLED)
3142                 return UMC;
3143         else
3144                 return MC_NONE;
3145 }
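
     /* The decode above, in table form (VNIC_MODE, FLEX10_MODE and
      * UMC_ENABLED are function_mode bits):
      *
      *   VNIC_MODE + FLEX10_MODE  ->  vNIC1
      *   FLEX10_MODE only         ->  FLEX10
      *   VNIC_MODE only           ->  vNIC2
      *   UMC_ENABLED              ->  UMC
      *   none of the above        ->  MC_NONE
      */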
3146
3147 /* On BE2/BE3, the FW does not report the supported resource limits */
3148 static void BEx_get_resources(struct be_adapter *adapter,
3149                               struct be_resources *res)
3150 {
3151         struct pci_dev *pdev = adapter->pdev;
3152         bool use_sriov = false;
3153         int max_vfs;
3154
3155         max_vfs = pci_sriov_get_totalvfs(pdev);
3156
3157         if (BE3_chip(adapter) && sriov_want(adapter)) {
3158                 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3159                 use_sriov = res->max_vfs;
3160         }
3161
3162         if (be_physfn(adapter))
3163                 res->max_uc_mac = BE_UC_PMAC_COUNT;
3164         else
3165                 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3166
3167         adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3168
3169         if (be_is_mc(adapter)) {
3170                 /* Assume that there are 4 channels per port
3171                  * when multi-channel is enabled.
3172                  */
3173                 if (be_is_qnq_mode(adapter))
3174                         res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
3175                 else
3176                         /* In a non-qnq multichannel mode, the pvid
3177                          * takes up one vlan entry
3178                          */
3179                         res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3180         } else {
3181                 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3182         }
3183
3184         res->max_mcast_mac = BE_MAX_MC;
3185
3186         /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
3187         if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
3188             !be_physfn(adapter) || (adapter->port_num > 1))
3189                 res->max_tx_qs = 1;
3190         else
3191                 res->max_tx_qs = BE3_MAX_TX_QS;
3192
3193         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3194             !use_sriov && be_physfn(adapter))
3195                 res->max_rss_qs = (adapter->be3_native) ?
3196                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3197         res->max_rx_qs = res->max_rss_qs + 1;
3198
3199         if (be_physfn(adapter))
3200                 res->max_evt_qs = (max_vfs > 0) ?
3201                                         BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3202         else
3203                 res->max_evt_qs = 1;
3204
3205         res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3206         if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3207                 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3208 }
3209
3210 static void be_setup_init(struct be_adapter *adapter)
3211 {
3212         adapter->vlan_prio_bmap = 0xff;
3213         adapter->phy.link_speed = -1;
3214         adapter->if_handle = -1;
3215         adapter->be3_native = false;
3216         adapter->promiscuous = false;
3217         if (be_physfn(adapter))
3218                 adapter->cmd_privileges = MAX_PRIVILEGES;
3219         else
3220                 adapter->cmd_privileges = MIN_PRIVILEGES;
3221 }
3222
3223 static int be_get_resources(struct be_adapter *adapter)
3224 {
3225         struct device *dev = &adapter->pdev->dev;
3226         struct be_resources res = {0};
3227         int status;
3228
3229         if (BEx_chip(adapter)) {
3230                 BEx_get_resources(adapter, &res);
3231                 adapter->res = res;
3232         }
3233
3234         /* For Lancer, SH, etc. read the per-function resource limits from FW.
3235          * GET_FUNC_CONFIG returns the per-function guaranteed limits.
3236          * GET_PROFILE_CONFIG returns the PCI-E related (PF-pool) limits.
3237          */
3238         if (!BEx_chip(adapter)) {
3239                 status = be_cmd_get_func_config(adapter, &res);
3240                 if (status)
3241                         return status;
3242
3243                 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3244                 if (be_roce_supported(adapter))
3245                         res.max_evt_qs /= 2;
3246                 adapter->res = res;
3247
3248                 if (be_physfn(adapter)) {
3249                         status = be_cmd_get_profile_config(adapter, &res, 0);
3250                         if (status)
3251                                 return status;
3252                         adapter->res.max_vfs = res.max_vfs;
3253                 }
3254
3255                 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3256                          be_max_txqs(adapter), be_max_rxqs(adapter),
3257                          be_max_rss(adapter), be_max_eqs(adapter),
3258                          be_max_vfs(adapter));
3259                 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3260                          be_max_uc(adapter), be_max_mc(adapter),
3261                          be_max_vlans(adapter));
3262         }
3263
3264         return 0;
3265 }
3266
3267 /* Routine to query per function resource limits */
3268 static int be_get_config(struct be_adapter *adapter)
3269 {
3270         u16 profile_id;
3271         int status;
3272
3273         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3274                                      &adapter->function_mode,
3275                                      &adapter->function_caps,
3276                                      &adapter->asic_rev);
3277         if (status)
3278                 return status;
3279
3280         if (be_physfn(adapter)) {
3281                 status = be_cmd_get_active_profile(adapter, &profile_id);
3282                 if (!status)
3283                         dev_info(&adapter->pdev->dev,
3284                                  "Using profile 0x%x\n", profile_id);
3285         }
3286
3287         status = be_get_resources(adapter);
3288         if (status)
3289                 return status;
3290
3291         /* The primary MAC needs one pmac entry */
3292         adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3293                                    GFP_KERNEL);
3294         if (!adapter->pmac_id)
3295                 return -ENOMEM;
3296
3297         /* Sanitize cfg_num_qs based on HW and platform limits */
3298         adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3299
3300         return 0;
3301 }
3302
3303 static int be_mac_setup(struct be_adapter *adapter)
3304 {
3305         u8 mac[ETH_ALEN];
3306         int status;
3307
3308         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3309                 status = be_cmd_get_perm_mac(adapter, mac);
3310                 if (status)
3311                         return status;
3312
3313                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3314                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3315         } else {
3316                 /* Maybe the HW was reset; dev_addr must be re-programmed */
3317                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3318         }
3319
3320         /* For BE3-R VFs, the PF programs the initial MAC address */
3321         if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3322                 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3323                                 &adapter->pmac_id[0], 0);
3324         return 0;
3325 }
3326
3327 static void be_schedule_worker(struct be_adapter *adapter)
3328 {
3329         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3330         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3331 }
3332
3333 static int be_setup_queues(struct be_adapter *adapter)
3334 {
3335         struct net_device *netdev = adapter->netdev;
3336         int status;
3337
3338         status = be_evt_queues_create(adapter);
3339         if (status)
3340                 goto err;
3341
3342         status = be_tx_qs_create(adapter);
3343         if (status)
3344                 goto err;
3345
3346         status = be_rx_cqs_create(adapter);
3347         if (status)
3348                 goto err;
3349
3350         status = be_mcc_queues_create(adapter);
3351         if (status)
3352                 goto err;
3353
3354         status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3355         if (status)
3356                 goto err;
3357
3358         status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3359         if (status)
3360                 goto err;
3361
3362         return 0;
3363 err:
3364         dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3365         return status;
3366 }
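
     /* Note on the creation order above: the event queues must exist before
      * the TX, RX and MCC completion queues, as each CQ is bound to an EQ at
      * creation time; only then are the real queue counts reported to the
      * networking stack.
      */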
3367
3368 int be_update_queues(struct be_adapter *adapter)
3369 {
3370         struct net_device *netdev = adapter->netdev;
3371         int status;
3372
3373         if (netif_running(netdev))
3374                 be_close(netdev);
3375
3376         be_cancel_worker(adapter);
3377
3378         /* If any vectors have been shared with RoCE, we cannot re-program
3379          * the MSI-X table.
3380          */
3381         if (!adapter->num_msix_roce_vec)
3382                 be_msix_disable(adapter);
3383
3384         be_clear_queues(adapter);
3385
3386         if (!msix_enabled(adapter)) {
3387                 status = be_msix_enable(adapter);
3388                 if (status)
3389                         return status;
3390         }
3391
3392         status = be_setup_queues(adapter);
3393         if (status)
3394                 return status;
3395
3396         be_schedule_worker(adapter);
3397
3398         if (netif_running(netdev))
3399                 status = be_open(netdev);
3400
3401         return status;
3402 }
3403
3404 static int be_setup(struct be_adapter *adapter)
3405 {
3406         struct device *dev = &adapter->pdev->dev;
3407         u32 tx_fc, rx_fc, en_flags;
3408         int status;
3409
3410         be_setup_init(adapter);
3411
3412         if (!lancer_chip(adapter))
3413                 be_cmd_req_native_mode(adapter);
3414
3415         status = be_get_config(adapter);
3416         if (status)
3417                 goto err;
3418
3419         status = be_msix_enable(adapter);
3420         if (status)
3421                 goto err;
3422
3423         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3424                    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3425         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3426                 en_flags |= BE_IF_FLAGS_RSS;
3427         en_flags = en_flags & be_if_cap_flags(adapter);
3428         status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3429                                   &adapter->if_handle, 0);
3430         if (status)
3431                 goto err;
3432
3433         /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3434         rtnl_lock();
3435         status = be_setup_queues(adapter);
3436         rtnl_unlock();
3437         if (status)
3438                 goto err;
3439
3440         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3441
3442         status = be_mac_setup(adapter);
3443         if (status)
3444                 goto err;
3445
3446         be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3447
3448         if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3449                 dev_err(dev, "Firmware on card is old (%s); IRQs may not work.\n",
3450                         adapter->fw_ver);
3451                 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3452         }
3453
3454         if (adapter->vlans_added)
3455                 be_vid_config(adapter);
3456
3457         be_set_rx_mode(adapter->netdev);
3458
3459         be_cmd_get_acpi_wol_cap(adapter);
3460
3461         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3462
3463         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3464                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3465                                         adapter->rx_fc);
3466
3467         if (sriov_want(adapter)) {
3468                 if (be_max_vfs(adapter))
3469                         be_vf_setup(adapter);
3470                 else
3471                         dev_warn(dev, "device doesn't support SRIOV\n");
3472         }
3473
3474         status = be_cmd_get_phy_info(adapter);
3475         if (!status && be_pause_supported(adapter))
3476                 adapter->phy.fc_autoneg = 1;
3477
3478         be_schedule_worker(adapter);
3479         return 0;
3480 err:
3481         be_clear(adapter);
3482         return status;
3483 }
3484
3485 #ifdef CONFIG_NET_POLL_CONTROLLER
3486 static void be_netpoll(struct net_device *netdev)
3487 {
3488         struct be_adapter *adapter = netdev_priv(netdev);
3489         struct be_eq_obj *eqo;
3490         int i;
3491
3492         for_all_evt_queues(adapter, eqo, i) {
3493                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3494                 napi_schedule(&eqo->napi);
3495         }
3498 }
3499 #endif
3500
3501 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3502 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
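     /* The directory cookie is split into two halves, presumably so that
      * this driver object never contains the contiguous 32-byte marker and
      * cannot spuriously match itself; get_fsec_info() below compares the
      * full cookie against candidates at 32-byte strides in the UFI image.
      */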
3503
3504 static bool be_flash_redboot(struct be_adapter *adapter,
3505                         const u8 *p, u32 img_start, int image_size,
3506                         int hdr_size)
3507 {
3508         u32 crc_offset;
3509         u8 flashed_crc[4];
3510         int status;
3511
3512         crc_offset = hdr_size + img_start + image_size - 4;
3513
3514         p += crc_offset;
3515
3516         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3517                                       (image_size - 4));
3518         if (status) {
3519                 dev_err(&adapter->pdev->dev,
3520                         "could not get CRC from flash; not flashing redboot\n");
3521                 return false;
3522         }
3523
3524         /* Update redboot only if the CRC does not match */
3525         if (!memcmp(flashed_crc, p, 4))
3526                 return false;
3527         else
3528                 return true;
3529 }
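
     /* Net effect of the check above: the boot-code (redboot) region is
      * rewritten only when the CRC already in flash differs from the
      * trailing 4-byte CRC of the candidate image, so an unchanged boot
      * image is never needlessly reflashed.
      */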
3530
3531 static bool phy_flashing_required(struct be_adapter *adapter)
3532 {
3533         return (adapter->phy.phy_type == TN_8022 &&
3534                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3535 }
3536
3537 static bool is_comp_in_ufi(struct be_adapter *adapter,
3538                            struct flash_section_info *fsec, int type)
3539 {
3540         int i = 0, img_type = 0;
3541         struct flash_section_info_g2 *fsec_g2 = NULL;
3542
3543         if (BE2_chip(adapter))
3544                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3545
3546         for (i = 0; i < MAX_FLASH_COMP; i++) {
3547                 if (fsec_g2)
3548                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3549                 else
3550                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3551
3552                 if (img_type == type)
3553                         return true;
3554         }
3555         return false;
3557 }
3558
3559 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3560                                          int header_size,
3561                                          const struct firmware *fw)
3562 {
3563         struct flash_section_info *fsec = NULL;
3564         const u8 *p = fw->data;
3565
3566         p += header_size;
3567         while (p < (fw->data + fw->size)) {
3568                 fsec = (struct flash_section_info *)p;
3569                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3570                         return fsec;
3571                 p += 32;
3572         }
3573         return NULL;
3574 }
3575
3576 static int be_flash(struct be_adapter *adapter, const u8 *img,
3577                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3578 {
3579         u32 total_bytes = 0, flash_op, num_bytes = 0;
3580         int status = 0;
3581         struct be_cmd_write_flashrom *req = flash_cmd->va;
3582
3583         total_bytes = img_size;
3584         while (total_bytes) {
3585                 num_bytes = min_t(u32, 32*1024, total_bytes);
3586
3587                 total_bytes -= num_bytes;
3588
3589                 if (!total_bytes) {
3590                         if (optype == OPTYPE_PHY_FW)
3591                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3592                         else
3593                                 flash_op = FLASHROM_OPER_FLASH;
3594                 } else {
3595                         if (optype == OPTYPE_PHY_FW)
3596                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3597                         else
3598                                 flash_op = FLASHROM_OPER_SAVE;
3599                 }
3600
3601                 memcpy(req->data_buf, img, num_bytes);
3602                 img += num_bytes;
3603                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3604                                                 flash_op, num_bytes);
3605                 if (status) {
3606                         if (status == ILLEGAL_IOCTL_REQ &&
3607                             optype == OPTYPE_PHY_FW)
3608                                 break;
3609                         dev_err(&adapter->pdev->dev,
3610                                 "cmd to write to flash rom failed.\n");
3611                         return status;
3612                 }
3613         }
3614         return 0;
3615 }
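
     /* Worked example of the chunking above (illustrative sizes): a 100 KB
      * image is sent as 32 KB + 32 KB + 32 KB + 4 KB. Every chunk except the
      * last is issued with FLASHROM_OPER_SAVE (presumably staged by the
      * firmware); only the final chunk uses FLASHROM_OPER_FLASH, which
      * commits the accumulated image. The PHY-FW path uses the analogous
      * PHY_SAVE/PHY_FLASH opcodes.
      */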
3616
3617 /* For BE2, BE3 and BE3-R */
3618 static int be_flash_BEx(struct be_adapter *adapter,
3619                          const struct firmware *fw,
3620                          struct be_dma_mem *flash_cmd,
3621                          int num_of_images)
3623 {
3624         int status = 0, i, filehdr_size = 0;
3625         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3626         const u8 *p = fw->data;
3627         const struct flash_comp *pflashcomp;
3628         int num_comp, redboot;
3629         struct flash_section_info *fsec = NULL;
3630
3631         static const struct flash_comp gen3_flash_types[] = {
3632                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3633                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3634                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3635                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3636                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3637                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3638                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3639                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3640                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3641                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3642                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3643                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3644                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3645                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3646                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3647                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3648                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3649                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3650                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3651                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3652         };
3653
3654         static const struct flash_comp gen2_flash_types[] = {
3655                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3656                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3657                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3658                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3659                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3660                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3661                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3662                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3663                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3664                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3665                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3666                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3667                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3668                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3669                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3670                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3671         };
3672
3673         if (BE3_chip(adapter)) {
3674                 pflashcomp = gen3_flash_types;
3675                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3676                 num_comp = ARRAY_SIZE(gen3_flash_types);
3677         } else {
3678                 pflashcomp = gen2_flash_types;
3679                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3680                 num_comp = ARRAY_SIZE(gen2_flash_types);
3681         }
3682
3683         /* Get flash section info */
3684         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3685         if (!fsec) {
3686                 dev_err(&adapter->pdev->dev,
3687                         "Invalid cookie; UFI image corrupted?\n");
3688                 return -EINVAL;
3689         }
3690         for (i = 0; i < num_comp; i++) {
3691                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3692                         continue;
3693
3694                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3695                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3696                         continue;
3697
3698                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3699                     !phy_flashing_required(adapter))
3700                         continue;
3701
3702                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3703                         redboot = be_flash_redboot(adapter, fw->data,
3704                                 pflashcomp[i].offset, pflashcomp[i].size,
3705                                 filehdr_size + img_hdrs_size);
3706                         if (!redboot)
3707                                 continue;
3708                 }
3709
3710                 p = fw->data;
3711                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3712                 if (p + pflashcomp[i].size > fw->data + fw->size)
3713                         return -EINVAL;
3714
3715                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3716                                         pflashcomp[i].size);
3717                 if (status) {
3718                         dev_err(&adapter->pdev->dev,
3719                                 "Flashing section type %d failed.\n",
3720                                 pflashcomp[i].img_type);
3721                         return status;
3722                 }
3723         }
3724         return 0;
3725 }
3726
3727 static int be_flash_skyhawk(struct be_adapter *adapter,
3728                 const struct firmware *fw,
3729                 struct be_dma_mem *flash_cmd, int num_of_images)
3730 {
3731         int status = 0, i, filehdr_size = 0;
3732         int img_offset, img_size, img_optype, redboot;
3733         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3734         const u8 *p = fw->data;
3735         struct flash_section_info *fsec = NULL;
3736
3737         filehdr_size = sizeof(struct flash_file_hdr_g3);
3738         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3739         if (!fsec) {
3740                 dev_err(&adapter->pdev->dev,
3741                         "Invalid cookie; UFI image corrupted?\n");
3742                 return -EINVAL;
3743         }
3744
3745         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3746                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3747                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3748
3749                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3750                 case IMAGE_FIRMWARE_iSCSI:
3751                         img_optype = OPTYPE_ISCSI_ACTIVE;
3752                         break;
3753                 case IMAGE_BOOT_CODE:
3754                         img_optype = OPTYPE_REDBOOT;
3755                         break;
3756                 case IMAGE_OPTION_ROM_ISCSI:
3757                         img_optype = OPTYPE_BIOS;
3758                         break;
3759                 case IMAGE_OPTION_ROM_PXE:
3760                         img_optype = OPTYPE_PXE_BIOS;
3761                         break;
3762                 case IMAGE_OPTION_ROM_FCoE:
3763                         img_optype = OPTYPE_FCOE_BIOS;
3764                         break;
3765                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3766                         img_optype = OPTYPE_ISCSI_BACKUP;
3767                         break;
3768                 case IMAGE_NCSI:
3769                         img_optype = OPTYPE_NCSI_FW;
3770                         break;
3771                 default:
3772                         continue;
3773                 }
3774
3775                 if (img_optype == OPTYPE_REDBOOT) {
3776                         redboot = be_flash_redboot(adapter, fw->data,
3777                                         img_offset, img_size,
3778                                         filehdr_size + img_hdrs_size);
3779                         if (!redboot)
3780                                 continue;
3781                 }
3782
3783                 p = fw->data;
3784                 p += filehdr_size + img_offset + img_hdrs_size;
3785                 if (p + img_size > fw->data + fw->size)
3786                         return -EINVAL;
3787
3788                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3789                 if (status) {
3790                         dev_err(&adapter->pdev->dev,
3791                                 "Flashing section type %d failed.\n",
3792                                 le32_to_cpu(fsec->fsec_entry[i].type));
3793                         return status;
3794                 }
3795         }
3796         return 0;
3797 }
3798
3799 static int lancer_fw_download(struct be_adapter *adapter,
3800                                 const struct firmware *fw)
3801 {
3802 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3803 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3804         struct be_dma_mem flash_cmd;
3805         const u8 *data_ptr = NULL;
3806         u8 *dest_image_ptr = NULL;
3807         size_t image_size = 0;
3808         u32 chunk_size = 0;
3809         u32 data_written = 0;
3810         u32 offset = 0;
3811         int status = 0;
3812         u8 add_status = 0;
3813         u8 change_status;
3814
3815         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3816                 dev_err(&adapter->pdev->dev,
3817                         "FW image not properly aligned; length must be 4-byte aligned\n");
3819                 status = -EINVAL;
3820                 goto lancer_fw_exit;
3821         }
3822
3823         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3824                                 + LANCER_FW_DOWNLOAD_CHUNK;
3825         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3826                                           &flash_cmd.dma, GFP_KERNEL);
3827         if (!flash_cmd.va) {
3828                 status = -ENOMEM;
3829                 goto lancer_fw_exit;
3830         }
3831
3832         dest_image_ptr = flash_cmd.va +
3833                                 sizeof(struct lancer_cmd_req_write_object);
3834         image_size = fw->size;
3835         data_ptr = fw->data;
3836
3837         while (image_size) {
3838                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3839
3840                 /* Copy the image chunk content. */
3841                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3842
3843                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3844                                                  chunk_size, offset,
3845                                                  LANCER_FW_DOWNLOAD_LOCATION,
3846                                                  &data_written, &change_status,
3847                                                  &add_status);
3848                 if (status)
3849                         break;
3850
3851                 offset += data_written;
3852                 data_ptr += data_written;
3853                 image_size -= data_written;
3854         }
3855
3856         if (!status) {
3857                 /* Commit the FW written */
3858                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3859                                                  0, offset,
3860                                                  LANCER_FW_DOWNLOAD_LOCATION,
3861                                                  &data_written, &change_status,
3862                                                  &add_status);
3863         }
3864
3865         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3866                                 flash_cmd.dma);
3867         if (status) {
3868                 dev_err(&adapter->pdev->dev,
3869                         "Firmware load error; status 0x%x, additional status 0x%x\n",
3871                         status, add_status);
3872                 goto lancer_fw_exit;
3873         }
3874
3875         if (change_status == LANCER_FW_RESET_NEEDED) {
3876                 dev_info(&adapter->pdev->dev,
3877                          "Resetting adapter to activate new FW\n");
3878                 status = lancer_physdev_ctrl(adapter,
3879                                              PHYSDEV_CONTROL_FW_RESET_MASK);
3880                 if (status) {
3881                         dev_err(&adapter->pdev->dev,
3882                                 "Adapter busy for FW reset.\n"
3883                                 "New FW will not be active.\n");
3884                         goto lancer_fw_exit;
3885                 }
3886         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3887                 dev_err(&adapter->pdev->dev,
3888                         "System reboot required for new FW to be active\n");
3890         }
3891
3892         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3893 lancer_fw_exit:
3894         return status;
3895 }
3896
3897 #define UFI_TYPE2               2
3898 #define UFI_TYPE3               3
3899 #define UFI_TYPE3R              10
3900 #define UFI_TYPE4               4
3901 static int be_get_ufi_type(struct be_adapter *adapter,
3902                            struct flash_file_hdr_g3 *fhdr)
3903 {
3904         if (fhdr == NULL)
3905                 goto be_get_ufi_exit;
3906
3907         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3908                 return UFI_TYPE4;
3909         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3910                 if (fhdr->asic_type_rev == 0x10)
3911                         return UFI_TYPE3R;
3912                 else
3913                         return UFI_TYPE3;
3914         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3915                 return UFI_TYPE2;
3916
3917 be_get_ufi_exit:
3918         dev_err(&adapter->pdev->dev,
3919                 "UFI and Interface are not compatible for flashing\n");
3920         return -1;
3921 }
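
     /* Compatibility rules implemented above (fhdr->build[0] is the UFI
      * generation digit; asic_type_rev tells BE3-R apart from BE3):
      *
      *   Skyhawk + build '4'               ->  UFI_TYPE4
      *   BE3     + build '3', rev == 0x10  ->  UFI_TYPE3R
      *   BE3     + build '3', older rev    ->  UFI_TYPE3
      *   BE2     + build '2'               ->  UFI_TYPE2
      *
      * Any other combination is rejected as incompatible.
      */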
3922
3923 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3924 {
3925         struct flash_file_hdr_g3 *fhdr3;
3926         struct image_hdr *img_hdr_ptr = NULL;
3927         struct be_dma_mem flash_cmd;
3928         const u8 *p;
3929         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3930
3931         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3932         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3933                                           &flash_cmd.dma, GFP_KERNEL);
3934         if (!flash_cmd.va) {
3935                 status = -ENOMEM;
3936                 goto be_fw_exit;
3937         }
3938
3939         p = fw->data;
3940         fhdr3 = (struct flash_file_hdr_g3 *)p;
3941
3942         ufi_type = be_get_ufi_type(adapter, fhdr3);
3943
3944         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3945         for (i = 0; i < num_imgs; i++) {
3946                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3947                                 (sizeof(struct flash_file_hdr_g3) +
3948                                  i * sizeof(struct image_hdr)));
3949                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3950                         switch (ufi_type) {
3951                         case UFI_TYPE4:
3952                                 status = be_flash_skyhawk(adapter, fw,
3953                                                         &flash_cmd, num_imgs);
3954                                 break;
3955                         case UFI_TYPE3R:
3956                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3957                                                       num_imgs);
3958                                 break;
3959                         case UFI_TYPE3:
3960                                 /* Do not flash this ufi on BE3-R cards */
3961                                 if (adapter->asic_rev < 0x10)
3962                                         status = be_flash_BEx(adapter, fw,
3963                                                               &flash_cmd,
3964                                                               num_imgs);
3965                                 else {
3966                                         status = -EINVAL;
3967                                         dev_err(&adapter->pdev->dev,
3968                                                 "Can't load BE3 UFI on BE3R\n");
3969                                 }
3970                         }
3971                 }
3972         }
3973
3974         if (ufi_type == UFI_TYPE2)
3975                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3976         else if (ufi_type == -1)
3977                 status = -EINVAL;
3978
3979         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3980                           flash_cmd.dma);
3981         if (status) {
3982                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3983                 goto be_fw_exit;
3984         }
3985
3986         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3987
3988 be_fw_exit:
3989         return status;
3990 }
3991
3992 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3993 {
3994         const struct firmware *fw;
3995         int status;
3996
3997         if (!netif_running(adapter->netdev)) {
3998                 dev_err(&adapter->pdev->dev,
3999                         "Firmware load not allowed (interface is down)\n");
4000                 return -ENETDOWN;
4001         }
4002
4003         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4004         if (status)
4005                 goto fw_exit;
4006
4007         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4008
4009         if (lancer_chip(adapter))
4010                 status = lancer_fw_download(adapter, fw);
4011         else
4012                 status = be_fw_download(adapter, fw);
4013
4014         if (!status)
4015                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4016                                   adapter->fw_on_flash);
4017
4018 fw_exit:
4019         release_firmware(fw);
4020         return status;
4021 }
4022
4023 static int be_ndo_bridge_setlink(struct net_device *dev,
4024                                     struct nlmsghdr *nlh)
4025 {
4026         struct be_adapter *adapter = netdev_priv(dev);
4027         struct nlattr *attr, *br_spec;
4028         int rem;
4029         int status = 0;
4030         u16 mode = 0;
4031
4032         if (!sriov_enabled(adapter))
4033                 return -EOPNOTSUPP;
4034
4035         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
             if (!br_spec)
                     return -EINVAL;
4036
4037         nla_for_each_nested(attr, br_spec, rem) {
4038                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4039                         continue;
4040
4041                 mode = nla_get_u16(attr);
4042                 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4043                         return -EINVAL;
4044
4045                 status = be_cmd_set_hsw_config(adapter, 0, 0,
4046                                                adapter->if_handle,
4047                                                mode == BRIDGE_MODE_VEPA ?
4048                                                PORT_FWD_TYPE_VEPA :
4049                                                PORT_FWD_TYPE_VEB);
4050                 if (status)
4051                         goto err;
4052
4053                 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4054                          mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4055
4056                 return status;
4057         }
4058 err:
4059         dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4060                 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4061
4062         return status;
4063 }
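
     /* From userspace this hook is typically exercised with iproute2, e.g.
      * (a usage sketch; "eth0" is a placeholder device name):
      *
      *   bridge link set dev eth0 hwmode vepa    # -> PORT_FWD_TYPE_VEPA
      *   bridge link set dev eth0 hwmode veb     # -> PORT_FWD_TYPE_VEB
      */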
4064
4065 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4066                                     struct net_device *dev,
4067                                     u32 filter_mask)
4068 {
4069         struct be_adapter *adapter = netdev_priv(dev);
4070         int status = 0;
4071         u8 hsw_mode;
4072
4073         if (!sriov_enabled(adapter))
4074                 return 0;
4075
4076         /* BE and Lancer chips support VEB mode only */
4077         if (BEx_chip(adapter) || lancer_chip(adapter)) {
4078                 hsw_mode = PORT_FWD_TYPE_VEB;
4079         } else {
4080                 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4081                                                adapter->if_handle, &hsw_mode);
4082                 if (status)
4083                         return 0;
4084         }
4085
4086         return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4087                                        hsw_mode == PORT_FWD_TYPE_VEPA ?
4088                                        BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4089 }
4090
4091 static const struct net_device_ops be_netdev_ops = {
4092         .ndo_open               = be_open,
4093         .ndo_stop               = be_close,
4094         .ndo_start_xmit         = be_xmit,
4095         .ndo_set_rx_mode        = be_set_rx_mode,
4096         .ndo_set_mac_address    = be_mac_addr_set,
4097         .ndo_change_mtu         = be_change_mtu,
4098         .ndo_get_stats64        = be_get_stats64,
4099         .ndo_validate_addr      = eth_validate_addr,
4100         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
4101         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
4102         .ndo_set_vf_mac         = be_set_vf_mac,
4103         .ndo_set_vf_vlan        = be_set_vf_vlan,
4104         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
4105         .ndo_get_vf_config      = be_get_vf_config,
4106 #ifdef CONFIG_NET_POLL_CONTROLLER
4107         .ndo_poll_controller    = be_netpoll,
4108 #endif
4109         .ndo_bridge_setlink     = be_ndo_bridge_setlink,
4110         .ndo_bridge_getlink     = be_ndo_bridge_getlink,
4111 #ifdef CONFIG_NET_RX_BUSY_POLL
4112         .ndo_busy_poll          = be_busy_poll,
4113 #endif
4114 };
4115
4116 static void be_netdev_init(struct net_device *netdev)
4117 {
4118         struct be_adapter *adapter = netdev_priv(netdev);
4119
4120         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4121                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
4122                 NETIF_F_HW_VLAN_CTAG_TX;
4123         if (be_multi_rxq(adapter))
4124                 netdev->hw_features |= NETIF_F_RXHASH;
4125
4126         netdev->features |= netdev->hw_features |
4127                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4128
4129         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4130                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4131
4132         netdev->priv_flags |= IFF_UNICAST_FLT;
4133
4134         netdev->flags |= IFF_MULTICAST;
4135
4136         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
4137
4138         netdev->netdev_ops = &be_netdev_ops;
4139
4140         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
4141 }
4142
4143 static void be_unmap_pci_bars(struct be_adapter *adapter)
4144 {
4145         if (adapter->csr)
4146                 pci_iounmap(adapter->pdev, adapter->csr);
4147         if (adapter->db)
4148                 pci_iounmap(adapter->pdev, adapter->db);
4149 }
4150
4151 static int db_bar(struct be_adapter *adapter)
4152 {
4153         if (lancer_chip(adapter) || !be_physfn(adapter))
4154                 return 0;
4155         else
4156                 return 4;
4157 }
4158
4159 static int be_roce_map_pci_bars(struct be_adapter *adapter)
4160 {
4161         if (skyhawk_chip(adapter)) {
4162                 adapter->roce_db.size = 4096;
4163                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4164                                                               db_bar(adapter));
4165                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4166                                                                db_bar(adapter));
4167         }
4168         return 0;
4169 }
4170
4171 static int be_map_pci_bars(struct be_adapter *adapter)
4172 {
4173         u8 __iomem *addr;
4174
4175         if (BEx_chip(adapter) && be_physfn(adapter)) {
4176                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4177                 if (adapter->csr == NULL)
4178                         return -ENOMEM;
4179         }
4180
4181         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
4182         if (!addr)
4183                 goto pci_map_err;
4184         adapter->db = addr;
4185
4186         be_roce_map_pci_bars(adapter);
4187         return 0;
4188
4189 pci_map_err:
4190         be_unmap_pci_bars(adapter);
4191         return -ENOMEM;
4192 }
4193
4194 static void be_ctrl_cleanup(struct be_adapter *adapter)
4195 {
4196         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
4197
4198         be_unmap_pci_bars(adapter);
4199
4200         if (mem->va)
4201                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4202                                   mem->dma);
4203
4204         mem = &adapter->rx_filter;
4205         if (mem->va)
4206                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4207                                   mem->dma);
4208 }
4209
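/* Map the PCI BARs and allocate the DMA buffers used to talk to the
 * controller: the bootstrap mailbox and the RX filter command buffer.
 */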
4210 static int be_ctrl_init(struct be_adapter *adapter)
4211 {
4212         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4213         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
4214         struct be_dma_mem *rx_filter = &adapter->rx_filter;
4215         u32 sli_intf;
4216         int status;
4217
4218         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4219         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4220                                  SLI_INTF_FAMILY_SHIFT;
4221         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4222
4223         status = be_map_pci_bars(adapter);
4224         if (status)
4225                 goto done;
4226
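        /* The mailbox must be 16-byte aligned: over-allocate by 16 bytes
         * and align both the virtual and DMA addresses by hand.
         */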
4227         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4228         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4229                                                 mbox_mem_alloc->size,
4230                                                 &mbox_mem_alloc->dma,
4231                                                 GFP_KERNEL);
4232         if (!mbox_mem_alloc->va) {
4233                 status = -ENOMEM;
4234                 goto unmap_pci_bars;
4235         }
4236         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4237         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4238         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4239         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
4240
4241         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4242         rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4243                                             rx_filter->size, &rx_filter->dma,
4244                                             GFP_KERNEL);
4245         if (!rx_filter->va) {
4246                 status = -ENOMEM;
4247                 goto free_mbox;
4248         }
4249
4250         mutex_init(&adapter->mbox_lock);
4251         spin_lock_init(&adapter->mcc_lock);
4252         spin_lock_init(&adapter->mcc_cq_lock);
4253
4254         init_completion(&adapter->et_cmd_compl);
4255         pci_save_state(adapter->pdev);
4256         return 0;
4257
4258 free_mbox:
4259         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4260                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
4261
4262 unmap_pci_bars:
4263         be_unmap_pci_bars(adapter);
4264
4265 done:
4266         return status;
4267 }
4268
4269 static void be_stats_cleanup(struct be_adapter *adapter)
4270 {
4271         struct be_dma_mem *cmd = &adapter->stats_cmd;
4272
4273         if (cmd->va)
4274                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4275                                   cmd->va, cmd->dma);
4276 }
4277
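/* The stats command buffer size depends on the ASIC: Lancer uses the
 * pport-stats command; BE2, BE3 and later chips use v0, v1 and v2 of
 * GET_STATS respectively.
 */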
4278 static int be_stats_init(struct be_adapter *adapter)
4279 {
4280         struct be_dma_mem *cmd = &adapter->stats_cmd;
4281
4282         if (lancer_chip(adapter))
4283                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4284         else if (BE2_chip(adapter))
4285                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4286         else if (BE3_chip(adapter))
4287                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4288         else
4289                 /* ALL non-BE ASICs */
4290                 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
4291
4292         cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4293                                       GFP_KERNEL);
4294         if (!cmd->va)
4295                 return -ENOMEM;
4296         return 0;
4297 }
4298
4299 static void be_remove(struct pci_dev *pdev)
4300 {
4301         struct be_adapter *adapter = pci_get_drvdata(pdev);
4302
4303         if (!adapter)
4304                 return;
4305
4306         be_roce_dev_remove(adapter);
4307         be_intr_set(adapter, false);
4308
4309         cancel_delayed_work_sync(&adapter->func_recovery_work);
4310
4311         unregister_netdev(adapter->netdev);
4312
4313         be_clear(adapter);
4314
4315         /* tell fw we're done with firing cmds */
4316         be_cmd_fw_clean(adapter);
4317
4318         be_stats_cleanup(adapter);
4319
4320         be_ctrl_cleanup(adapter);
4321
4322         pci_disable_pcie_error_reporting(pdev);
4323
4324         pci_release_regions(pdev);
4325         pci_disable_device(pdev);
4326
4327         free_netdev(adapter->netdev);
4328 }
4329
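/* Fetch the initial configuration from FW: controller attributes, the
 * FW log level (BEx only) and the default number of RSS queues.
 */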
4330 static int be_get_initial_config(struct be_adapter *adapter)
4331 {
4332         int status, level;
4333
4334         status = be_cmd_get_cntl_attributes(adapter);
4335         if (status)
4336                 return status;
4337
4338         /* Must be a power of 2 or else MODULO will BUG_ON */
4339         adapter->be_get_temp_freq = 64;
4340
4341         if (BEx_chip(adapter)) {
4342                 level = be_cmd_get_fw_log_level(adapter);
4343                 adapter->msg_enable =
4344                         level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4345         }
4346
4347         adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4348         return 0;
4349 }
4350
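/* Once the Lancer FW is ready again, tear the function down and rebuild
 * it from scratch, reopening the interface if it was running.
 */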
4351 static int lancer_recover_func(struct be_adapter *adapter)
4352 {
4353         struct device *dev = &adapter->pdev->dev;
4354         int status;
4355
4356         status = lancer_test_and_set_rdy_state(adapter);
4357         if (status)
4358                 goto err;
4359
4360         if (netif_running(adapter->netdev))
4361                 be_close(adapter->netdev);
4362
4363         be_clear(adapter);
4364
4365         be_clear_all_error(adapter);
4366
4367         status = be_setup(adapter);
4368         if (status)
4369                 goto err;
4370
4371         if (netif_running(adapter->netdev)) {
4372                 status = be_open(adapter->netdev);
4373                 if (status)
4374                         goto err;
4375         }
4376
4377         dev_info(dev, "Adapter recovery successful\n");
4378         return 0;
4379 err:
4380         if (status == -EAGAIN)
4381                 dev_err(dev, "Waiting for resource provisioning\n");
4382         else
4383                 dev_err(dev, "Adapter recovery failed\n");
4384
4385         return status;
4386 }
4387
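/* Runs every second to detect HW/FW errors; on Lancer it also tries to
 * recover the function after an error.
 */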
4388 static void be_func_recovery_task(struct work_struct *work)
4389 {
4390         struct be_adapter *adapter =
4391                 container_of(work, struct be_adapter, func_recovery_work.work);
4392         int status = 0;
4393
4394         be_detect_error(adapter);
4395
4396         if (adapter->hw_error && lancer_chip(adapter)) {
4397
4398                 rtnl_lock();
4399                 netif_device_detach(adapter->netdev);
4400                 rtnl_unlock();
4401
4402                 status = lancer_recover_func(adapter);
4403                 if (!status)
4404                         netif_device_attach(adapter->netdev);
4405         }
4406
4407         /* On Lancer, reschedule only after success or a provisioning error
4408          * (-EAGAIN); other errors need no further recovery attempts.
4409          */
4410         if (!status || status == -EAGAIN)
4411                 schedule_delayed_work(&adapter->func_recovery_work,
4412                                       msecs_to_jiffies(1000));
4413 }
4414
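/* Periodic housekeeping: reap MCC completions while interrupts are off,
 * refresh HW stats and die temperature, replenish RX queues starved by
 * allocation failures and re-tune EQ delays.
 */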
4415 static void be_worker(struct work_struct *work)
4416 {
4417         struct be_adapter *adapter =
4418                 container_of(work, struct be_adapter, work.work);
4419         struct be_rx_obj *rxo;
4420         int i;
4421
4422         /* When interrupts are not yet enabled, just reap any pending
4423          * MCC completions */
4424         if (!netif_running(adapter->netdev)) {
4425                 local_bh_disable();
4426                 be_process_mcc(adapter);
4427                 local_bh_enable();
4428                 goto reschedule;
4429         }
4430
4431         if (!adapter->stats_cmd_sent) {
4432                 if (lancer_chip(adapter))
4433                         lancer_cmd_get_pport_stats(adapter,
4434                                                    &adapter->stats_cmd);
4435                 else
4436                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4437         }
4438
4439         if (be_physfn(adapter) &&
4440             MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4441                 be_cmd_get_die_temperature(adapter);
4442
4443         for_all_rx_queues(adapter, rxo, i) {
4444                 /* Replenish RX-queues starved due to memory
4445                  * allocation failures.
4446                  */
4447                 if (rxo->rx_post_starved)
4448                         be_post_rx_frags(rxo, GFP_KERNEL);
4449         }
4450
4451         be_eqd_update(adapter);
4452
4453 reschedule:
4454         adapter->work_counter++;
4455         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4456 }
4457
4458 /* If any VFs are already enabled don't FLR the PF */
4459 static bool be_reset_required(struct be_adapter *adapter)
4460 {
4461         return pci_num_vf(adapter->pdev) == 0;
4462 }
4463
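/* Human-readable name of the multi-channel mode, for the probe banner;
 * empty when multi-channel is not in use.
 */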
4464 static char *mc_name(struct be_adapter *adapter)
4465 {
4466         char *str = ""; /* default */
4467
4468         switch (adapter->mc_type) {
4469         case UMC:
4470                 str = "UMC";
4471                 break;
4472         case FLEX10:
4473                 str = "FLEX10";
4474                 break;
4475         case vNIC1:
4476                 str = "vNIC-1";
4477                 break;
4478         case nPAR:
4479                 str = "nPAR";
4480                 break;
4481         case UFP:
4482                 str = "UFP";
4483                 break;
4484         case vNIC2:
4485                 str = "vNIC-2";
4486                 break;
4487         default:
4488                 break;
4489         }
4490
4491         return str;
4492 }
4493
4494 static inline char *func_name(struct be_adapter *adapter)
4495 {
4496         return be_physfn(adapter) ? "PF" : "VF";
4497 }
4498
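/* PCI probe: enable the device, map the BARs, sync up with FW, let
 * be_setup() provision queues and interrupts, then register the netdev
 * and start the periodic error-recovery task.
 */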
4499 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4500 {
4501         int status = 0;
4502         struct be_adapter *adapter;
4503         struct net_device *netdev;
4504         char port_name;
4505
4506         status = pci_enable_device(pdev);
4507         if (status)
4508                 goto do_none;
4509
4510         status = pci_request_regions(pdev, DRV_NAME);
4511         if (status)
4512                 goto disable_dev;
4513         pci_set_master(pdev);
4514
4515         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4516         if (!netdev) {
4517                 status = -ENOMEM;
4518                 goto rel_reg;
4519         }
4520         adapter = netdev_priv(netdev);
4521         adapter->pdev = pdev;
4522         pci_set_drvdata(pdev, adapter);
4523         adapter->netdev = netdev;
4524         SET_NETDEV_DEV(netdev, &pdev->dev);
4525
4526         status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4527         if (!status) {
4528                 netdev->features |= NETIF_F_HIGHDMA;
4529         } else {
4530                 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4531                 if (status) {
4532                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4533                         goto free_netdev;
4534                 }
4535         }
4536
4537         if (be_physfn(adapter)) {
4538                 status = pci_enable_pcie_error_reporting(pdev);
4539                 if (!status)
4540                         dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4541         }
4542
4543         status = be_ctrl_init(adapter);
4544         if (status)
4545                 goto free_netdev;
4546
4547         /* sync up with fw's ready state */
4548         if (be_physfn(adapter)) {
4549                 status = be_fw_wait_ready(adapter);
4550                 if (status)
4551                         goto ctrl_clean;
4552         }
4553
4554         if (be_reset_required(adapter)) {
4555                 status = be_cmd_reset_function(adapter);
4556                 if (status)
4557                         goto ctrl_clean;
4558
4559                 /* Wait for interrupts to quiesce after an FLR */
4560                 msleep(100);
4561         }
4562
4563         /* Allow interrupts for other ULPs running on NIC function */
4564         be_intr_set(adapter, true);
4565
4566         /* tell fw we're ready to fire cmds */
4567         status = be_cmd_fw_init(adapter);
4568         if (status)
4569                 goto ctrl_clean;
4570
4571         status = be_stats_init(adapter);
4572         if (status)
4573                 goto ctrl_clean;
4574
4575         status = be_get_initial_config(adapter);
4576         if (status)
4577                 goto stats_clean;
4578
4579         INIT_DELAYED_WORK(&adapter->work, be_worker);
4580         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4581         adapter->rx_fc = adapter->tx_fc = true;
4582
4583         status = be_setup(adapter);
4584         if (status)
4585                 goto stats_clean;
4586
4587         be_netdev_init(netdev);
4588         status = register_netdev(netdev);
4589         if (status != 0)
4590                 goto unsetup;
4591
4592         be_roce_dev_add(adapter);
4593
4594         schedule_delayed_work(&adapter->func_recovery_work,
4595                               msecs_to_jiffies(1000));
4596
4597         be_cmd_query_port_name(adapter, &port_name);
4598
4599         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4600                  func_name(adapter), mc_name(adapter), port_name);
4601
4602         return 0;
4603
4604 unsetup:
4605         be_clear(adapter);
4606 stats_clean:
4607         be_stats_cleanup(adapter);
4608 ctrl_clean:
4609         be_ctrl_cleanup(adapter);
4610 free_netdev:
4611         free_netdev(netdev);
4612 rel_reg:
4613         pci_release_regions(pdev);
4614 disable_dev:
4615         pci_disable_device(pdev);
4616 do_none:
4617         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4618         return status;
4619 }
4620
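/* Suspend: arm wake-on-LAN if enabled, quiesce the interface and free
 * HW resources before powering the device down.
 */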
4621 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4622 {
4623         struct be_adapter *adapter = pci_get_drvdata(pdev);
4624         struct net_device *netdev = adapter->netdev;
4625
4626         if (adapter->wol_en)
4627                 be_setup_wol(adapter, true);
4628
4629         be_intr_set(adapter, false);
4630         cancel_delayed_work_sync(&adapter->func_recovery_work);
4631
4632         netif_device_detach(netdev);
4633         if (netif_running(netdev)) {
4634                 rtnl_lock();
4635                 be_close(netdev);
4636                 rtnl_unlock();
4637         }
4638         be_clear(adapter);
4639
4640         pci_save_state(pdev);
4641         pci_disable_device(pdev);
4642         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4643         return 0;
4644 }
4645
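/* Resume: power the device back up, wait for FW readiness, rebuild the
 * function and reopen the interface if it was up at suspend time.
 */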
4646 static int be_resume(struct pci_dev *pdev)
4647 {
4648         int status = 0;
4649         struct be_adapter *adapter = pci_get_drvdata(pdev);
4650         struct net_device *netdev = adapter->netdev;
4651
4652         netif_device_detach(netdev);
4653
4654         status = pci_enable_device(pdev);
4655         if (status)
4656                 return status;
4657
4658         pci_set_power_state(pdev, PCI_D0);
4659         pci_restore_state(pdev);
4660
4661         status = be_fw_wait_ready(adapter);
4662         if (status)
4663                 return status;
4664
4665         be_intr_set(adapter, true);
4666         /* tell fw we're ready to fire cmds */
4667         status = be_cmd_fw_init(adapter);
4668         if (status)
4669                 return status;
4670
4671         be_setup(adapter);
4672         if (netif_running(netdev)) {
4673                 rtnl_lock();
4674                 be_open(netdev);
4675                 rtnl_unlock();
4676         }
4677
4678         schedule_delayed_work(&adapter->func_recovery_work,
4679                               msecs_to_jiffies(1000));
4680         netif_device_attach(netdev);
4681
4682         if (adapter->wol_en)
4683                 be_setup_wol(adapter, false);
4684
4685         return 0;
4686 }
4687
4688 /*
4689  * An FLR will stop BE from DMAing any data.
4690  */
4691 static void be_shutdown(struct pci_dev *pdev)
4692 {
4693         struct be_adapter *adapter = pci_get_drvdata(pdev);
4694
4695         if (!adapter)
4696                 return;
4697
4698         cancel_delayed_work_sync(&adapter->work);
4699         cancel_delayed_work_sync(&adapter->func_recovery_work);
4700
4701         netif_device_detach(adapter->netdev);
4702
4703         be_cmd_reset_function(adapter);
4704
4705         pci_disable_device(pdev);
4706 }
4707
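/* EEH error handling: on a PCI channel error detach the netdev and free
 * HW resources; the EEH core then resets the slot and recovery finishes
 * in be_eeh_reset() and be_eeh_resume().
 */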
4708 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4709                                 pci_channel_state_t state)
4710 {
4711         struct be_adapter *adapter = pci_get_drvdata(pdev);
4712         struct net_device *netdev = adapter->netdev;
4713
4714         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4715
4716         if (!adapter->eeh_error) {
4717                 adapter->eeh_error = true;
4718
4719                 cancel_delayed_work_sync(&adapter->func_recovery_work);
4720
4721                 rtnl_lock();
4722                 netif_device_detach(netdev);
4723                 if (netif_running(netdev))
4724                         be_close(netdev);
4725                 rtnl_unlock();
4726
4727                 be_clear(adapter);
4728         }
4729
4730         if (state == pci_channel_io_perm_failure)
4731                 return PCI_ERS_RESULT_DISCONNECT;
4732
4733         pci_disable_device(pdev);
4734
4735         /* The error could cause the FW to trigger a flash debug dump.
4736          * Resetting the card while flash dump is in progress
4737          * can cause it not to recover; wait for it to finish.
4738          * Wait only for first function as it is needed only once per
4739          * adapter.
4740          */
4741         if (pdev->devfn == 0)
4742                 ssleep(30);
4743
4744         return PCI_ERS_RESULT_NEED_RESET;
4745 }
4746
4747 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4748 {
4749         struct be_adapter *adapter = pci_get_drvdata(pdev);
4750         int status;
4751
4752         dev_info(&adapter->pdev->dev, "EEH reset\n");
4753
4754         status = pci_enable_device(pdev);
4755         if (status)
4756                 return PCI_ERS_RESULT_DISCONNECT;
4757
4758         pci_set_master(pdev);
4759         pci_set_power_state(pdev, PCI_D0);
4760         pci_restore_state(pdev);
4761
4762         /* Check if card is ok and fw is ready */
4763         dev_info(&adapter->pdev->dev,
4764                  "Waiting for FW to be ready after EEH reset\n");
4765         status = be_fw_wait_ready(adapter);
4766         if (status)
4767                 return PCI_ERS_RESULT_DISCONNECT;
4768
4769         pci_cleanup_aer_uncorrect_error_status(pdev);
4770         be_clear_all_error(adapter);
4771         return PCI_ERS_RESULT_RECOVERED;
4772 }
4773
4774 static void be_eeh_resume(struct pci_dev *pdev)
4775 {
4776         int status = 0;
4777         struct be_adapter *adapter = pci_get_drvdata(pdev);
4778         struct net_device *netdev = adapter->netdev;
4779
4780         dev_info(&adapter->pdev->dev, "EEH resume\n");
4781
4782         pci_save_state(pdev);
4783
4784         status = be_cmd_reset_function(adapter);
4785         if (status)
4786                 goto err;
4787
4788         /* tell fw we're ready to fire cmds */
4789         status = be_cmd_fw_init(adapter);
4790         if (status)
4791                 goto err;
4792
4793         status = be_setup(adapter);
4794         if (status)
4795                 goto err;
4796
4797         if (netif_running(netdev)) {
4798                 status = be_open(netdev);
4799                 if (status)
4800                         goto err;
4801         }
4802
4803         schedule_delayed_work(&adapter->func_recovery_work,
4804                               msecs_to_jiffies(1000));
4805         netif_device_attach(netdev);
4806         return;
4807 err:
4808         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4809 }
4810
4811 static const struct pci_error_handlers be_eeh_handlers = {
4812         .error_detected = be_eeh_err_detected,
4813         .slot_reset = be_eeh_reset,
4814         .resume = be_eeh_resume,
4815 };
4816
4817 static struct pci_driver be_driver = {
4818         .name = DRV_NAME,
4819         .id_table = be_dev_ids,
4820         .probe = be_probe,
4821         .remove = be_remove,
4822         .suspend = be_suspend,
4823         .resume = be_resume,
4824         .shutdown = be_shutdown,
4825         .err_handler = &be_eeh_handlers
4826 };
4827
4828 static int __init be_init_module(void)
4829 {
4830         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4831             rx_frag_size != 2048) {
4832                 pr_warn(DRV_NAME
4833                         ": Module param rx_frag_size must be 2048/4096/8192."
4834                         " Using 2048\n");
4835                 rx_frag_size = 2048;
4836         }
4837
4838         return pci_register_driver(&be_driver);
4839 }
4840 module_init(be_init_module);
4841
4842 static void __exit be_exit_module(void)
4843 {
4844         pci_unregister_driver(&be_driver);
4845 }
4846 module_exit(be_exit_module);