2f02bcbf31643e3c4e34acb4f061da5914a27d90
[cascardo/linux.git] / drivers / net / ethernet / emulex / benet / be_main.c
1 /*
2  * Copyright (C) 2005 - 2014 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24 #include <linux/if_bridge.h>
25 #include <net/busy_poll.h>
26
27 MODULE_VERSION(DRV_VER);
28 MODULE_DEVICE_TABLE(pci, be_dev_ids);
29 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
30 MODULE_AUTHOR("Emulex Corporation");
31 MODULE_LICENSE("GPL");
32
33 static unsigned int num_vfs;
34 module_param(num_vfs, uint, S_IRUGO);
35 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
36
37 static ushort rx_frag_size = 2048;
38 module_param(rx_frag_size, ushort, S_IRUGO);
39 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
41 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
42         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
43         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
44         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
45         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
46         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
47         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
48         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
49         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
50         { 0 }
51 };
52 MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: human-readable name for each unrecoverable-error
 * bit; array index corresponds to the bit position in the register.
 * NOTE(review): trailing spaces in some entries are preserved as-is —
 * presumably consumed verbatim by log formatting; confirm before tidying.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: human-readable name for each unrecoverable-error
 * bit; array index corresponds to the bit position in the register.
 * Bits 24-31 are reserved/undocumented ("Unknown").
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
123
124
125 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126 {
127         struct be_dma_mem *mem = &q->dma_mem;
128         if (mem->va) {
129                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
130                                   mem->dma);
131                 mem->va = NULL;
132         }
133 }
134
135 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136                 u16 len, u16 entry_size)
137 {
138         struct be_dma_mem *mem = &q->dma_mem;
139
140         memset(q, 0, sizeof(*q));
141         q->len = len;
142         q->entry_size = entry_size;
143         mem->size = len * entry_size;
144         mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145                                       GFP_KERNEL);
146         if (!mem->va)
147                 return -ENOMEM;
148         return 0;
149 }
150
151 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
152 {
153         u32 reg, enabled;
154
155         pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
156                                 &reg);
157         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
158
159         if (!enabled && enable)
160                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
161         else if (enabled && !enable)
162                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
163         else
164                 return;
165
166         pci_write_config_dword(adapter->pdev,
167                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
168 }
169
170 static void be_intr_set(struct be_adapter *adapter, bool enable)
171 {
172         int status = 0;
173
174         /* On lancer interrupts can't be controlled via this register */
175         if (lancer_chip(adapter))
176                 return;
177
178         if (adapter->eeh_error)
179                 return;
180
181         status = be_cmd_intr_set(adapter, enable);
182         if (status)
183                 be_reg_intr_set(adapter, enable);
184 }
185
186 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
187 {
188         u32 val = 0;
189         val |= qid & DB_RQ_RING_ID_MASK;
190         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
191
192         wmb();
193         iowrite32(val, adapter->db + DB_RQ_OFFSET);
194 }
195
196 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
197                           u16 posted)
198 {
199         u32 val = 0;
200         val |= txo->q.id & DB_TXULP_RING_ID_MASK;
201         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
202
203         wmb();
204         iowrite32(val, adapter->db + txo->db_offset);
205 }
206
207 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
208                 bool arm, bool clear_int, u16 num_popped)
209 {
210         u32 val = 0;
211         val |= qid & DB_EQ_RING_ID_MASK;
212         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
213                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
214
215         if (adapter->eeh_error)
216                 return;
217
218         if (arm)
219                 val |= 1 << DB_EQ_REARM_SHIFT;
220         if (clear_int)
221                 val |= 1 << DB_EQ_CLR_SHIFT;
222         val |= 1 << DB_EQ_EVNT_SHIFT;
223         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
224         iowrite32(val, adapter->db + DB_EQ_OFFSET);
225 }
226
227 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
228 {
229         u32 val = 0;
230         val |= qid & DB_CQ_RING_ID_MASK;
231         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
233
234         if (adapter->eeh_error)
235                 return;
236
237         if (arm)
238                 val |= 1 << DB_CQ_REARM_SHIFT;
239         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
240         iowrite32(val, adapter->db + DB_CQ_OFFSET);
241 }
242
/* ndo_set_mac_address handler: program a new MAC via FW and update
 * netdev->dev_addr only after the FW confirms the MAC is active.
 * Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	/* remember the currently programmed pmac so it can be deleted once
	 * the new one is in place
	 */
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}
	/* NOTE(review): if PMAC_ADD failed, curr_pmac_id stays 0 here —
	 * presumably pmac-id 0 queries the PF-programmed MAC; confirm against
	 * be_cmd_get_active_mac() semantics.
	 */

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
304 /* BE2 supports only v0 cmd */
305 static void *hw_stats_from_cmd(struct be_adapter *adapter)
306 {
307         if (BE2_chip(adapter)) {
308                 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310                 return &cmd->hw_stats;
311         } else if (BE3_chip(adapter)) {
312                 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314                 return &cmd->hw_stats;
315         } else {
316                 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318                 return &cmd->hw_stats;
319         }
320 }
321
322 /* BE2 supports only v0 cmd */
323 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324 {
325         if (BE2_chip(adapter)) {
326                 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328                 return &hw_stats->erx;
329         } else if (BE3_chip(adapter)) {
330                 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332                 return &hw_stats->erx;
333         } else {
334                 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336                 return &hw_stats->erx;
337         }
338 }
339
/* Copy the v0 (BE2) FW stats layout into the driver's stats cache for
 * the port this function is attached to.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW fills the buffer in LE; convert the whole block in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan filtering separately; fold into one */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 reports jabber events per physical port, not per function */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
388
/* Copy the v1 (BE3) FW stats layout into the driver's stats cache for
 * the port this function is attached to.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW fills the buffer in LE; convert the whole block in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined filtered counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	/* unlike v0, jabber events are reported per port_stats entry */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
434
/* Copy the v2 (Skyhawk and later) FW stats layout into the driver's
 * stats cache, including the RoCE counters when RoCE is supported.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW fills the buffer in LE; convert the whole block in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	/* v2 additionally exposes RoCE counters on RoCE-capable functions */
	if (be_roce_supported(adapter))  {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
488
/* Copy the Lancer pport stats layout into the driver's stats cache.
 * Lancer exposes 64-bit counters as _lo/_hi pairs; only the low words
 * are consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* FW fills the buffer in LE; convert the whole block in place first */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* like BE2's v0 layout, address and vlan filtering are reported
	 * separately and folded into one driver counter
	 */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	/* the same HW counter feeds both fifo-overflow driver stats */
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
527
528 static void accumulate_16bit_val(u32 *acc, u16 val)
529 {
530 #define lo(x)                   (x & 0xFFFF)
531 #define hi(x)                   (x & 0xFFFF0000)
532         bool wrapped = val < lo(*acc);
533         u32 newacc = hi(*acc) + val;
534
535         if (wrapped)
536                 newacc += 65536;
537         ACCESS_ONCE(*acc) = newacc;
538 }
539
540 static void populate_erx_stats(struct be_adapter *adapter,
541                         struct be_rx_obj *rxo,
542                         u32 erx_stat)
543 {
544         if (!BEx_chip(adapter))
545                 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
546         else
547                 /* below erx HW counter can actually wrap around after
548                  * 65535. Driver accumulates a 32-bit value
549                  */
550                 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
551                                      (u16)erx_stat);
552 }
553
554 void be_parse_stats(struct be_adapter *adapter)
555 {
556         struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
557         struct be_rx_obj *rxo;
558         int i;
559         u32 erx_stat;
560
561         if (lancer_chip(adapter)) {
562                 populate_lancer_stats(adapter);
563         } else {
564                 if (BE2_chip(adapter))
565                         populate_be_v0_stats(adapter);
566                 else if (BE3_chip(adapter))
567                         /* for BE3 */
568                         populate_be_v1_stats(adapter);
569                 else
570                         populate_be_v2_stats(adapter);
571
572                 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
573                 for_all_rx_queues(adapter, rxo, i) {
574                         erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
575                         populate_erx_stats(adapter, rxo, erx_stat);
576                 }
577         }
578 }
579
/* ndo_get_stats64 handler: aggregate per-queue SW counters and cached
 * FW error counters into @stats. Returns @stats for caller convenience.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* seqcount retry loop: re-read if a writer updated the
		 * 64-bit counters while we were sampling them
		 */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* same seqcount retry scheme as for the RX counters above */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
645
646 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
647 {
648         struct net_device *netdev = adapter->netdev;
649
650         if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
651                 netif_carrier_off(netdev);
652                 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
653         }
654
655         if (link_status)
656                 netif_carrier_on(netdev);
657         else
658                 netif_carrier_off(netdev);
659 }
660
661 static void be_tx_stats_update(struct be_tx_obj *txo,
662                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
663 {
664         struct be_tx_stats *stats = tx_stats(txo);
665
666         u64_stats_update_begin(&stats->sync);
667         stats->tx_reqs++;
668         stats->tx_wrbs += wrb_cnt;
669         stats->tx_bytes += copied;
670         stats->tx_pkts += (gso_segs ? gso_segs : 1);
671         if (stopped)
672                 stats->tx_stops++;
673         u64_stats_update_end(&stats->sync);
674 }
675
676 /* Determine number of WRB entries needed to xmit data in an skb */
677 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
678                                                                 bool *dummy)
679 {
680         int cnt = (skb->len > skb->data_len);
681
682         cnt += skb_shinfo(skb)->nr_frags;
683
684         /* to account for hdr wrb */
685         cnt++;
686         if (lancer_chip(adapter) || !(cnt & 1)) {
687                 *dummy = false;
688         } else {
689                 /* add a dummy to make it an even num */
690                 cnt++;
691                 *dummy = true;
692         }
693         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
694         return cnt;
695 }
696
697 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
698 {
699         wrb->frag_pa_hi = upper_32_bits(addr);
700         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
701         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
702         wrb->rsvd0 = 0;
703 }
704
705 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
706                                         struct sk_buff *skb)
707 {
708         u8 vlan_prio;
709         u16 vlan_tag;
710
711         vlan_tag = vlan_tx_tag_get(skb);
712         vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
713         /* If vlan priority provided by OS is NOT in available bmap */
714         if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
715                 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
716                                 adapter->recommended_prio;
717
718         return vlan_tag;
719 }
720
/* Fill the header WRB that precedes the data WRBs of a TX request:
 * offload flags (LSO/checksum), VLAN tag, WRB count and total length.
 * @skip_hw_vlan: when true, HW VLAN tagging is suppressed via the
 * event/complete bit trick below.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* LSO: hand the MSS to HW for segmentation */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lancer has no separate lso6 bit */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* request L4 checksum offload for TCP or UDP payloads */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
755
756 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
757                 bool unmap_single)
758 {
759         dma_addr_t dma;
760
761         be_dws_le_to_cpu(wrb, sizeof(*wrb));
762
763         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
764         if (wrb->frag_len) {
765                 if (unmap_single)
766                         dma_unmap_single(dev, dma, wrb->frag_len,
767                                          DMA_TO_DEVICE);
768                 else
769                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
770         }
771 }
772
/* Queue one hdr WRB, one data WRB per fragment (linear head + paged
 * frags), and optionally a dummy WRB for the given skb.
 * Returns the number of data bytes queued, or 0 on a DMA mapping
 * failure; on failure all mappings made so far are undone and txq->head
 * is rolled back.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        /* Reserve the first slot for the hdr WRB; it is filled last, once
         * the total copied length is known.
         */
        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;   /* first data-WRB slot; unwind point on error */

        if (skb->len > skb->data_len) {
                /* Map the linear (head) portion of the skb */
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        /* Map each paged fragment into its own WRB */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        /* Pad with a zero-length WRB when the caller requires an even count */
        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        /* Unmap everything mapped above; only the first WRB can be a
         * single mapping, hence map_single is cleared after one pass.
         */
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}
839
/* Insert the VLAN tag (tx-tag or pvid) directly into the packet data
 * instead of letting HW tag it; in QnQ mode the outer qnq VLAN is also
 * inserted. May set *skip_hw_vlan so the caller programs the WRB to skip
 * HW tagging.
 * Returns the (possibly reallocated) skb, or NULL on allocation failure.
 * NOTE(review): on failure __vlan_put_tag is presumed to have consumed
 * the skb, so returning NULL here does not leak — confirm against the
 * kernel version this driver targets.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        /* We may modify pkt data; get a private copy if the skb is shared */
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
                 * skip VLAN insertion
                 */
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0; /* tag now lives in pkt data, not metadata */
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}
882
883 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
884 {
885         struct ethhdr *eh = (struct ethhdr *)skb->data;
886         u16 offset = ETH_HLEN;
887
888         if (eh->h_proto == htons(ETH_P_IPV6)) {
889                 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
890
891                 offset += sizeof(struct ipv6hdr);
892                 if (ip6h->nexthdr != NEXTHDR_TCP &&
893                     ip6h->nexthdr != NEXTHDR_UDP) {
894                         struct ipv6_opt_hdr *ehdr =
895                                 (struct ipv6_opt_hdr *) (skb->data + offset);
896
897                         /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
898                         if (ehdr->hdrlen == 0xff)
899                                 return true;
900                 }
901         }
902         return false;
903 }
904
905 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
906 {
907         return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
908 }
909
910 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
911                                 struct sk_buff *skb)
912 {
913         return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
914 }
915
/* Apply BEx/Lancer-specific Tx workarounds to the skb.
 * Returns the (possibly modified/reallocated) skb, or NULL when the pkt
 * was dropped or an allocation failed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
                                                  struct sk_buff *skb,
                                                  bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* For padded packets, BE HW modifies tot_len field in IP header
         * incorrecly when VLAN tag is inserted by HW.
         * For padded packets, Lancer computes incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                /* trim off the pad so HW/Lancer sees the real IP length;
                 * NOTE(review): pskb_trim() return value is ignored here —
                 * it can fail on a cloned skb; confirm this is acceptable
                 */
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in pvid-tagging mode
         */
        if (be_pvid_tagging_enabled(adapter) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                        *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto err;
        }

        /* HW may lockup when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
            (adapter->pvid || adapter->qnq_vid) &&
            !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto err;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
err:
        return NULL;
}
983
984 static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
985                                            struct sk_buff *skb,
986                                            bool *skip_hw_vlan)
987 {
988         /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
989          * less may cause a transmit stall on that port. So the work-around is
990          * to pad short packets (<= 32 bytes) to a 36-byte length.
991          */
992         if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
993                 if (skb_padto(skb, 36))
994                         return NULL;
995                 skb->len = 36;
996         }
997
998         if (BEx_chip(adapter) || lancer_chip(adapter)) {
999                 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1000                 if (!skb)
1001                         return NULL;
1002         }
1003
1004         return skb;
1005 }
1006
/* ndo_start_xmit handler: apply HW workarounds, build the WRBs, and ring
 * the Tx doorbell. Always returns NETDEV_TX_OK; pkts that cannot be sent
 * are dropped and counted in tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;  /* sent_skb slot; also rollback point */

        /* May pad, trim, re-tag or drop (NULL) the skb */
        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb) {
                tx_stats(txo)->tx_drv_drops++;
                return NETDEV_TX_OK;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialze the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                /* WRB setup failed (DMA map error): undo head advance,
                 * drop the pkt
                 */
                txq->head = start;
                tx_stats(txo)->tx_drv_drops++;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}
1055
1056 static int be_change_mtu(struct net_device *netdev, int new_mtu)
1057 {
1058         struct be_adapter *adapter = netdev_priv(netdev);
1059         if (new_mtu < BE_MIN_MTU ||
1060                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1061                                         (ETH_HLEN + ETH_FCS_LEN))) {
1062                 dev_info(&adapter->pdev->dev,
1063                         "MTU must be between %d and %d bytes\n",
1064                         BE_MIN_MTU,
1065                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
1066                 return -EINVAL;
1067         }
1068         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1069                         netdev->mtu, new_mtu);
1070         netdev->mtu = new_mtu;
1071         return 0;
1072 }
1073
1074 /*
1075  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1076  * If the user configures more, place BE in vlan promiscuous mode.
1077  */
1078 static int be_vid_config(struct be_adapter *adapter)
1079 {
1080         u16 vids[BE_NUM_VLANS_SUPPORTED];
1081         u16 num = 0, i;
1082         int status = 0;
1083
1084         /* No need to further configure vids if in promiscuous mode */
1085         if (adapter->promiscuous)
1086                 return 0;
1087
1088         if (adapter->vlans_added > be_max_vlans(adapter))
1089                 goto set_vlan_promisc;
1090
1091         /* Construct VLAN Table to give to HW */
1092         for (i = 0; i < VLAN_N_VID; i++)
1093                 if (adapter->vlan_tag[i])
1094                         vids[num++] = cpu_to_le16(i);
1095
1096         status = be_cmd_vlan_config(adapter, adapter->if_handle,
1097                                     vids, num, 0);
1098
1099         if (status) {
1100                 /* Set to VLAN promisc mode as setting VLAN filter failed */
1101                 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
1102                         goto set_vlan_promisc;
1103                 dev_err(&adapter->pdev->dev,
1104                         "Setting HW VLAN filtering failed.\n");
1105         } else {
1106                 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1107                         /* hw VLAN filtering re-enabled. */
1108                         status = be_cmd_rx_filter(adapter,
1109                                                   BE_FLAGS_VLAN_PROMISC, OFF);
1110                         if (!status) {
1111                                 dev_info(&adapter->pdev->dev,
1112                                          "Disabling VLAN Promiscuous mode.\n");
1113                                 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1114                         }
1115                 }
1116         }
1117
1118         return status;
1119
1120 set_vlan_promisc:
1121         if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
1122                 return 0;
1123
1124         status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1125         if (!status) {
1126                 dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
1127                 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1128         } else
1129                 dev_err(&adapter->pdev->dev,
1130                         "Failed to enable VLAN Promiscuous mode.\n");
1131         return status;
1132 }
1133
1134 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1135 {
1136         struct be_adapter *adapter = netdev_priv(netdev);
1137         int status = 0;
1138
1139         /* Packets with VID 0 are always received by Lancer by default */
1140         if (lancer_chip(adapter) && vid == 0)
1141                 goto ret;
1142
1143         adapter->vlan_tag[vid] = 1;
1144         adapter->vlans_added++;
1145
1146         status = be_vid_config(adapter);
1147         if (status) {
1148                 adapter->vlans_added--;
1149                 adapter->vlan_tag[vid] = 0;
1150         }
1151 ret:
1152         return status;
1153 }
1154
1155 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1156 {
1157         struct be_adapter *adapter = netdev_priv(netdev);
1158         int status = 0;
1159
1160         /* Packets with VID 0 are always received by Lancer by default */
1161         if (lancer_chip(adapter) && vid == 0)
1162                 goto ret;
1163
1164         adapter->vlan_tag[vid] = 0;
1165         status = be_vid_config(adapter);
1166         if (!status)
1167                 adapter->vlans_added--;
1168         else
1169                 adapter->vlan_tag[vid] = 1;
1170 ret:
1171         return status;
1172 }
1173
1174 static void be_clear_promisc(struct be_adapter *adapter)
1175 {
1176         adapter->promiscuous = false;
1177         adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1178
1179         be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1180 }
1181
/* ndo_set_rx_mode handler: program promiscuous / multicast / unicast
 * filtering into HW based on the netdev's current flags and addr lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                be_clear_promisc(adapter);
                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > be_max_mc(adapter)) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        /* Rebuild the unicast pmac list when it changed: delete all
         * secondary MACs, then re-add from the netdev's uc list.
         */
        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                /* Too many uc addrs for HW filters: fall back to promisc */
                if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}
1241
1242 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1243 {
1244         struct be_adapter *adapter = netdev_priv(netdev);
1245         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1246         int status;
1247
1248         if (!sriov_enabled(adapter))
1249                 return -EPERM;
1250
1251         if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1252                 return -EINVAL;
1253
1254         if (BEx_chip(adapter)) {
1255                 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1256                                 vf + 1);
1257
1258                 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1259                                          &vf_cfg->pmac_id, vf + 1);
1260         } else {
1261                 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1262                                         vf + 1);
1263         }
1264
1265         if (status)
1266                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1267                                 mac, vf);
1268         else
1269                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1270
1271         return status;
1272 }
1273
1274 static int be_get_vf_config(struct net_device *netdev, int vf,
1275                         struct ifla_vf_info *vi)
1276 {
1277         struct be_adapter *adapter = netdev_priv(netdev);
1278         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1279
1280         if (!sriov_enabled(adapter))
1281                 return -EPERM;
1282
1283         if (vf >= adapter->num_vfs)
1284                 return -EINVAL;
1285
1286         vi->vf = vf;
1287         vi->tx_rate = vf_cfg->tx_rate;
1288         vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1289         vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1290         memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1291         vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
1292
1293         return 0;
1294 }
1295
1296 static int be_set_vf_vlan(struct net_device *netdev,
1297                         int vf, u16 vlan, u8 qos)
1298 {
1299         struct be_adapter *adapter = netdev_priv(netdev);
1300         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1301         int status = 0;
1302
1303         if (!sriov_enabled(adapter))
1304                 return -EPERM;
1305
1306         if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1307                 return -EINVAL;
1308
1309         if (vlan || qos) {
1310                 vlan |= qos << VLAN_PRIO_SHIFT;
1311                 if (vf_cfg->vlan_tag != vlan)
1312                         status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1313                                                        vf_cfg->if_handle, 0);
1314         } else {
1315                 /* Reset Transparent Vlan Tagging. */
1316                 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1317                                                vf + 1, vf_cfg->if_handle, 0);
1318         }
1319
1320         if (!status)
1321                 vf_cfg->vlan_tag = vlan;
1322         else
1323                 dev_info(&adapter->pdev->dev,
1324                          "VLAN %d config on VF %d failed\n", vlan, vf);
1325         return status;
1326 }
1327
1328 static int be_set_vf_tx_rate(struct net_device *netdev,
1329                         int vf, int rate)
1330 {
1331         struct be_adapter *adapter = netdev_priv(netdev);
1332         int status = 0;
1333
1334         if (!sriov_enabled(adapter))
1335                 return -EPERM;
1336
1337         if (vf >= adapter->num_vfs)
1338                 return -EINVAL;
1339
1340         if (rate < 100 || rate > 10000) {
1341                 dev_err(&adapter->pdev->dev,
1342                         "tx rate must be between 100 and 10000 Mbps\n");
1343                 return -EINVAL;
1344         }
1345
1346         if (lancer_chip(adapter))
1347                 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1348         else
1349                 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1350
1351         if (status)
1352                 dev_err(&adapter->pdev->dev,
1353                                 "tx rate %d on VF %d failed\n", rate, vf);
1354         else
1355                 adapter->vf_cfg[vf].tx_rate = rate;
1356         return status;
1357 }
1358 static int be_set_vf_link_state(struct net_device *netdev, int vf,
1359                                 int link_state)
1360 {
1361         struct be_adapter *adapter = netdev_priv(netdev);
1362         int status;
1363
1364         if (!sriov_enabled(adapter))
1365                 return -EPERM;
1366
1367         if (vf >= adapter->num_vfs)
1368                 return -EINVAL;
1369
1370         status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1371         if (!status)
1372                 adapter->vf_cfg[vf].plink_tracking = link_state;
1373
1374         return status;
1375 }
1376
1377 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1378                           ulong now)
1379 {
1380         aic->rx_pkts_prev = rx_pkts;
1381         aic->tx_reqs_prev = tx_pkts;
1382         aic->jiffies = now;
1383 }
1384
/* Adaptive interrupt coalescing: derive a new eq-delay for every event
 * queue from the rx+tx pkt rate since the last run, clamp it to the
 * per-queue min/max, and push any changed values to the FW in one cmd.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
        struct be_set_eqd set_eqd[MAX_EVT_QS];
        int eqd, i, num = 0, start;
        struct be_aic_obj *aic;
        struct be_eq_obj *eqo;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 rx_pkts, tx_pkts;
        ulong now;
        u32 pps, delta;

        for_all_evt_queues(adapter, eqo, i) {
                aic = &adapter->aic_obj[eqo->idx];
                if (!aic->enable) {
                        /* adaptive mode off: use the statically set delay */
                        if (aic->jiffies)
                                aic->jiffies = 0;
                        eqd = aic->et_eqd;
                        goto modify_eqd;
                }

                /* Read pkt counters consistently via the stats seqlock */
                rxo = &adapter->rx_obj[eqo->idx];
                do {
                        start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
                        rx_pkts = rxo->stats.rx_pkts;
                } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));

                txo = &adapter->tx_obj[eqo->idx];
                do {
                        start = u64_stats_fetch_begin_bh(&txo->stats.sync);
                        tx_pkts = txo->stats.tx_reqs;
                } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));


                /* Skip, if wrapped around or first calculation */
                now = jiffies;
                if (!aic->jiffies || time_before(now, aic->jiffies) ||
                    rx_pkts < aic->rx_pkts_prev ||
                    tx_pkts < aic->tx_reqs_prev) {
                        be_aic_update(aic, rx_pkts, tx_pkts, now);
                        continue;
                }

                /* pkts/sec since the last snapshot, rx + tx combined */
                delta = jiffies_to_msecs(now - aic->jiffies);
                pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
                        (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
                eqd = (pps / 15000) << 2;

                if (eqd < 8)
                        eqd = 0;        /* low traffic: no delay */
                eqd = min_t(u32, eqd, aic->max_eqd);
                eqd = max_t(u32, eqd, aic->min_eqd);

                be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
                /* Only queue a FW update when the delay actually changed */
                if (eqd != aic->prev_eqd) {
                        set_eqd[num].delay_multiplier = (eqd * 65)/100;
                        set_eqd[num].eq_id = eqo->q.id;
                        aic->prev_eqd = eqd;
                        num++;
                }
        }

        if (num)
                be_cmd_modify_eqd(adapter, set_eqd, num);
}
1451
1452 static void be_rx_stats_update(struct be_rx_obj *rxo,
1453                 struct be_rx_compl_info *rxcp)
1454 {
1455         struct be_rx_stats *stats = rx_stats(rxo);
1456
1457         u64_stats_update_begin(&stats->sync);
1458         stats->rx_compl++;
1459         stats->rx_bytes += rxcp->pkt_size;
1460         stats->rx_pkts++;
1461         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1462                 stats->rx_mcast_pkts++;
1463         if (rxcp->err)
1464                 stats->rx_compl_err++;
1465         u64_stats_update_end(&stats->sync);
1466 }
1467
1468 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1469 {
1470         /* L4 checksum is not reliable for non TCP/UDP packets.
1471          * Also ignore ipcksm for ipv6 pkts */
1472         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1473                                 (rxcp->ip_csum || rxcp->ipv6);
1474 }
1475
/* Pop the page_info at the rxq tail: unmap its DMA page (if this frag is
 * the last user of the big page) or just sync the fragment for CPU
 * access, then advance the tail and drop the queue's used count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;
        u16 frag_idx = rxq->tail;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_frag) {
                /* Last frag carved from this page: release the mapping */
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_frag = false;
        } else {
                /* Page still shared with later frags: only sync this one */
                dma_sync_single_for_cpu(&adapter->pdev->dev,
                                        dma_unmap_addr(rx_page_info, bus),
                                        rx_frag_size, DMA_FROM_DEVICE);
        }

        queue_tail_inc(rxq);
        atomic_dec(&rxq->used);
        return rx_page_info;
}
1501
1502 /* Throwaway the data in the Rx completion */
1503 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1504                                 struct be_rx_compl_info *rxcp)
1505 {
1506         struct be_rx_page_info *page_info;
1507         u16 i, num_rcvd = rxcp->num_rcvd;
1508
1509         for (i = 0; i < num_rcvd; i++) {
1510                 page_info = get_rx_page_info(rxo);
1511                 put_page(page_info->page);
1512                 memset(page_info, 0, sizeof(*page_info));
1513         }
1514 }
1515
1516 /*
1517  * skb_fill_rx_data forms a complete skb for an ether frame
1518  * indicated by rxcp.
1519  */
1520 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1521                              struct be_rx_compl_info *rxcp)
1522 {
1523         struct be_rx_page_info *page_info;
1524         u16 i, j;
1525         u16 hdr_len, curr_frag_len, remaining;
1526         u8 *start;
1527
1528         page_info = get_rx_page_info(rxo);
1529         start = page_address(page_info->page) + page_info->page_offset;
1530         prefetch(start);
1531
1532         /* Copy data in the first descriptor of this completion */
1533         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1534
1535         skb->len = curr_frag_len;
1536         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1537                 memcpy(skb->data, start, curr_frag_len);
1538                 /* Complete packet has now been moved to data */
1539                 put_page(page_info->page);
1540                 skb->data_len = 0;
1541                 skb->tail += curr_frag_len;
1542         } else {
1543                 hdr_len = ETH_HLEN;
1544                 memcpy(skb->data, start, hdr_len);
1545                 skb_shinfo(skb)->nr_frags = 1;
1546                 skb_frag_set_page(skb, 0, page_info->page);
1547                 skb_shinfo(skb)->frags[0].page_offset =
1548                                         page_info->page_offset + hdr_len;
1549                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1550                 skb->data_len = curr_frag_len - hdr_len;
1551                 skb->truesize += rx_frag_size;
1552                 skb->tail += hdr_len;
1553         }
1554         page_info->page = NULL;
1555
1556         if (rxcp->pkt_size <= rx_frag_size) {
1557                 BUG_ON(rxcp->num_rcvd != 1);
1558                 return;
1559         }
1560
1561         /* More frags present for this completion */
1562         remaining = rxcp->pkt_size - curr_frag_len;
1563         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1564                 page_info = get_rx_page_info(rxo);
1565                 curr_frag_len = min(remaining, rx_frag_size);
1566
1567                 /* Coalesce all frags from the same physical page in one slot */
1568                 if (page_info->page_offset == 0) {
1569                         /* Fresh page */
1570                         j++;
1571                         skb_frag_set_page(skb, j, page_info->page);
1572                         skb_shinfo(skb)->frags[j].page_offset =
1573                                                         page_info->page_offset;
1574                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1575                         skb_shinfo(skb)->nr_frags++;
1576                 } else {
1577                         put_page(page_info->page);
1578                 }
1579
1580                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1581                 skb->len += curr_frag_len;
1582                 skb->data_len += curr_frag_len;
1583                 skb->truesize += rx_frag_size;
1584                 remaining -= curr_frag_len;
1585                 page_info->page = NULL;
1586         }
1587         BUG_ON(j > MAX_SKB_FRAGS);
1588 }
1589
1590 /* Process the RX completion indicated by rxcp when GRO is disabled */
1591 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
1592                                 struct be_rx_compl_info *rxcp)
1593 {
1594         struct be_adapter *adapter = rxo->adapter;
1595         struct net_device *netdev = adapter->netdev;
1596         struct sk_buff *skb;
1597
1598         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1599         if (unlikely(!skb)) {
1600                 rx_stats(rxo)->rx_drops_no_skbs++;
1601                 be_rx_compl_discard(rxo, rxcp);
1602                 return;
1603         }
1604
1605         skb_fill_rx_data(rxo, skb, rxcp);
1606
1607         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1608                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1609         else
1610                 skb_checksum_none_assert(skb);
1611
1612         skb->protocol = eth_type_trans(skb, netdev);
1613         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1614         if (netdev->features & NETIF_F_RXHASH)
1615                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1616         skb_mark_napi_id(skb, napi);
1617
1618         if (rxcp->vlanf)
1619                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1620
1621         netif_receive_skb(skb);
1622 }
1623
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * The posted RX fragments are attached directly to the napi-provided
 * skb (no copy) and handed to napi_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;	/* i: rcvd frag index, j: skb frag slot */

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the frame, reclaim its frags */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as the previous frag: the slot already
			 * holds a reference to it, so drop this extra one.
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only when the HW L4 csum check passed */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1679
1680 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1681                                  struct be_rx_compl_info *rxcp)
1682 {
1683         rxcp->pkt_size =
1684                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1685         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1686         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1687         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1688         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1689         rxcp->ip_csum =
1690                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1691         rxcp->l4_csum =
1692                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1693         rxcp->ipv6 =
1694                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1695         rxcp->num_rcvd =
1696                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1697         rxcp->pkt_type =
1698                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1699         rxcp->rss_hash =
1700                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1701         if (rxcp->vlanf) {
1702                 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
1703                                           compl);
1704                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1705                                                compl);
1706         }
1707         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1708 }
1709
1710 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1711                                  struct be_rx_compl_info *rxcp)
1712 {
1713         rxcp->pkt_size =
1714                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1715         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1716         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1717         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1718         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1719         rxcp->ip_csum =
1720                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1721         rxcp->l4_csum =
1722                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1723         rxcp->ipv6 =
1724                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1725         rxcp->num_rcvd =
1726                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1727         rxcp->pkt_type =
1728                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1729         rxcp->rss_hash =
1730                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1731         if (rxcp->vlanf) {
1732                 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
1733                                           compl);
1734                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1735                                                compl);
1736         }
1737         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1738         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1739                                       ip_frag, compl);
1740 }
1741
/* Pop the next valid RX completion from the RX CQ, or NULL if none.
 * The compl is byte-swapped in place, parsed into rxo->rxcp, and then
 * invalidated so it is not seen again.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Don't read the rest of the compl before the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* The L4 checksum is not valid for an IP fragment */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Don't report a tag that merely echoes the port's pvid,
		 * unless that vlan id is actually configured on the host.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1786
1787 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1788 {
1789         u32 order = get_order(size);
1790
1791         if (order > 0)
1792                 gfp |= __GFP_COMP;
1793         return  alloc_pages(gfp, order);
1794 }
1795
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Post up to MAX_RX_POST frags, stopping at the first RXQ slot
	 * that still holds an unconsumed page.
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Map a fresh "big" page; it is carved into
			 * rx_frag_size chunks by the iterations below.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_offset = 0;
		} else {
			/* Another frag of the current page: take an extra
			 * page reference for it.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			/* Record the page-level DMA address on the last
			 * frag of a page.
			 */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Tell HW about the newly posted buffers */
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1874
/* Pop the next valid TX completion from @tx_cq, or NULL if none.
 * The compl is byte-swapped in place and its valid bit cleared so it
 * is not processed twice.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Don't read the rest of the compl before the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Invalidate the consumed entry */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1890
/* Unmap the wrbs of one completed TX request and free its skb.
 * @last_index: index of the request's last wrb in the TXQ.
 * Returns the number of wrbs consumed (including the header wrb).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb was saved at the index of its header wrb (current tail) */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The skb header mapping is unmapped along with the first
		 * data wrb only (when the skb has a linear part).
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1922
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	/* Consume EQ entries until an invalid (zero) entry is found */
	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Don't reorder later loads before the valid check above */
		rmb();
		eqe->evt = 0;	/* invalidate the consumed entry */
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1942
1943 /* Leaves the EQ is disarmed state */
1944 static void be_eq_clean(struct be_eq_obj *eqo)
1945 {
1946         int num = events_get(eqo);
1947
1948         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1949 }
1950
/* Drain the RX CQ and then free all posted-but-unused RX buffers.
 * Leaves the CQ unarmed and the RXQ empty.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1999
/* Drain all TX completions at tear-down time, then forcibly reclaim any
 * posted TX requests whose completions never arrived.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			/* Reap whatever completions have arrived so far */
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the request's last wrb index from its
			 * skb, then reclaim all of its wrbs.
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2058
2059 static void be_evt_queues_destroy(struct be_adapter *adapter)
2060 {
2061         struct be_eq_obj *eqo;
2062         int i;
2063
2064         for_all_evt_queues(adapter, eqo, i) {
2065                 if (eqo->q.created) {
2066                         be_eq_clean(eqo);
2067                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2068                         napi_hash_del(&eqo->napi);
2069                         netif_napi_del(&eqo->napi);
2070                 }
2071                 be_queue_free(adapter, &eqo->q);
2072         }
2073 }
2074
2075 static int be_evt_queues_create(struct be_adapter *adapter)
2076 {
2077         struct be_queue_info *eq;
2078         struct be_eq_obj *eqo;
2079         struct be_aic_obj *aic;
2080         int i, rc;
2081
2082         adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2083                                     adapter->cfg_num_qs);
2084
2085         for_all_evt_queues(adapter, eqo, i) {
2086                 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2087                                BE_NAPI_WEIGHT);
2088                 napi_hash_add(&eqo->napi);
2089                 aic = &adapter->aic_obj[i];
2090                 eqo->adapter = adapter;
2091                 eqo->tx_budget = BE_TX_BUDGET;
2092                 eqo->idx = i;
2093                 aic->max_eqd = BE_MAX_EQD;
2094                 aic->enable = true;
2095
2096                 eq = &eqo->q;
2097                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2098                                         sizeof(struct be_eq_entry));
2099                 if (rc)
2100                         return rc;
2101
2102                 rc = be_cmd_eq_create(adapter, eqo);
2103                 if (rc)
2104                         return rc;
2105         }
2106         return 0;
2107 }
2108
2109 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2110 {
2111         struct be_queue_info *q;
2112
2113         q = &adapter->mcc_obj.q;
2114         if (q->created)
2115                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2116         be_queue_free(adapter, q);
2117
2118         q = &adapter->mcc_obj.cq;
2119         if (q->created)
2120                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2121         be_queue_free(adapter, q);
2122 }
2123
2124 /* Must be called only after TX qs are created as MCC shares TX EQ */
2125 static int be_mcc_queues_create(struct be_adapter *adapter)
2126 {
2127         struct be_queue_info *q, *cq;
2128
2129         cq = &adapter->mcc_obj.cq;
2130         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2131                         sizeof(struct be_mcc_compl)))
2132                 goto err;
2133
2134         /* Use the default EQ for MCC completions */
2135         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2136                 goto mcc_cq_free;
2137
2138         q = &adapter->mcc_obj.q;
2139         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2140                 goto mcc_cq_destroy;
2141
2142         if (be_cmd_mccq_create(adapter, q, cq))
2143                 goto mcc_q_free;
2144
2145         return 0;
2146
2147 mcc_q_free:
2148         be_queue_free(adapter, q);
2149 mcc_cq_destroy:
2150         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2151 mcc_cq_free:
2152         be_queue_free(adapter, cq);
2153 err:
2154         return -1;
2155 }
2156
2157 static void be_tx_queues_destroy(struct be_adapter *adapter)
2158 {
2159         struct be_queue_info *q;
2160         struct be_tx_obj *txo;
2161         u8 i;
2162
2163         for_all_tx_queues(adapter, txo, i) {
2164                 q = &txo->q;
2165                 if (q->created)
2166                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2167                 be_queue_free(adapter, q);
2168
2169                 q = &txo->cq;
2170                 if (q->created)
2171                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2172                 be_queue_free(adapter, q);
2173         }
2174 }
2175
2176 static int be_tx_qs_create(struct be_adapter *adapter)
2177 {
2178         struct be_queue_info *cq, *eq;
2179         struct be_tx_obj *txo;
2180         int status, i;
2181
2182         adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2183
2184         for_all_tx_queues(adapter, txo, i) {
2185                 cq = &txo->cq;
2186                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2187                                         sizeof(struct be_eth_tx_compl));
2188                 if (status)
2189                         return status;
2190
2191                 u64_stats_init(&txo->stats.sync);
2192                 u64_stats_init(&txo->stats.sync_compl);
2193
2194                 /* If num_evt_qs is less than num_tx_qs, then more than
2195                  * one txq share an eq
2196                  */
2197                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2198                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2199                 if (status)
2200                         return status;
2201
2202                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2203                                         sizeof(struct be_eth_wrb));
2204                 if (status)
2205                         return status;
2206
2207                 status = be_cmd_txq_create(adapter, txo);
2208                 if (status)
2209                         return status;
2210         }
2211
2212         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2213                  adapter->num_tx_qs);
2214         return 0;
2215 }
2216
2217 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2218 {
2219         struct be_queue_info *q;
2220         struct be_rx_obj *rxo;
2221         int i;
2222
2223         for_all_rx_queues(adapter, rxo, i) {
2224                 q = &rxo->cq;
2225                 if (q->created)
2226                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2227                 be_queue_free(adapter, q);
2228         }
2229 }
2230
2231 static int be_rx_cqs_create(struct be_adapter *adapter)
2232 {
2233         struct be_queue_info *eq, *cq;
2234         struct be_rx_obj *rxo;
2235         int rc, i;
2236
2237         /* We can create as many RSS rings as there are EQs. */
2238         adapter->num_rx_qs = adapter->num_evt_qs;
2239
2240         /* We'll use RSS only if atleast 2 RSS rings are supported.
2241          * When RSS is used, we'll need a default RXQ for non-IP traffic.
2242          */
2243         if (adapter->num_rx_qs > 1)
2244                 adapter->num_rx_qs++;
2245
2246         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2247         for_all_rx_queues(adapter, rxo, i) {
2248                 rxo->adapter = adapter;
2249                 cq = &rxo->cq;
2250                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2251                                 sizeof(struct be_eth_rx_compl));
2252                 if (rc)
2253                         return rc;
2254
2255                 u64_stats_init(&rxo->stats.sync);
2256                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2257                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2258                 if (rc)
2259                         return rc;
2260         }
2261
2262         dev_info(&adapter->pdev->dev,
2263                  "created %d RSS queue(s) and 1 default RX queue\n",
2264                  adapter->num_rx_qs - 1);
2265         return 0;
2266 }
2267
/* Legacy INTx interrupt handler */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2299
2300 static irqreturn_t be_msix(int irq, void *dev)
2301 {
2302         struct be_eq_obj *eqo = dev;
2303
2304         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2305         napi_schedule(&eqo->napi);
2306         return IRQ_HANDLED;
2307 }
2308
2309 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2310 {
2311         return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2312 }
2313
/* Reap up to @budget RX completions from rxo's CQ and deliver the
 * frames to the stack (GRO or regular path).  @polling distinguishes
 * NAPI polling from busy-polling.  Returns the number of completions
 * processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2369
/* Reap up to @budget Tx completions for TX queue @txo (subqueue @idx),
 * release the consumed wrbs and wake the netdev subqueue if it had been
 * flow-stopped.  Returns true when fewer than @budget completions were
 * found, i.e. Tx work is fully drained for now.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		/* free the skb/wrbs referenced by this completion */
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		/* ack the reaped completions and return the wrb space */
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs.  */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
2402
/* NAPI poll handler shared by all event queues: reaps Tx completions
 * first, then Rx (unless busy-poll currently owns the RX queues), then
 * MCC completions on the EQ that hosts the MCC queue.  The EQ is
 * re-armed only when all work fit within @budget.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* Tx not drained: report full budget to stay in polling */
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll owns the RX queues right now; retry later */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		/* all done: leave polling mode and re-arm the EQ */
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2447
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency busy-poll handler: reaps a small batch (4) of Rx
 * completions per RX queue on this EQ, with GRO disabled.  Returns
 * LL_FLUSH_BUSY when NAPI currently owns the queues, else the number
 * of packets processed on the first queue that had work.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2469
/* Probe the adapter for unrecoverable HW errors.
 * Lancer chips: read the SLIPORT status/error registers via the
 * doorbell BAR.  BE2/BE3/Skyhawk: read the UE status words from PCI
 * config space and mask off bits FW declared as expected.  On a real
 * error, details are logged and the carrier is turned off.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* nothing more to do if an error was already flagged */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* masked bits are expected/spurious; ignore them */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* log every set bit by name */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2545
2546 static void be_msix_disable(struct be_adapter *adapter)
2547 {
2548         if (msix_enabled(adapter)) {
2549                 pci_disable_msix(adapter->pdev);
2550                 adapter->num_msix_vec = 0;
2551                 adapter->num_msix_roce_vec = 0;
2552         }
2553 }
2554
/* Enable MSI-x.  Requests up to num_vec vectors but accepts as few as
 * MIN_MSIX_VECTORS; when RoCE is supported, half of the granted vectors
 * (beyond the minimum) are reserved for RoCE.  Returns 0 on success.
 * For a VF, MSI-x failure is fatal (INTx is unsupported), so the
 * negative pci_enable_msix_range() result is returned; a PF falls back
 * to INTx and 0 is returned.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCE. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* returns the number of vectors granted, or a negative errno */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2598
2599 static inline int be_msix_vec_get(struct be_adapter *adapter,
2600                                 struct be_eq_obj *eqo)
2601 {
2602         return adapter->msix_entries[eqo->msix_idx].vector;
2603 }
2604
/* request_irq() one MSI-x vector per event queue, naming each vector
 * "<ifname>-q<N>".  On failure, the vectors already requested are freed
 * (walking backwards from the one that failed) and MSI-x is disabled so
 * the caller can fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind only the IRQs that were successfully requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2628
/* Register the adapter's interrupt handler(s): MSI-x when enabled
 * (falling back to INTx on a PF if MSI-x registration fails), else
 * shared INTx on the first event queue.  Sets isr_registered on
 * success so be_irq_unregister() knows there is work to undo.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2656
2657 static void be_irq_unregister(struct be_adapter *adapter)
2658 {
2659         struct net_device *netdev = adapter->netdev;
2660         struct be_eq_obj *eqo;
2661         int i;
2662
2663         if (!adapter->isr_registered)
2664                 return;
2665
2666         /* INTx */
2667         if (!msix_enabled(adapter)) {
2668                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2669                 goto done;
2670         }
2671
2672         /* MSIx */
2673         for_all_evt_queues(adapter, eqo, i)
2674                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2675
2676 done:
2677         adapter->isr_registered = false;
2678 }
2679
/* Tear down all RX rings: for each RXQ that was created in FW, issue
 * the destroy command and flush its CQ (be_rx_cq_clean), then free the
 * queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2695
/* ndo_stop handler: quiesce NAPI/busy-poll, stop async MCC events,
 * drain pending Tx, destroy the RX rings, delete the programmed
 * uc-mac filters, clean each EQ and finally release the IRQs.
 * Ordering matters: IRQs are unregistered last, after the EQs are
 * synchronized and cleaned.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* delete the additional (uc-list) MACs; slot 0 is the primary */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		/* wait out any handler still running on the EQ's IRQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2739
2740 static int be_rx_qs_create(struct be_adapter *adapter)
2741 {
2742         struct be_rx_obj *rxo;
2743         int rc, i, j;
2744         u8 rsstable[128];
2745
2746         for_all_rx_queues(adapter, rxo, i) {
2747                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2748                                     sizeof(struct be_eth_rx_d));
2749                 if (rc)
2750                         return rc;
2751         }
2752
2753         /* The FW would like the default RXQ to be created first */
2754         rxo = default_rxo(adapter);
2755         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2756                                adapter->if_handle, false, &rxo->rss_id);
2757         if (rc)
2758                 return rc;
2759
2760         for_all_rss_queues(adapter, rxo, i) {
2761                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2762                                        rx_frag_size, adapter->if_handle,
2763                                        true, &rxo->rss_id);
2764                 if (rc)
2765                         return rc;
2766         }
2767
2768         if (be_multi_rxq(adapter)) {
2769                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2770                         for_all_rss_queues(adapter, rxo, i) {
2771                                 if ((j + i) >= 128)
2772                                         break;
2773                                 rsstable[j + i] = rxo->rss_id;
2774                         }
2775                 }
2776                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2777                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2778
2779                 if (!BEx_chip(adapter))
2780                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2781                                                 RSS_ENABLE_UDP_IPV6;
2782         } else {
2783                 /* Disable RSS, if only default RX Q is created */
2784                 adapter->rss_flags = RSS_ENABLE_NONE;
2785         }
2786
2787         rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2788                                128);
2789         if (rc) {
2790                 adapter->rss_flags = RSS_ENABLE_NONE;
2791                 return rc;
2792         }
2793
2794         /* First time posting */
2795         for_all_rx_queues(adapter, rxo, i)
2796                 be_post_rx_frags(rxo, GFP_KERNEL);
2797         return 0;
2798 }
2799
/* ndo_open handler: create RX rings, register IRQs, arm all CQs/EQs,
 * enable NAPI + busy-poll and start the Tx queues.  On any failure the
 * partially-opened state is torn down via be_close() and -EIO is
 * returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* arm all completion queues before enabling NAPI */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* best-effort: failure to query link is not fatal for open */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2843
2844 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2845 {
2846         struct be_dma_mem cmd;
2847         int status = 0;
2848         u8 mac[ETH_ALEN];
2849
2850         memset(mac, 0, ETH_ALEN);
2851
2852         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2853         cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2854                                      GFP_KERNEL);
2855         if (cmd.va == NULL)
2856                 return -1;
2857
2858         if (enable) {
2859                 status = pci_write_config_dword(adapter->pdev,
2860                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2861                 if (status) {
2862                         dev_err(&adapter->pdev->dev,
2863                                 "Could not enable Wake-on-lan\n");
2864                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2865                                           cmd.dma);
2866                         return status;
2867                 }
2868                 status = be_cmd_enable_magic_wol(adapter,
2869                                 adapter->netdev->dev_addr, &cmd);
2870                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2871                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2872         } else {
2873                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2874                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2875                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2876         }
2877
2878         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2879         return status;
2880 }
2881
2882 /*
2883  * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
2885  * These addresses are programmed in the ASIC by the PF and the VF driver
2886  * queries for the MAC address during its probe.
2887  */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE3 programs a pmac entry; newer chips use set_mac */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		/* log and continue; the last failure status is returned */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next address in sequence */
		mac[5] += 1;
	}
	return status;
}
2916
2917 static int be_vfs_mac_query(struct be_adapter *adapter)
2918 {
2919         int status, vf;
2920         u8 mac[ETH_ALEN];
2921         struct be_vf_cfg *vf_cfg;
2922
2923         for_all_vfs(adapter, vf_cfg, vf) {
2924                 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2925                                                mac, vf_cfg->if_handle,
2926                                                false, vf+1);
2927                 if (status)
2928                         return status;
2929                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2930         }
2931         return 0;
2932 }
2933
/* Tear down SR-IOV state: disable SR-IOV (unless VFs are still assigned
 * to VMs), delete each VF's MAC and interface in FW, and free the
 * per-VF config array.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	/* can't yank VFs out from under running guests */
	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE3 deletes the pmac entry; newer chips clear via set_mac */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2961
/* Destroy all NIC queues.  The order is fixed: MCC first, then Rx CQs,
 * Tx queues, and the event queues last.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
2969
2970 static void be_cancel_worker(struct be_adapter *adapter)
2971 {
2972         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2973                 cancel_delayed_work_sync(&adapter->work);
2974                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2975         }
2976 }
2977
2978 static void be_mac_clear(struct be_adapter *adapter)
2979 {
2980         int i;
2981
2982         if (adapter->pmac_id) {
2983                 for (i = 0; i < (adapter->uc_macs + 1); i++)
2984                         be_cmd_pmac_del(adapter, adapter->if_handle,
2985                                         adapter->pmac_id[i], 0);
2986                 adapter->uc_macs = 0;
2987
2988                 kfree(adapter->pmac_id);
2989                 adapter->pmac_id = NULL;
2990         }
2991 }
2992
/* Full adapter teardown: stop the worker, clear SR-IOV state, delete
 * all MACs, destroy the FW interface and all queues, then disable
 * MSI-x.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle,  0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	return 0;
}
3010
3011 static int be_vfs_if_create(struct be_adapter *adapter)
3012 {
3013         struct be_resources res = {0};
3014         struct be_vf_cfg *vf_cfg;
3015         u32 cap_flags, en_flags, vf;
3016         int status = 0;
3017
3018         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3019                     BE_IF_FLAGS_MULTICAST;
3020
3021         for_all_vfs(adapter, vf_cfg, vf) {
3022                 if (!BE3_chip(adapter)) {
3023                         status = be_cmd_get_profile_config(adapter, &res,
3024                                                            vf + 1);
3025                         if (!status)
3026                                 cap_flags = res.if_cap_flags;
3027                 }
3028
3029                 /* If a FW profile exists, then cap_flags are updated */
3030                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3031                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
3032                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3033                                           &vf_cfg->if_handle, vf + 1);
3034                 if (status)
3035                         goto err;
3036         }
3037 err:
3038         return status;
3039 }
3040
/* Allocate the per-VF config array and initialize each VF's if_handle
 * and pmac_id to -1 (i.e. not yet created in FW).
 * Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
3057
/* Bring up SR-IOV: decide the VF count (honouring VFs that are already
 * enabled, e.g. after a PF reset), create or re-discover each VF's FW
 * interface and MAC, grant filtering privileges, configure QoS/link
 * state, and finally enable SR-IOV in the PCI layer.  On any error the
 * partial state is torn down with be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;
	u16 lnk_speed;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs survive from a previous PF instance; reuse them */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* interfaces already exist in FW; just look up their ids */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3153
3154 /* Converting function_mode bits on BE3 to SH mc_type enums */
3155
3156 static u8 be_convert_mc_type(u32 function_mode)
3157 {
3158         if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3159                 return vNIC1;
3160         else if (function_mode & FLEX10_MODE)
3161                 return FLEX10;
3162         else if (function_mode & VNIC_MODE)
3163                 return vNIC2;
3164         else if (function_mode & UMC_ENABLED)
3165                 return UMC;
3166         else
3167                 return MC_NONE;
3168 }
3169
/* On BE2/BE3 FW does not suggest the supported limits, so the per-function
 * resource limits are derived locally from chip family, multi-channel mode
 * and SR-IOV state.
 */
static void BEx_get_resources(struct be_adapter *adapter,
                              struct be_resources *res)
{
        struct pci_dev *pdev = adapter->pdev;
        bool use_sriov = false;
        int max_vfs = 0;

        if (be_physfn(adapter) && BE3_chip(adapter)) {
                be_cmd_get_profile_config(adapter, res, 0);
                /* Some old versions of BE3 FW don't report max_vfs value */
                if (res->max_vfs == 0) {
                        max_vfs = pci_sriov_get_totalvfs(pdev);
                        res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
                }
                /* SR-IOV counts only if VFs exist AND the user asked for them */
                use_sriov = res->max_vfs && sriov_want(adapter);
        }

        /* The PF gets more unicast MAC filter entries than a VF */
        if (be_physfn(adapter))
                res->max_uc_mac = BE_UC_PMAC_COUNT;
        else
                res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

        adapter->mc_type = be_convert_mc_type(adapter->function_mode);

        if (be_is_mc(adapter)) {
                /* Assuming that there are 4 channels per port,
                 * when multi-channel is enabled
                 */
                if (be_is_qnq_mode(adapter))
                        res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
                else
                        /* In a non-qnq multichannel mode, the pvid
                         * takes up one vlan entry
                         */
                        res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
        } else {
                res->max_vlans = BE_NUM_VLANS_SUPPORTED;
        }

        res->max_mcast_mac = BE_MAX_MC;

        /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
        if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
            !be_physfn(adapter) || (adapter->port_num > 1))
                res->max_tx_qs = 1;
        else
                res->max_tx_qs = BE3_MAX_TX_QS;

        /* RSS rings only for an RSS-capable, non-SRIOV PF; otherwise
         * max_rss_qs stays at the caller's zero-initialized value
         */
        if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
            !use_sriov && be_physfn(adapter))
                res->max_rss_qs = (adapter->be3_native) ?
                                           BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
        /* one default (non-RSS) RX queue in addition to the RSS rings */
        res->max_rx_qs = res->max_rss_qs + 1;

        if (be_physfn(adapter))
                res->max_evt_qs = (res->max_vfs > 0) ?
                                        BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
        else
                res->max_evt_qs = 1;

        res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
        if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
                res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3235
3236 static void be_setup_init(struct be_adapter *adapter)
3237 {
3238         adapter->vlan_prio_bmap = 0xff;
3239         adapter->phy.link_speed = -1;
3240         adapter->if_handle = -1;
3241         adapter->be3_native = false;
3242         adapter->promiscuous = false;
3243         if (be_physfn(adapter))
3244                 adapter->cmd_privileges = MAX_PRIVILEGES;
3245         else
3246                 adapter->cmd_privileges = MIN_PRIVILEGES;
3247 }
3248
/* Populate adapter->res with the function's resource limits: derived
 * locally on BE2/BE3, queried from FW on Lancer/Skyhawk.
 * Returns 0 on success or a FW command error.
 */
static int be_get_resources(struct be_adapter *adapter)
{
        struct device *dev = &adapter->pdev->dev;
        struct be_resources res = {0};
        int status;

        /* BE2/BE3: FW does not report limits; compute them locally */
        if (BEx_chip(adapter)) {
                BEx_get_resources(adapter, &res);
                adapter->res = res;
        }

        /* For Lancer, SH etc read per-function resource limits from FW.
         * GET_FUNC_CONFIG returns per function guaranteed limits.
         * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
         */
        if (!BEx_chip(adapter)) {
                status = be_cmd_get_func_config(adapter, &res);
                if (status)
                        return status;

                /* If RoCE may be enabled stash away half the EQs for RoCE */
                if (be_roce_supported(adapter))
                        res.max_evt_qs /= 2;
                adapter->res = res;

                /* max_vfs is a PF-pool limit, available only via
                 * GET_PROFILE_CONFIG (PF-only command)
                 */
                if (be_physfn(adapter)) {
                        status = be_cmd_get_profile_config(adapter, &res, 0);
                        if (status)
                                return status;
                        adapter->res.max_vfs = res.max_vfs;
                }

                dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
                         be_max_txqs(adapter), be_max_rxqs(adapter),
                         be_max_rss(adapter), be_max_eqs(adapter),
                         be_max_vfs(adapter));
                dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
                         be_max_uc(adapter), be_max_mc(adapter),
                         be_max_vlans(adapter));
        }

        return 0;
}
3292
3293 /* Routine to query per function resource limits */
3294 static int be_get_config(struct be_adapter *adapter)
3295 {
3296         u16 profile_id;
3297         int status;
3298
3299         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3300                                      &adapter->function_mode,
3301                                      &adapter->function_caps,
3302                                      &adapter->asic_rev);
3303         if (status)
3304                 return status;
3305
3306          if (be_physfn(adapter)) {
3307                 status = be_cmd_get_active_profile(adapter, &profile_id);
3308                 if (!status)
3309                         dev_info(&adapter->pdev->dev,
3310                                  "Using profile 0x%x\n", profile_id);
3311         }
3312
3313         status = be_get_resources(adapter);
3314         if (status)
3315                 return status;
3316
3317         /* primary mac needs 1 pmac entry */
3318         adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3319                                    GFP_KERNEL);
3320         if (!adapter->pmac_id)
3321                 return -ENOMEM;
3322
3323         /* Sanitize cfg_num_qs based on HW and platform limits */
3324         adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3325
3326         return 0;
3327 }
3328
3329 static int be_mac_setup(struct be_adapter *adapter)
3330 {
3331         u8 mac[ETH_ALEN];
3332         int status;
3333
3334         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3335                 status = be_cmd_get_perm_mac(adapter, mac);
3336                 if (status)
3337                         return status;
3338
3339                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3340                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3341         } else {
3342                 /* Maybe the HW was reset; dev_addr must be re-programmed */
3343                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3344         }
3345
3346         /* For BE3-R VFs, the PF programs the initial MAC address */
3347         if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3348                 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3349                                 &adapter->pmac_id[0], 0);
3350         return 0;
3351 }
3352
/* Arm the periodic (1s) worker and flag it as scheduled so it can be
 * cancelled later via the BE_FLAGS_WORKER_SCHEDULED flag.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
        adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3358
/* Create event/TX/RX-CQ/MCC queues and publish the real RX/TX queue
 * counts to the stack.  Must be called under rtnl_lock (required by
 * netif_set_real_num_*_queues).  On failure the error is returned and
 * the callers' error paths tear down any partially created queues.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int status;

        status = be_evt_queues_create(adapter);
        if (status)
                goto err;

        status = be_tx_qs_create(adapter);
        if (status)
                goto err;

        status = be_rx_cqs_create(adapter);
        if (status)
                goto err;

        status = be_mcc_queues_create(adapter);
        if (status)
                goto err;

        status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
        if (status)
                goto err;

        status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
        if (status)
                goto err;

        return 0;
err:
        dev_err(&adapter->pdev->dev, "queue_setup failed\n");
        return status;
}
3393
/* Tear down and recreate all queues (e.g. after a queue-count change):
 * quiesce the interface and worker, drop MSI-x if possible, rebuild the
 * queues and bring everything back up if the netdev was running.
 */
int be_update_queues(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int status;

        if (netif_running(netdev))
                be_close(netdev);

        be_cancel_worker(adapter);

        /* If any vectors have been shared with RoCE we cannot re-program
         * the MSIx table.
         */
        if (!adapter->num_msix_roce_vec)
                be_msix_disable(adapter);

        be_clear_queues(adapter);

        if (!msix_enabled(adapter)) {
                status = be_msix_enable(adapter);
                if (status)
                        return status;
        }

        status = be_setup_queues(adapter);
        if (status)
                return status;

        be_schedule_worker(adapter);

        /* status is 0 here; it only changes if be_open() is attempted */
        if (netif_running(netdev))
                status = be_open(netdev);

        return status;
}
3429
/* One-time (and post-reset) bring-up of the function: query FW config,
 * enable MSI-x, create the interface object and queues, program the MAC,
 * restore vlan/rx-mode/flow-control settings, set up SR-IOV VFs when
 * requested, and arm the periodic worker.
 * Returns 0 on success; on any failure be_clear() undoes partial setup.
 */
static int be_setup(struct be_adapter *adapter)
{
        struct device *dev = &adapter->pdev->dev;
        u32 tx_fc, rx_fc, en_flags;
        int status;

        be_setup_init(adapter);

        /* Lancer skips the native-mode request */
        if (!lancer_chip(adapter))
                be_cmd_req_native_mode(adapter);

        status = be_get_config(adapter);
        if (status)
                goto err;

        status = be_msix_enable(adapter);
        if (status)
                goto err;

        /* Enable only the interface flags this function is capable of;
         * RSS only if FW reported the capability
         */
        en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
                   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
        if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
                en_flags |= BE_IF_FLAGS_RSS;
        en_flags = en_flags & be_if_cap_flags(adapter);
        status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
                                  &adapter->if_handle, 0);
        if (status)
                goto err;

        /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
        rtnl_lock();
        status = be_setup_queues(adapter);
        rtnl_unlock();
        if (status)
                goto err;

        be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

        status = be_mac_setup(adapter);
        if (status)
                goto err;

        be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

        /* Warn (but continue) on old BE2 FW known to have IRQ problems */
        if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
                dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
                        adapter->fw_ver);
                dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
        }

        /* Re-program vlans remembered from before a reset */
        if (adapter->vlans_added)
                be_vid_config(adapter);

        be_set_rx_mode(adapter->netdev);

        be_cmd_get_acpi_wol_cap(adapter);

        /* Sync HW flow-control state with the driver's cached settings */
        be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

        if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
                be_cmd_set_flow_control(adapter, adapter->tx_fc,
                                        adapter->rx_fc);

        if (be_physfn(adapter))
                be_cmd_set_logical_link_config(adapter,
                                               IFLA_VF_LINK_STATE_AUTO, 0);

        if (sriov_want(adapter)) {
                if (be_max_vfs(adapter))
                        be_vf_setup(adapter);
                else
                        dev_warn(dev, "device doesn't support SRIOV\n");
        }

        /* Best-effort: enable pause autoneg when the PHY supports it */
        status = be_cmd_get_phy_info(adapter);
        if (!status && be_pause_supported(adapter))
                adapter->phy.fc_autoneg = 1;

        be_schedule_worker(adapter);
        return 0;
err:
        be_clear(adapter);
        return status;
}
3514
3515 #ifdef CONFIG_NET_POLL_CONTROLLER
3516 static void be_netpoll(struct net_device *netdev)
3517 {
3518         struct be_adapter *adapter = netdev_priv(netdev);
3519         struct be_eq_obj *eqo;
3520         int i;
3521
3522         for_all_evt_queues(adapter, eqo, i) {
3523                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3524                 napi_schedule(&eqo->napi);
3525         }
3526
3527         return;
3528 }
3529 #endif
3530
3531 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3532 static char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
3533
3534 static bool be_flash_redboot(struct be_adapter *adapter,
3535                         const u8 *p, u32 img_start, int image_size,
3536                         int hdr_size)
3537 {
3538         u32 crc_offset;
3539         u8 flashed_crc[4];
3540         int status;
3541
3542         crc_offset = hdr_size + img_start + image_size - 4;
3543
3544         p += crc_offset;
3545
3546         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3547                         (image_size - 4));
3548         if (status) {
3549                 dev_err(&adapter->pdev->dev,
3550                 "could not get crc from flash, not flashing redboot\n");
3551                 return false;
3552         }
3553
3554         /*update redboot only if crc does not match*/
3555         if (!memcmp(flashed_crc, p, 4))
3556                 return false;
3557         else
3558                 return true;
3559 }
3560
3561 static bool phy_flashing_required(struct be_adapter *adapter)
3562 {
3563         return (adapter->phy.phy_type == TN_8022 &&
3564                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3565 }
3566
3567 static bool is_comp_in_ufi(struct be_adapter *adapter,
3568                            struct flash_section_info *fsec, int type)
3569 {
3570         int i = 0, img_type = 0;
3571         struct flash_section_info_g2 *fsec_g2 = NULL;
3572
3573         if (BE2_chip(adapter))
3574                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3575
3576         for (i = 0; i < MAX_FLASH_COMP; i++) {
3577                 if (fsec_g2)
3578                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3579                 else
3580                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3581
3582                 if (img_type == type)
3583                         return true;
3584         }
3585         return false;
3586
3587 }
3588
/* Scan the UFI image (past header_size bytes) for the flash section
 * directory, identified by the flash_cookie signature.  Returns a pointer
 * into fw->data, or NULL if no cookie is found.
 *
 * NOTE(review): near the end of the image, the memcmp may read fsec->cookie
 * bytes past fw->data + fw->size — presumably UFI images are padded;
 * confirm, or bound the loop by sizeof(flash_cookie).
 */
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
                                         int header_size,
                                         const struct firmware *fw)
{
        struct flash_section_info *fsec = NULL;
        const u8 *p = fw->data;

        p += header_size;
        while (p < (fw->data + fw->size)) {
                fsec = (struct flash_section_info *)p;
                if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
                        return fsec;
                /* directory is expected on a 32-byte boundary */
                p += 32;
        }
        return NULL;
}
3605
/* Write one image to flash in 32KB chunks through the shared DMA command
 * buffer.  Intermediate chunks use a SAVE op (FW buffers the data); the
 * final chunk uses a FLASH op, which commits the whole image.
 * Returns 0 on success (including an FW-rejected optional PHY image) or
 * the FW error code.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
                struct be_dma_mem *flash_cmd, int optype, int img_size)
{
        u32 total_bytes = 0, flash_op, num_bytes = 0;
        int status = 0;
        struct be_cmd_write_flashrom *req = flash_cmd->va;

        total_bytes = img_size;
        while (total_bytes) {
                /* at most 32KB per flashrom command */
                num_bytes = min_t(u32, 32*1024, total_bytes);

                total_bytes -= num_bytes;

                /* last chunk commits (FLASH); earlier chunks buffer (SAVE) */
                if (!total_bytes) {
                        if (optype == OPTYPE_PHY_FW)
                                flash_op = FLASHROM_OPER_PHY_FLASH;
                        else
                                flash_op = FLASHROM_OPER_FLASH;
                } else {
                        if (optype == OPTYPE_PHY_FW)
                                flash_op = FLASHROM_OPER_PHY_SAVE;
                        else
                                flash_op = FLASHROM_OPER_SAVE;
                }

                memcpy(req->data_buf, img, num_bytes);
                img += num_bytes;
                status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
                                                flash_op, num_bytes);
                if (status) {
                        /* FW refusing the optional PHY image is not fatal:
                         * skip it and report success
                         */
                        if (status == ILLEGAL_IOCTL_REQ &&
                            optype == OPTYPE_PHY_FW)
                                break;
                        dev_err(&adapter->pdev->dev,
                                "cmd to write to flash rom failed.\n");
                        return status;
                }
        }
        return 0;
}
3646
/* For BE2, BE3 and BE3-R */
/* Flash every component of a BEx UFI image.  The gen2/gen3 tables below
 * map each UFI image type to its flash offset, maximum size and flashrom
 * op-type; components absent from the UFI's section directory are skipped.
 * Returns 0 on success, -1 on a malformed UFI, or the FW error code.
 */
static int be_flash_BEx(struct be_adapter *adapter,
                         const struct firmware *fw,
                         struct be_dma_mem *flash_cmd,
                         int num_of_images)

{
        int status = 0, i, filehdr_size = 0;
        int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
        const u8 *p = fw->data;
        const struct flash_comp *pflashcomp;
        int num_comp, redboot;
        struct flash_section_info *fsec = NULL;

        struct flash_comp gen3_flash_types[] = {
                { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
                { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
                        FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
                { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
                { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
                { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
                { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
                { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
                { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
                { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
                        FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
                { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
                        FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
        };

        struct flash_comp gen2_flash_types[] = {
                { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
                { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
                        FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
                { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
                { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
                { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
                { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
                { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
                { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
        };

        /* BE3/BE3-R use the gen3 layout; BE2 the gen2 layout */
        if (BE3_chip(adapter)) {
                pflashcomp = gen3_flash_types;
                filehdr_size = sizeof(struct flash_file_hdr_g3);
                num_comp = ARRAY_SIZE(gen3_flash_types);
        } else {
                pflashcomp = gen2_flash_types;
                filehdr_size = sizeof(struct flash_file_hdr_g2);
                num_comp = ARRAY_SIZE(gen2_flash_types);
        }

        /* Get flash section info*/
        fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
        if (!fsec) {
                dev_err(&adapter->pdev->dev,
                        "Invalid Cookie. UFI corrupted ?\n");
                return -1;
        }
        for (i = 0; i < num_comp; i++) {
                if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
                        continue;

                /* Skip the NCSI image if the running FW is older than
                 * 3.102.148.0
                 */
                if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
                    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
                        continue;

                /* PHY FW is flashed only for the TN_8022 10GBase-T PHY */
                if (pflashcomp[i].optype == OPTYPE_PHY_FW  &&
                    !phy_flashing_required(adapter))
                                continue;

                /* Flash boot code only when its CRC differs from flash */
                if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
                        redboot = be_flash_redboot(adapter, fw->data,
                                pflashcomp[i].offset, pflashcomp[i].size,
                                filehdr_size + img_hdrs_size);
                        if (!redboot)
                                continue;
                }

                p = fw->data;
                p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
                /* reject components that run past the end of the file */
                if (p + pflashcomp[i].size > fw->data + fw->size)
                        return -1;

                status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
                                        pflashcomp[i].size);
                if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Flashing section type %d failed.\n",
                                pflashcomp[i].img_type);
                        return status;
                }
        }
        return 0;
}
3756
3757 static int be_flash_skyhawk(struct be_adapter *adapter,
3758                 const struct firmware *fw,
3759                 struct be_dma_mem *flash_cmd, int num_of_images)
3760 {
3761         int status = 0, i, filehdr_size = 0;
3762         int img_offset, img_size, img_optype, redboot;
3763         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3764         const u8 *p = fw->data;
3765         struct flash_section_info *fsec = NULL;
3766
3767         filehdr_size = sizeof(struct flash_file_hdr_g3);
3768         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3769         if (!fsec) {
3770                 dev_err(&adapter->pdev->dev,
3771                         "Invalid Cookie. UFI corrupted ?\n");
3772                 return -1;
3773         }
3774
3775         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3776                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3777                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3778
3779                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3780                 case IMAGE_FIRMWARE_iSCSI:
3781                         img_optype = OPTYPE_ISCSI_ACTIVE;
3782                         break;
3783                 case IMAGE_BOOT_CODE:
3784                         img_optype = OPTYPE_REDBOOT;
3785                         break;
3786                 case IMAGE_OPTION_ROM_ISCSI:
3787                         img_optype = OPTYPE_BIOS;
3788                         break;
3789                 case IMAGE_OPTION_ROM_PXE:
3790                         img_optype = OPTYPE_PXE_BIOS;
3791                         break;
3792                 case IMAGE_OPTION_ROM_FCoE:
3793                         img_optype = OPTYPE_FCOE_BIOS;
3794                         break;
3795                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3796                         img_optype = OPTYPE_ISCSI_BACKUP;
3797                         break;
3798                 case IMAGE_NCSI:
3799                         img_optype = OPTYPE_NCSI_FW;
3800                         break;
3801                 default:
3802                         continue;
3803                 }
3804
3805                 if (img_optype == OPTYPE_REDBOOT) {
3806                         redboot = be_flash_redboot(adapter, fw->data,
3807                                         img_offset, img_size,
3808                                         filehdr_size + img_hdrs_size);
3809                         if (!redboot)
3810                                 continue;
3811                 }
3812
3813                 p = fw->data;
3814                 p += filehdr_size + img_offset + img_hdrs_size;
3815                 if (p + img_size > fw->data + fw->size)
3816                         return -1;
3817
3818                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3819                 if (status) {
3820                         dev_err(&adapter->pdev->dev,
3821                                 "Flashing section type %d failed.\n",
3822                                 fsec->fsec_entry[i].type);
3823                         return status;
3824                 }
3825         }
3826         return 0;
3827 }
3828
/* Stream a FW image to a Lancer adapter: the image is written to the
 * "/prg" flash object in 32KB chunks via WRITE_OBJECT, then committed
 * with a zero-length write.  Depending on FW's change_status the adapter
 * is reset here to activate the new image, or the user is told a reboot
 * is required.  Returns 0 on success or a negative/FW error code.
 */
static int lancer_fw_download(struct be_adapter *adapter,
                                const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
        struct be_dma_mem flash_cmd;
        const u8 *data_ptr = NULL;
        u8 *dest_image_ptr = NULL;
        size_t image_size = 0;
        u32 chunk_size = 0;
        u32 data_written = 0;
        u32 offset = 0;
        int status = 0;
        u8 add_status = 0;
        u8 change_status;

        /* FW accepts only 4-byte-aligned image lengths */
        if (!IS_ALIGNED(fw->size, sizeof(u32))) {
                dev_err(&adapter->pdev->dev,
                        "FW Image not properly aligned. "
                        "Length must be 4 byte aligned.\n");
                status = -EINVAL;
                goto lancer_fw_exit;
        }

        /* DMA buffer = write_object request header + one chunk payload */
        flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
                                + LANCER_FW_DOWNLOAD_CHUNK;
        flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
                                          &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va) {
                status = -ENOMEM;
                goto lancer_fw_exit;
        }

        dest_image_ptr = flash_cmd.va +
                                sizeof(struct lancer_cmd_req_write_object);
        image_size = fw->size;
        data_ptr = fw->data;

        while (image_size) {
                chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

                /* Copy the image chunk content. */
                memcpy(dest_image_ptr, data_ptr, chunk_size);

                status = lancer_cmd_write_object(adapter, &flash_cmd,
                                                 chunk_size, offset,
                                                 LANCER_FW_DOWNLOAD_LOCATION,
                                                 &data_written, &change_status,
                                                 &add_status);
                if (status)
                        break;

                /* Advance by what FW actually consumed; a short write is
                 * simply continued from the new offset next iteration
                 */
                offset += data_written;
                data_ptr += data_written;
                image_size -= data_written;
        }

        if (!status) {
                /* Commit the FW written */
                status = lancer_cmd_write_object(adapter, &flash_cmd,
                                                 0, offset,
                                                 LANCER_FW_DOWNLOAD_LOCATION,
                                                 &data_written, &change_status,
                                                 &add_status);
        }

        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
                                flash_cmd.dma);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "Firmware load error. "
                        "Status code: 0x%x Additional Status: 0x%x\n",
                        status, add_status);
                goto lancer_fw_exit;
        }

        /* Activating the new FW may need a function reset (done here) or
         * a full system reboot (reported to the user)
         */
        if (change_status == LANCER_FW_RESET_NEEDED) {
                dev_info(&adapter->pdev->dev,
                         "Resetting adapter to activate new FW\n");
                status = lancer_physdev_ctrl(adapter,
                                             PHYSDEV_CONTROL_FW_RESET_MASK);
                if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Adapter busy for FW reset.\n"
                                "New FW will not be active.\n");
                        goto lancer_fw_exit;
                }
        } else if (change_status != LANCER_NO_RESET_NEEDED) {
                        dev_err(&adapter->pdev->dev,
                                "System reboot required for new FW"
                                " to be active\n");
        }

        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
        return status;
}
3926
3927 #define UFI_TYPE2               2
3928 #define UFI_TYPE3               3
3929 #define UFI_TYPE3R              10
3930 #define UFI_TYPE4               4
3931 static int be_get_ufi_type(struct be_adapter *adapter,
3932                            struct flash_file_hdr_g3 *fhdr)
3933 {
3934         if (fhdr == NULL)
3935                 goto be_get_ufi_exit;
3936
3937         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3938                 return UFI_TYPE4;
3939         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3940                 if (fhdr->asic_type_rev == 0x10)
3941                         return UFI_TYPE3R;
3942                 else
3943                         return UFI_TYPE3;
3944         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3945                 return UFI_TYPE2;
3946
3947 be_get_ufi_exit:
3948         dev_err(&adapter->pdev->dev,
3949                 "UFI and Interface are not compatible for flashing\n");
3950         return -1;
3951 }
3952
3953 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3954 {
3955         struct flash_file_hdr_g3 *fhdr3;
3956         struct image_hdr *img_hdr_ptr = NULL;
3957         struct be_dma_mem flash_cmd;
3958         const u8 *p;
3959         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3960
3961         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3962         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3963                                           &flash_cmd.dma, GFP_KERNEL);
3964         if (!flash_cmd.va) {
3965                 status = -ENOMEM;
3966                 goto be_fw_exit;
3967         }
3968
3969         p = fw->data;
3970         fhdr3 = (struct flash_file_hdr_g3 *)p;
3971
3972         ufi_type = be_get_ufi_type(adapter, fhdr3);
3973
3974         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3975         for (i = 0; i < num_imgs; i++) {
3976                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3977                                 (sizeof(struct flash_file_hdr_g3) +
3978                                  i * sizeof(struct image_hdr)));
3979                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3980                         switch (ufi_type) {
3981                         case UFI_TYPE4:
3982                                 status = be_flash_skyhawk(adapter, fw,
3983                                                         &flash_cmd, num_imgs);
3984                                 break;
3985                         case UFI_TYPE3R:
3986                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3987                                                       num_imgs);
3988                                 break;
3989                         case UFI_TYPE3:
3990                                 /* Do not flash this ufi on BE3-R cards */
3991                                 if (adapter->asic_rev < 0x10)
3992                                         status = be_flash_BEx(adapter, fw,
3993                                                               &flash_cmd,
3994                                                               num_imgs);
3995                                 else {
3996                                         status = -1;
3997                                         dev_err(&adapter->pdev->dev,
3998                                                 "Can't load BE3 UFI on BE3R\n");
3999                                 }
4000                         }
4001                 }
4002         }
4003
4004         if (ufi_type == UFI_TYPE2)
4005                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
4006         else if (ufi_type == -1)
4007                 status = -1;
4008
4009         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4010                           flash_cmd.dma);
4011         if (status) {
4012                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
4013                 goto be_fw_exit;
4014         }
4015
4016         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
4017
4018 be_fw_exit:
4019         return status;
4020 }
4021
4022 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4023 {
4024         const struct firmware *fw;
4025         int status;
4026
4027         if (!netif_running(adapter->netdev)) {
4028                 dev_err(&adapter->pdev->dev,
4029                         "Firmware load not allowed (interface is down)\n");
4030                 return -1;
4031         }
4032
4033         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4034         if (status)
4035                 goto fw_exit;
4036
4037         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4038
4039         if (lancer_chip(adapter))
4040                 status = lancer_fw_download(adapter, fw);
4041         else
4042                 status = be_fw_download(adapter, fw);
4043
4044         if (!status)
4045                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4046                                   adapter->fw_on_flash);
4047
4048 fw_exit:
4049         release_firmware(fw);
4050         return status;
4051 }
4052
4053 static int be_ndo_bridge_setlink(struct net_device *dev,
4054                                     struct nlmsghdr *nlh)
4055 {
4056         struct be_adapter *adapter = netdev_priv(dev);
4057         struct nlattr *attr, *br_spec;
4058         int rem;
4059         int status = 0;
4060         u16 mode = 0;
4061
4062         if (!sriov_enabled(adapter))
4063                 return -EOPNOTSUPP;
4064
4065         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4066
4067         nla_for_each_nested(attr, br_spec, rem) {
4068                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4069                         continue;
4070
4071                 mode = nla_get_u16(attr);
4072                 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4073                         return -EINVAL;
4074
4075                 status = be_cmd_set_hsw_config(adapter, 0, 0,
4076                                                adapter->if_handle,
4077                                                mode == BRIDGE_MODE_VEPA ?
4078                                                PORT_FWD_TYPE_VEPA :
4079                                                PORT_FWD_TYPE_VEB);
4080                 if (status)
4081                         goto err;
4082
4083                 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4084                          mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4085
4086                 return status;
4087         }
4088 err:
4089         dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4090                 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4091
4092         return status;
4093 }
4094
4095 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4096                                     struct net_device *dev,
4097                                     u32 filter_mask)
4098 {
4099         struct be_adapter *adapter = netdev_priv(dev);
4100         int status = 0;
4101         u8 hsw_mode;
4102
4103         if (!sriov_enabled(adapter))
4104                 return 0;
4105
4106         /* BE and Lancer chips support VEB mode only */
4107         if (BEx_chip(adapter) || lancer_chip(adapter)) {
4108                 hsw_mode = PORT_FWD_TYPE_VEB;
4109         } else {
4110                 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4111                                                adapter->if_handle, &hsw_mode);
4112                 if (status)
4113                         return 0;
4114         }
4115
4116         return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4117                                        hsw_mode == PORT_FWD_TYPE_VEPA ?
4118                                        BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4119 }
4120
/* Driver entry points hooked into the networking core */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll
#endif
};
4146
/* Populate netdev feature flags and ops before register_netdev() */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Offloads the user may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Enable everything above plus the fixed VLAN RX offloads */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}
4173
/* Release the CSR and doorbell BAR mappings created by be_map_pci_bars() */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}
4181
/* BAR number of the doorbell region: BAR 0 on Lancer and on VFs,
 * BAR 4 on BE physical functions.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4189
4190 static int be_roce_map_pci_bars(struct be_adapter *adapter)
4191 {
4192         if (skyhawk_chip(adapter)) {
4193                 adapter->roce_db.size = 4096;
4194                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4195                                                               db_bar(adapter));
4196                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4197                                                                db_bar(adapter));
4198         }
4199         return 0;
4200 }
4201
4202 static int be_map_pci_bars(struct be_adapter *adapter)
4203 {
4204         u8 __iomem *addr;
4205
4206         if (BEx_chip(adapter) && be_physfn(adapter)) {
4207                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4208                 if (adapter->csr == NULL)
4209                         return -ENOMEM;
4210         }
4211
4212         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
4213         if (addr == NULL)
4214                 goto pci_map_err;
4215         adapter->db = addr;
4216
4217         be_roce_map_pci_bars(adapter);
4218         return 0;
4219
4220 pci_map_err:
4221         be_unmap_pci_bars(adapter);
4222         return -ENOMEM;
4223 }
4224
4225 static void be_ctrl_cleanup(struct be_adapter *adapter)
4226 {
4227         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
4228
4229         be_unmap_pci_bars(adapter);
4230
4231         if (mem->va)
4232                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4233                                   mem->dma);
4234
4235         mem = &adapter->rx_filter;
4236         if (mem->va)
4237                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4238                                   mem->dma);
4239 }
4240
/* Initialize the control path: read the SLI identity, map BARs,
 * allocate the mailbox and rx-filter DMA buffers, and set up the
 * command-path locks.  Returns 0 or a negative errno; errors unwind
 * in reverse order via gotos.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Identify the SLI family and PF/VF role from the SLI_INTF register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				 SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* The mailbox must be 16-byte aligned: over-allocate by 16 and
	 * keep both the raw allocation (for freeing) and the aligned
	 * view (used to issue commands).
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4299
4300 static void be_stats_cleanup(struct be_adapter *adapter)
4301 {
4302         struct be_dma_mem *cmd = &adapter->stats_cmd;
4303
4304         if (cmd->va)
4305                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4306                                   cmd->va, cmd->dma);
4307 }
4308
4309 static int be_stats_init(struct be_adapter *adapter)
4310 {
4311         struct be_dma_mem *cmd = &adapter->stats_cmd;
4312
4313         if (lancer_chip(adapter))
4314                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4315         else if (BE2_chip(adapter))
4316                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4317         else if (BE3_chip(adapter))
4318                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4319         else
4320                 /* ALL non-BE ASICs */
4321                 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
4322
4323         cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4324                                       GFP_KERNEL);
4325         if (cmd->va == NULL)
4326                 return -1;
4327         return 0;
4328 }
4329
/* PCI remove: tear the function down in the reverse order of be_probe() */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* stop the recovery worker before the netdev goes away */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4360
/* Fetch one-time configuration from FW after the control path is up.
 * Returns 0 or the error from the controller-attributes query.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* Derive the netif message-level mask from the FW log level */
	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
4381
4382 static int lancer_recover_func(struct be_adapter *adapter)
4383 {
4384         struct device *dev = &adapter->pdev->dev;
4385         int status;
4386
4387         status = lancer_test_and_set_rdy_state(adapter);
4388         if (status)
4389                 goto err;
4390
4391         if (netif_running(adapter->netdev))
4392                 be_close(adapter->netdev);
4393
4394         be_clear(adapter);
4395
4396         be_clear_all_error(adapter);
4397
4398         status = be_setup(adapter);
4399         if (status)
4400                 goto err;
4401
4402         if (netif_running(adapter->netdev)) {
4403                 status = be_open(adapter->netdev);
4404                 if (status)
4405                         goto err;
4406         }
4407
4408         dev_err(dev, "Adapter recovery successful\n");
4409         return 0;
4410 err:
4411         if (status == -EAGAIN)
4412                 dev_err(dev, "Waiting for resource provisioning\n");
4413         else
4414                 dev_err(dev, "Adapter recovery failed\n");
4415
4416         return status;
4417 }
4418
/* Periodic (1s) error-detection/recovery task.  On Lancer chips a
 * detected HW error triggers a full function recovery; the task re-arms
 * itself unless recovery failed unrecoverably.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter,  func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* detach under rtnl so netdev state stays consistent */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4445
/* Periodic (1s) housekeeping: MCC reaping when the interface is down,
 * async stats refresh, die-temperature polling and RX-queue replenish.
 * Re-arms itself every second.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	* mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* kick off an async stats query; stats_cmd_sent gates re-issue
	 * until the previous one completes
	 */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* poll die temperature once every be_get_temp_freq iterations */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4488
4489 /* If any VFs are already enabled don't FLR the PF */
4490 static bool be_reset_required(struct be_adapter *adapter)
4491 {
4492         return pci_num_vf(adapter->pdev) ? false : true;
4493 }
4494
4495 static char *mc_name(struct be_adapter *adapter)
4496 {
4497         char *str = ""; /* default */
4498
4499         switch (adapter->mc_type) {
4500         case UMC:
4501                 str = "UMC";
4502                 break;
4503         case FLEX10:
4504                 str = "FLEX10";
4505                 break;
4506         case vNIC1:
4507                 str = "vNIC-1";
4508                 break;
4509         case nPAR:
4510                 str = "nPAR";
4511                 break;
4512         case UFP:
4513                 str = "UFP";
4514                 break;
4515         case vNIC2:
4516                 str = "vNIC-2";
4517                 break;
4518         default:
4519                 str = "";
4520         }
4521
4522         return str;
4523 }
4524
/* "PF" for the physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4529
/* PCI probe: bring up one NIC function — PCI/DMA setup, control path,
 * FW handshake, resource setup (be_setup), netdev registration and the
 * periodic recovery worker.  Errors unwind in reverse order via gotos.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer a 64-bit DMA mask; fall back to 32-bit */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; failure to enable it is not fatal */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4651
/* PM suspend: optionally arm wake-on-LAN, stop the recovery worker,
 * close the interface and release resources, then enter the target
 * power state.  Mirrored by be_resume().
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4676
/* PM resume: re-enable the PCI device, wait for FW, redo the setup torn
 * down in be_suspend() and restart the recovery worker.
 * Returns 0 or a negative errno.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	/* disarm wake-on-LAN now that we are back up */
	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
4718
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* stop the periodic workers before quiescing the device */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4738
/* AER/EEH callback: a PCI channel error was reported.  Quiesce the
 * function and tell the EEH core whether a slot reset should follow.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* tear down only once even if errors are reported repeatedly */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4777
/* AER/EEH slot-reset callback: re-enable the device after the slot was
 * reset and verify the FW comes back ready.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4804
4805 static void be_eeh_resume(struct pci_dev *pdev)
4806 {
4807         int status = 0;
4808         struct be_adapter *adapter = pci_get_drvdata(pdev);
4809         struct net_device *netdev =  adapter->netdev;
4810
4811         dev_info(&adapter->pdev->dev, "EEH resume\n");
4812
4813         pci_save_state(pdev);
4814
4815         status = be_cmd_reset_function(adapter);
4816         if (status)
4817                 goto err;
4818
4819         /* tell fw we're ready to fire cmds */
4820         status = be_cmd_fw_init(adapter);
4821         if (status)
4822                 goto err;
4823
4824         status = be_setup(adapter);
4825         if (status)
4826                 goto err;
4827
4828         if (netif_running(netdev)) {
4829                 status = be_open(netdev);
4830                 if (status)
4831                         goto err;
4832         }
4833
4834         schedule_delayed_work(&adapter->func_recovery_work,
4835                               msecs_to_jiffies(1000));
4836         netif_device_attach(netdev);
4837         return;
4838 err:
4839         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4840 }
4841
/* PCI/EEH error-recovery callbacks (see Documentation/PCI/pci-error-recovery) */
static const struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};
4847
/* PCI driver glue: probe/remove, power management and EEH hooks */
static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};
4858
4859 static int __init be_init_module(void)
4860 {
4861         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4862             rx_frag_size != 2048) {
4863                 printk(KERN_WARNING DRV_NAME
4864                         " : Module param rx_frag_size must be 2048/4096/8192."
4865                         " Using 2048\n");
4866                 rx_frag_size = 2048;
4867         }
4868
4869         return pci_register_driver(&be_driver);
4870 }
4871 module_init(be_init_module);
4872
/* Module exit point: unregister the PCI driver (triggers be_remove per dev) */
static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
4877 module_exit(be_exit_module);