/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                      GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

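/* Fallback interrupt enable/disable: flips the host-interrupt bit in the
 * MEMBAR control register via PCI config space. be_intr_set() below tries
 * the FW INTR_SET cmd first and falls back to this register write.
 */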
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On Lancer, interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

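/* Doorbell helpers: each 32-bit doorbell write encodes a ring id and a
 * count (buffers posted, or events/completions processed, depending on
 * the ring). The wmb() orders the ring-entry writes before the doorbell
 * so HW never sees a stale entry.
 */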
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;
        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        struct sockaddr *addr = p;
        int status;
        u8 mac[ETH_ALEN];
        u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* Proceed further only if the user-provided MAC is different
         * from the active MAC
         */
        if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
                return 0;

        /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
         * privilege or if PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
         * FILTMGMT privilege. This failure is OK, only if the PF programmed
         * the MAC for the VF.
         */
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle, &adapter->pmac_id[0], 0);
        if (!status) {
                curr_pmac_id = adapter->pmac_id[0];

                /* Delete the old programmed MAC. This call may fail if the
                 * old MAC was already deleted by the PF driver.
                 */
                if (adapter->pmac_id[0] != old_pmac_id)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        old_pmac_id, 0);
        }

        /* Decide if the new MAC is successfully activated only after
         * querying the FW
         */
        status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
                                       adapter->if_handle, true, 0);
        if (status)
                goto err;

        /* The MAC change did not happen, either due to lack of privilege
         * or PF didn't pre-provision.
         */
        if (!ether_addr_equal(addr->sa_data, mac)) {
                status = -EPERM;
                goto err;
        }

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        dev_info(dev, "MAC address changed to %pM\n", mac);
        return 0;
err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd; BE3 uses v1 and newer chips use v2 */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else if (BE3_chip(adapter)) {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd; BE3 uses v1 and newer chips use v2 */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else if (BE3_chip(adapter)) {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered =
                                        port_stats->rx_address_filtered +
                                        port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v2 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
        if (be_roce_supported(adapter))  {
                drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
                drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
                drvs->rx_roce_frames = port_stats->roce_frames_received;
                drvs->roce_drops_crc = port_stats->roce_drops_crc;
                drvs->roce_drops_payload_len =
                        port_stats->roce_drops_payload_len;
        }
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered =
                                        pport_stats->rx_address_filtered +
                                        pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

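/* Accumulate a 16-bit HW counter into a 32-bit SW counter, handling wrap.
 * Example: with *acc = 0x0001FFFE and val = 2 the HW counter has wrapped,
 * so the result is 0x00010000 + 2 + 0x10000 = 0x00020002.
 */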
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* below erx HW counter can actually wrap around after
                 * 65535. Driver accumulates a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else if (BE3_chip(adapter))
                        /* for BE3 */
                        populate_be_v1_stats(adapter);
                else
                        populate_be_v2_stats(adapter);

                /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
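/* On BE2/BE3 the WRB count per packet must work out even (see the
 * even-count check below), so an odd count is padded with a zero-length
 * dummy WRB; Lancer has no such restriction.
 */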
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

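/* Insert the VLAN tag into the packet data itself (SW tagging) instead of
 * letting HW tag it; used by the workarounds in be_xmit_workarounds() for
 * pvid/QnQ configurations where HW tagging must be skipped.
 */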
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                /* f/w workaround: setting skip_hw_vlan = 1 informs the F/W
                 * to skip VLAN insertion
                 */
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *)(skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
                                struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes
         * or less may cause a transmit stall on that port. So the work-around
         * is to pad short packets (<= 32 bytes) to a 36-byte length.
         */
        if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        goto tx_drop;
                skb->len = 36;
        }

        /* For padded packets, BE HW modifies tot_len field in IP header
         * incorrectly when VLAN tag is inserted by HW.
         * For padded packets, Lancer computes incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in UMC mode
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        /* HW may lockup when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
            (adapter->pvid || adapter->qnq_vid) &&
            !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
        return NULL;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb) {
                tx_stats(txo)->tx_drv_drops++;
                return NETDEV_TX_OK;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                tx_stats(txo)->tx_drv_drops++;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > be_max_vlans(adapter))
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 0);

        if (status) {
                /* Set to VLAN promisc mode as setting VLAN filter failed */
                if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
                        goto set_vlan_promisc;
                dev_err(&adapter->pdev->dev,
                        "Setting HW VLAN filtering failed.\n");
        } else {
                if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
                        /* hw VLAN filtering re-enabled. */
                        status = be_cmd_rx_filter(adapter,
                                                  BE_FLAGS_VLAN_PROMISC, OFF);
                        if (!status) {
                                dev_info(&adapter->pdev->dev,
                                         "Disabling VLAN Promiscuous mode.\n");
                                adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
                                dev_info(&adapter->pdev->dev,
                                         "Re-Enabling HW VLAN filtering\n");
                        }
                }
        }

        return status;

set_vlan_promisc:
        dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");

        status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
        if (!status) {
                dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
                adapter->flags |= BE_FLAGS_VLAN_PROMISC;
        } else {
                dev_err(&adapter->pdev->dev,
                        "Failed to enable VLAN Promiscuous mode.\n");
        }
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= be_max_vlans(adapter))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

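/* Sync the HW RX filters with netdev state: IFF_PROMISC and IFF_ALLMULTI
 * map directly to RX-filter cmds; the UC list is rebuilt by deleting and
 * re-adding pmac entries; MC filter exhaustion falls back to ALLMULTI.
 */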
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > be_max_mc(adapter)) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (BEx_chip(adapter)) {
                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
                                vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        } else {
                status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
                                        vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
                return -EINVAL;

        if (vlan || qos) {
                vlan |= qos << VLAN_PRIO_SHIFT;
                if (vf_cfg->vlan_tag != vlan) {
                        /* If this is a new value, program it; else skip. */
                        vf_cfg->vlan_tag = vlan;
                        status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                                       vf_cfg->if_handle, 0);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                vf_cfg->vlan_tag = 0;
                vlan = vf_cfg->def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                               vf_cfg->if_handle, 0);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
                          ulong now)
{
        aic->rx_pkts_prev = rx_pkts;
        aic->tx_reqs_prev = tx_pkts;
        aic->jiffies = now;
}

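/* Adaptive interrupt coalescing: derive a per-EQ delay (eqd) from the
 * rx+tx packet rate observed since the last sample and push it to FW via
 * MODIFY_EQ_DELAY, clamped to each event queue's [min_eqd, max_eqd] range.
 */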
1355 static void be_eqd_update(struct be_adapter *adapter)
1356 {
1357         struct be_set_eqd set_eqd[MAX_EVT_QS];
1358         int eqd, i, num = 0, start;
1359         struct be_aic_obj *aic;
1360         struct be_eq_obj *eqo;
1361         struct be_rx_obj *rxo;
1362         struct be_tx_obj *txo;
1363         u64 rx_pkts, tx_pkts;
1364         ulong now;
1365         u32 pps, delta;
1366
1367         for_all_evt_queues(adapter, eqo, i) {
1368                 aic = &adapter->aic_obj[eqo->idx];
1369                 if (!aic->enable) {
1370                         if (aic->jiffies)
1371                                 aic->jiffies = 0;
1372                         eqd = aic->et_eqd;
1373                         goto modify_eqd;
1374                 }
1375
1376                 rxo = &adapter->rx_obj[eqo->idx];
1377                 do {
1378                         start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1379                         rx_pkts = rxo->stats.rx_pkts;
1380                 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
1381
1382                 txo = &adapter->tx_obj[eqo->idx];
1383                 do {
1384                         start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1385                         tx_pkts = txo->stats.tx_reqs;
1386                 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
1387
1389                 /* Skip if jiffies wrapped around or this is the first calculation */
1390                 now = jiffies;
1391                 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1392                     rx_pkts < aic->rx_pkts_prev ||
1393                     tx_pkts < aic->tx_reqs_prev) {
1394                         be_aic_update(aic, rx_pkts, tx_pkts, now);
1395                         continue;
1396                 }
1397
1398                 delta = jiffies_to_msecs(now - aic->jiffies);
1399                 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1400                         (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1401                 eqd = (pps / 15000) << 2;
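                /* Worked example: at an aggregate 600k pkts/s this gives
                 * eqd = (600000 / 15000) << 2 = 160; low rates that yield
                 * eqd < 8 disable coalescing, and the result is clamped
                 * to [min_eqd, max_eqd] below.
                 */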
1402
1403                 if (eqd < 8)
1404                         eqd = 0;
1405                 eqd = min_t(u32, eqd, aic->max_eqd);
1406                 eqd = max_t(u32, eqd, aic->min_eqd);
1407
1408                 be_aic_update(aic, rx_pkts, tx_pkts, now);
1409 modify_eqd:
1410                 if (eqd != aic->prev_eqd) {
1411                         set_eqd[num].delay_multiplier = (eqd * 65)/100;
1412                         set_eqd[num].eq_id = eqo->q.id;
1413                         aic->prev_eqd = eqd;
1414                         num++;
1415                 }
1416         }
1417
1418         if (num)
1419                 be_cmd_modify_eqd(adapter, set_eqd, num);
1420 }
1421
1422 static void be_rx_stats_update(struct be_rx_obj *rxo,
1423                 struct be_rx_compl_info *rxcp)
1424 {
1425         struct be_rx_stats *stats = rx_stats(rxo);
1426
1427         u64_stats_update_begin(&stats->sync);
1428         stats->rx_compl++;
1429         stats->rx_bytes += rxcp->pkt_size;
1430         stats->rx_pkts++;
1431         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1432                 stats->rx_mcast_pkts++;
1433         if (rxcp->err)
1434                 stats->rx_compl_err++;
1435         u64_stats_update_end(&stats->sync);
1436 }
1437
1438 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1439 {
1440         /* L4 checksum is not reliable for non-TCP/UDP packets.
1441          * Also ignore ipcksm for IPv6 packets */
1442         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1443                                 (rxcp->ip_csum || rxcp->ipv6);
1444 }
1445
1446 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
1447 {
1448         struct be_adapter *adapter = rxo->adapter;
1449         struct be_rx_page_info *rx_page_info;
1450         struct be_queue_info *rxq = &rxo->q;
1451         u16 frag_idx = rxq->tail;
1452
1453         rx_page_info = &rxo->page_info_tbl[frag_idx];
1454         BUG_ON(!rx_page_info->page);
1455
1456         if (rx_page_info->last_page_user) {
1457                 dma_unmap_page(&adapter->pdev->dev,
1458                                dma_unmap_addr(rx_page_info, bus),
1459                                adapter->big_page_size, DMA_FROM_DEVICE);
1460                 rx_page_info->last_page_user = false;
1461         }
1462
1463         queue_tail_inc(rxq);
1464         atomic_dec(&rxq->used);
1465         return rx_page_info;
1466 }
1467
1468 /* Throw away the data in the Rx completion */
1469 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1470                                 struct be_rx_compl_info *rxcp)
1471 {
1472         struct be_rx_page_info *page_info;
1473         u16 i, num_rcvd = rxcp->num_rcvd;
1474
1475         for (i = 0; i < num_rcvd; i++) {
1476                 page_info = get_rx_page_info(rxo);
1477                 put_page(page_info->page);
1478                 memset(page_info, 0, sizeof(*page_info));
1479         }
1480 }
1481
1482 /*
1483  * skb_fill_rx_data forms a complete skb for an Ethernet frame
1484  * indicated by rxcp.
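 *
 * Worked example (assuming the default rx_frag_size of 2048): a 3000-byte
 * frame arrives in two fragments; ETH_HLEN bytes of the first 2048-byte
 * fragment are copied into the skb header, the rest of it is attached as
 * page frag 0, and the remaining 952 bytes are appended by the loop that
 * handles the additional fragments.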
1485  */
1486 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1487                              struct be_rx_compl_info *rxcp)
1488 {
1489         struct be_rx_page_info *page_info;
1490         u16 i, j;
1491         u16 hdr_len, curr_frag_len, remaining;
1492         u8 *start;
1493
1494         page_info = get_rx_page_info(rxo);
1495         start = page_address(page_info->page) + page_info->page_offset;
1496         prefetch(start);
1497
1498         /* Copy data in the first descriptor of this completion */
1499         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1500
1501         skb->len = curr_frag_len;
1502         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1503                 memcpy(skb->data, start, curr_frag_len);
1504                 /* Complete packet has now been moved to data */
1505                 put_page(page_info->page);
1506                 skb->data_len = 0;
1507                 skb->tail += curr_frag_len;
1508         } else {
1509                 hdr_len = ETH_HLEN;
1510                 memcpy(skb->data, start, hdr_len);
1511                 skb_shinfo(skb)->nr_frags = 1;
1512                 skb_frag_set_page(skb, 0, page_info->page);
1513                 skb_shinfo(skb)->frags[0].page_offset =
1514                                         page_info->page_offset + hdr_len;
1515                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1516                 skb->data_len = curr_frag_len - hdr_len;
1517                 skb->truesize += rx_frag_size;
1518                 skb->tail += hdr_len;
1519         }
1520         page_info->page = NULL;
1521
1522         if (rxcp->pkt_size <= rx_frag_size) {
1523                 BUG_ON(rxcp->num_rcvd != 1);
1524                 return;
1525         }
1526
1527         /* More frags present for this completion */
1528         remaining = rxcp->pkt_size - curr_frag_len;
1529         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1530                 page_info = get_rx_page_info(rxo);
1531                 curr_frag_len = min(remaining, rx_frag_size);
1532
1533                 /* Coalesce all frags from the same physical page in one slot */
1534                 if (page_info->page_offset == 0) {
1535                         /* Fresh page */
1536                         j++;
1537                         skb_frag_set_page(skb, j, page_info->page);
1538                         skb_shinfo(skb)->frags[j].page_offset =
1539                                                         page_info->page_offset;
1540                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1541                         skb_shinfo(skb)->nr_frags++;
1542                 } else {
1543                         put_page(page_info->page);
1544                 }
1545
1546                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1547                 skb->len += curr_frag_len;
1548                 skb->data_len += curr_frag_len;
1549                 skb->truesize += rx_frag_size;
1550                 remaining -= curr_frag_len;
1551                 page_info->page = NULL;
1552         }
1553         BUG_ON(j > MAX_SKB_FRAGS);
1554 }
1555
1556 /* Process the RX completion indicated by rxcp when GRO is disabled */
1557 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
1558                                 struct be_rx_compl_info *rxcp)
1559 {
1560         struct be_adapter *adapter = rxo->adapter;
1561         struct net_device *netdev = adapter->netdev;
1562         struct sk_buff *skb;
1563
1564         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1565         if (unlikely(!skb)) {
1566                 rx_stats(rxo)->rx_drops_no_skbs++;
1567                 be_rx_compl_discard(rxo, rxcp);
1568                 return;
1569         }
1570
1571         skb_fill_rx_data(rxo, skb, rxcp);
1572
1573         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1574                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1575         else
1576                 skb_checksum_none_assert(skb);
1577
1578         skb->protocol = eth_type_trans(skb, netdev);
1579         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1580         if (netdev->features & NETIF_F_RXHASH)
1581                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1582         skb_mark_napi_id(skb, napi);
1583
1584         if (rxcp->vlanf)
1585                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1586
1587         netif_receive_skb(skb);
1588 }
1589
1590 /* Process the RX completion indicated by rxcp when GRO is enabled */
1591 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1592                                     struct napi_struct *napi,
1593                                     struct be_rx_compl_info *rxcp)
1594 {
1595         struct be_adapter *adapter = rxo->adapter;
1596         struct be_rx_page_info *page_info;
1597         struct sk_buff *skb = NULL;
1598         u16 remaining, curr_frag_len;
1599         u16 i, j;
1600
1601         skb = napi_get_frags(napi);
1602         if (!skb) {
1603                 be_rx_compl_discard(rxo, rxcp);
1604                 return;
1605         }
1606
1607         remaining = rxcp->pkt_size;
1608         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1609                 page_info = get_rx_page_info(rxo);
1610
1611                 curr_frag_len = min(remaining, rx_frag_size);
1612
1613                 /* Coalesce all frags from the same physical page in one slot */
1614                 if (i == 0 || page_info->page_offset == 0) {
1615                         /* First frag or Fresh page */
1616                         j++;
1617                         skb_frag_set_page(skb, j, page_info->page);
1618                         skb_shinfo(skb)->frags[j].page_offset =
1619                                                         page_info->page_offset;
1620                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1621                 } else {
1622                         put_page(page_info->page);
1623                 }
1624                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1625                 skb->truesize += rx_frag_size;
1626                 remaining -= curr_frag_len;
1627                 memset(page_info, 0, sizeof(*page_info));
1628         }
1629         BUG_ON(j > MAX_SKB_FRAGS);
1630
1631         skb_shinfo(skb)->nr_frags = j + 1;
1632         skb->len = rxcp->pkt_size;
1633         skb->data_len = rxcp->pkt_size;
1634         skb->ip_summed = CHECKSUM_UNNECESSARY;
1635         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1636         if (adapter->netdev->features & NETIF_F_RXHASH)
1637                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1638         skb_mark_napi_id(skb, napi);
1639
1640         if (rxcp->vlanf)
1641                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1642
1643         napi_gro_frags(napi);
1644 }
1645
1646 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1647                                  struct be_rx_compl_info *rxcp)
1648 {
1649         rxcp->pkt_size =
1650                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1651         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1652         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1653         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1654         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1655         rxcp->ip_csum =
1656                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1657         rxcp->l4_csum =
1658                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1659         rxcp->ipv6 =
1660                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1661         rxcp->num_rcvd =
1662                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1663         rxcp->pkt_type =
1664                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1665         rxcp->rss_hash =
1666                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1667         if (rxcp->vlanf) {
1668                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1669                                           compl);
1670                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1671                                                compl);
1672         }
1673         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1674 }
1675
1676 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1677                                  struct be_rx_compl_info *rxcp)
1678 {
1679         rxcp->pkt_size =
1680                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1681         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1682         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1683         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1684         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1685         rxcp->ip_csum =
1686                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1687         rxcp->l4_csum =
1688                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1689         rxcp->ipv6 =
1690                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1691         rxcp->num_rcvd =
1692                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1693         rxcp->pkt_type =
1694                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1695         rxcp->rss_hash =
1696                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1697         if (rxcp->vlanf) {
1698                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1699                                           compl);
1700                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1701                                                compl);
1702         }
1703         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1704         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1705                                       ip_frag, compl);
1706 }
1707
1708 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1709 {
1710         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1711         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1712         struct be_adapter *adapter = rxo->adapter;
1713
1714         /* For checking the valid bit it is OK to use either definition, as
1715          * the valid bit is at the same position in both v0 and v1 Rx compls */
1716         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1717                 return NULL;
1718
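        /* Read the rest of the completion only after the valid bit has
         * been observed.
         */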
1719         rmb();
1720         be_dws_le_to_cpu(compl, sizeof(*compl));
1721
1722         if (adapter->be3_native)
1723                 be_parse_rx_compl_v1(compl, rxcp);
1724         else
1725                 be_parse_rx_compl_v0(compl, rxcp);
1726
1727         if (rxcp->ip_frag)
1728                 rxcp->l4_csum = 0;
1729
1730         if (rxcp->vlanf) {
1731                 /* vlanf could be wrongly set in some cards.
1732                  * Ignore it if vtm is not set */
1733                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1734                         rxcp->vlanf = 0;
1735
1736                 if (!lancer_chip(adapter))
1737                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1738
1739                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1740                     !adapter->vlan_tag[rxcp->vlan_tag])
1741                         rxcp->vlanf = 0;
1742         }
1743
1744         /* As the compl has been parsed, reset it; we won't touch it again */
1745         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1746
1747         queue_tail_inc(&rxo->cq);
1748         return rxcp;
1749 }
1750
1751 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1752 {
1753         u32 order = get_order(size);
1754
1755         if (order > 0)
1756                 gfp |= __GFP_COMP;
1757         return  alloc_pages(gfp, order);
1758 }
1759
1760 /*
1761  * Allocate a page, split it into fragments of size rx_frag_size and post
1762  * them as receive buffers to BE
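 *
 * Note (assuming the default 2048-byte rx_frag_size and 4K pages):
 * big_page_size is then a single 4K page shared by two fragments, and
 * last_page_user marks the final fragment so that the DMA mapping is
 * released only once, in get_rx_page_info().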
1763  */
1764 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1765 {
1766         struct be_adapter *adapter = rxo->adapter;
1767         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1768         struct be_queue_info *rxq = &rxo->q;
1769         struct page *pagep = NULL;
1770         struct be_eth_rx_d *rxd;
1771         u64 page_dmaaddr = 0, frag_dmaaddr;
1772         u32 posted, page_offset = 0;
1773
1774         page_info = &rxo->page_info_tbl[rxq->head];
1775         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1776                 if (!pagep) {
1777                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1778                         if (unlikely(!pagep)) {
1779                                 rx_stats(rxo)->rx_post_fail++;
1780                                 break;
1781                         }
1782                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1783                                                     0, adapter->big_page_size,
1784                                                     DMA_FROM_DEVICE);
1785                         page_info->page_offset = 0;
1786                 } else {
1787                         get_page(pagep);
1788                         page_info->page_offset = page_offset + rx_frag_size;
1789                 }
1790                 page_offset = page_info->page_offset;
1791                 page_info->page = pagep;
1792                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1793                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1794
1795                 rxd = queue_head_node(rxq);
1796                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1797                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1798
1799                 /* Any space left in the current big page for another frag? */
1800                 if ((page_offset + rx_frag_size + rx_frag_size) >
1801                                         adapter->big_page_size) {
1802                         pagep = NULL;
1803                         page_info->last_page_user = true;
1804                 }
1805
1806                 prev_page_info = page_info;
1807                 queue_head_inc(rxq);
1808                 page_info = &rxo->page_info_tbl[rxq->head];
1809         }
1810         if (pagep)
1811                 prev_page_info->last_page_user = true;
1812
1813         if (posted) {
1814                 atomic_add(posted, &rxq->used);
1815                 if (rxo->rx_post_starved)
1816                         rxo->rx_post_starved = false;
1817                 be_rxq_notify(adapter, rxq->id, posted);
1818         } else if (atomic_read(&rxq->used) == 0) {
1819                 /* Let be_worker replenish when memory is available */
1820                 rxo->rx_post_starved = true;
1821         }
1822 }
1823
1824 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1825 {
1826         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1827
1828         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1829                 return NULL;
1830
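        /* Read the rest of the completion only after the valid bit has
         * been observed.
         */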
1831         rmb();
1832         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1833
1834         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1835
1836         queue_tail_inc(tx_cq);
1837         return txcp;
1838 }
1839
1840 static u16 be_tx_compl_process(struct be_adapter *adapter,
1841                 struct be_tx_obj *txo, u16 last_index)
1842 {
1843         struct be_queue_info *txq = &txo->q;
1844         struct be_eth_wrb *wrb;
1845         struct sk_buff **sent_skbs = txo->sent_skb_list;
1846         struct sk_buff *sent_skb;
1847         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1848         bool unmap_skb_hdr = true;
1849
1850         sent_skb = sent_skbs[txq->tail];
1851         BUG_ON(!sent_skb);
1852         sent_skbs[txq->tail] = NULL;
1853
1854         /* skip header wrb */
1855         queue_tail_inc(txq);
1856
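        /* Unmap the data wrbs for this skb until the index reported in
         * the completion is reached; num_wrbs was seeded with 1 above to
         * account for the already skipped header wrb.
         */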
1857         do {
1858                 cur_index = txq->tail;
1859                 wrb = queue_tail_node(txq);
1860                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1861                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1862                 unmap_skb_hdr = false;
1863
1864                 num_wrbs++;
1865                 queue_tail_inc(txq);
1866         } while (cur_index != last_index);
1867
1868         kfree_skb(sent_skb);
1869         return num_wrbs;
1870 }
1871
1872 /* Consume the pending entries in the event queue and return their count */
1873 static inline int events_get(struct be_eq_obj *eqo)
1874 {
1875         struct be_eq_entry *eqe;
1876         int num = 0;
1877
1878         do {
1879                 eqe = queue_tail_node(&eqo->q);
1880                 if (eqe->evt == 0)
1881                         break;
1882
1883                 rmb();
1884                 eqe->evt = 0;
1885                 num++;
1886                 queue_tail_inc(&eqo->q);
1887         } while (true);
1888
1889         return num;
1890 }
1891
1892 /* Leaves the EQ in a disarmed state */
1893 static void be_eq_clean(struct be_eq_obj *eqo)
1894 {
1895         int num = events_get(eqo);
1896
1897         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1898 }
1899
1900 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1901 {
1902         struct be_rx_page_info *page_info;
1903         struct be_queue_info *rxq = &rxo->q;
1904         struct be_queue_info *rx_cq = &rxo->cq;
1905         struct be_rx_compl_info *rxcp;
1906         struct be_adapter *adapter = rxo->adapter;
1907         int flush_wait = 0;
1908
1909         /* Consume pending rx completions.
1910          * Wait for the flush completion (identified by zero num_rcvd)
1911          * to arrive. Notify CQ even when there are no more CQ entries
1912          * for HW to flush partially coalesced CQ entries.
1913          * In Lancer, there is no need to wait for flush compl.
1914          */
1915         for (;;) {
1916                 rxcp = be_rx_compl_get(rxo);
1917                 if (rxcp == NULL) {
1918                         if (lancer_chip(adapter))
1919                                 break;
1920
1921                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1922                                 dev_warn(&adapter->pdev->dev,
1923                                          "did not receive flush compl\n");
1924                                 break;
1925                         }
1926                         be_cq_notify(adapter, rx_cq->id, true, 0);
1927                         mdelay(1);
1928                 } else {
1929                         be_rx_compl_discard(rxo, rxcp);
1930                         be_cq_notify(adapter, rx_cq->id, false, 1);
1931                         if (rxcp->num_rcvd == 0)
1932                                 break;
1933                 }
1934         }
1935
1936         /* After cleanup, leave the CQ in unarmed state */
1937         be_cq_notify(adapter, rx_cq->id, false, 0);
1938
1939         /* Then free posted rx buffers that were not used */
1940         while (atomic_read(&rxq->used) > 0) {
1941                 page_info = get_rx_page_info(rxo);
1942                 put_page(page_info->page);
1943                 memset(page_info, 0, sizeof(*page_info));
1944         }
1945         BUG_ON(atomic_read(&rxq->used));
1946         rxq->tail = rxq->head = 0;
1947 }
1948
1949 static void be_tx_compl_clean(struct be_adapter *adapter)
1950 {
1951         struct be_tx_obj *txo;
1952         struct be_queue_info *txq;
1953         struct be_eth_tx_compl *txcp;
1954         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1955         struct sk_buff *sent_skb;
1956         bool dummy_wrb;
1957         int i, pending_txqs;
1958
1959         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1960         do {
1961                 pending_txqs = adapter->num_tx_qs;
1962
1963                 for_all_tx_queues(adapter, txo, i) {
1964                         txq = &txo->q;
1965                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1966                                 end_idx =
1967                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1968                                                       wrb_index, txcp);
1969                                 num_wrbs += be_tx_compl_process(adapter, txo,
1970                                                                 end_idx);
1971                                 cmpl++;
1972                         }
1973                         if (cmpl) {
1974                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1975                                 atomic_sub(num_wrbs, &txq->used);
1976                                 cmpl = 0;
1977                                 num_wrbs = 0;
1978                         }
1979                         if (atomic_read(&txq->used) == 0)
1980                                 pending_txqs--;
1981                 }
1982
1983                 if (pending_txqs == 0 || ++timeo > 200)
1984                         break;
1985
1986                 mdelay(1);
1987         } while (true);
1988
1989         for_all_tx_queues(adapter, txo, i) {
1990                 txq = &txo->q;
1991                 if (atomic_read(&txq->used))
1992                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1993                                 atomic_read(&txq->used));
1994
1995                 /* free posted tx for which compls will never arrive */
1996                 while (atomic_read(&txq->used)) {
1997                         sent_skb = txo->sent_skb_list[txq->tail];
1998                         end_idx = txq->tail;
1999                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2000                                                    &dummy_wrb);
2001                         index_adv(&end_idx, num_wrbs - 1, txq->len);
2002                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2003                         atomic_sub(num_wrbs, &txq->used);
2004                 }
2005         }
2006 }
2007
2008 static void be_evt_queues_destroy(struct be_adapter *adapter)
2009 {
2010         struct be_eq_obj *eqo;
2011         int i;
2012
2013         for_all_evt_queues(adapter, eqo, i) {
2014                 if (eqo->q.created) {
2015                         be_eq_clean(eqo);
2016                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2017                         napi_hash_del(&eqo->napi);
2018                         netif_napi_del(&eqo->napi);
2019                 }
2020                 be_queue_free(adapter, &eqo->q);
2021         }
2022 }
2023
2024 static int be_evt_queues_create(struct be_adapter *adapter)
2025 {
2026         struct be_queue_info *eq;
2027         struct be_eq_obj *eqo;
2028         struct be_aic_obj *aic;
2029         int i, rc;
2030
2031         adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2032                                     adapter->cfg_num_qs);
2033
2034         for_all_evt_queues(adapter, eqo, i) {
2035                 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2036                                BE_NAPI_WEIGHT);
2037                 napi_hash_add(&eqo->napi);
2038                 aic = &adapter->aic_obj[i];
2039                 eqo->adapter = adapter;
2040                 eqo->tx_budget = BE_TX_BUDGET;
2041                 eqo->idx = i;
2042                 aic->max_eqd = BE_MAX_EQD;
2043                 aic->enable = true;
2044
2045                 eq = &eqo->q;
2046                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2047                                         sizeof(struct be_eq_entry));
2048                 if (rc)
2049                         return rc;
2050
2051                 rc = be_cmd_eq_create(adapter, eqo);
2052                 if (rc)
2053                         return rc;
2054         }
2055         return 0;
2056 }
2057
2058 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2059 {
2060         struct be_queue_info *q;
2061
2062         q = &adapter->mcc_obj.q;
2063         if (q->created)
2064                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2065         be_queue_free(adapter, q);
2066
2067         q = &adapter->mcc_obj.cq;
2068         if (q->created)
2069                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2070         be_queue_free(adapter, q);
2071 }
2072
2073 /* Must be called only after TX qs are created as MCC shares TX EQ */
2074 static int be_mcc_queues_create(struct be_adapter *adapter)
2075 {
2076         struct be_queue_info *q, *cq;
2077
2078         cq = &adapter->mcc_obj.cq;
2079         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2080                         sizeof(struct be_mcc_compl)))
2081                 goto err;
2082
2083         /* Use the default EQ for MCC completions */
2084         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2085                 goto mcc_cq_free;
2086
2087         q = &adapter->mcc_obj.q;
2088         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2089                 goto mcc_cq_destroy;
2090
2091         if (be_cmd_mccq_create(adapter, q, cq))
2092                 goto mcc_q_free;
2093
2094         return 0;
2095
2096 mcc_q_free:
2097         be_queue_free(adapter, q);
2098 mcc_cq_destroy:
2099         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2100 mcc_cq_free:
2101         be_queue_free(adapter, cq);
2102 err:
2103         return -1;
2104 }
2105
2106 static void be_tx_queues_destroy(struct be_adapter *adapter)
2107 {
2108         struct be_queue_info *q;
2109         struct be_tx_obj *txo;
2110         u8 i;
2111
2112         for_all_tx_queues(adapter, txo, i) {
2113                 q = &txo->q;
2114                 if (q->created)
2115                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2116                 be_queue_free(adapter, q);
2117
2118                 q = &txo->cq;
2119                 if (q->created)
2120                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2121                 be_queue_free(adapter, q);
2122         }
2123 }
2124
2125 static int be_tx_qs_create(struct be_adapter *adapter)
2126 {
2127         struct be_queue_info *cq, *eq;
2128         struct be_tx_obj *txo;
2129         int status, i;
2130
2131         adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2132
2133         for_all_tx_queues(adapter, txo, i) {
2134                 cq = &txo->cq;
2135                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2136                                         sizeof(struct be_eth_tx_compl));
2137                 if (status)
2138                         return status;
2139
2140                 u64_stats_init(&txo->stats.sync);
2141                 u64_stats_init(&txo->stats.sync_compl);
2142
2143                 /* If num_evt_qs is less than num_tx_qs, then more than
2144                  * one TXQ shares an EQ
2145                  */
2146                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2147                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2148                 if (status)
2149                         return status;
2150
2151                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2152                                         sizeof(struct be_eth_wrb));
2153                 if (status)
2154                         return status;
2155
2156                 status = be_cmd_txq_create(adapter, txo);
2157                 if (status)
2158                         return status;
2159         }
2160
2161         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2162                  adapter->num_tx_qs);
2163         return 0;
2164 }
2165
2166 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2167 {
2168         struct be_queue_info *q;
2169         struct be_rx_obj *rxo;
2170         int i;
2171
2172         for_all_rx_queues(adapter, rxo, i) {
2173                 q = &rxo->cq;
2174                 if (q->created)
2175                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2176                 be_queue_free(adapter, q);
2177         }
2178 }
2179
2180 static int be_rx_cqs_create(struct be_adapter *adapter)
2181 {
2182         struct be_queue_info *eq, *cq;
2183         struct be_rx_obj *rxo;
2184         int rc, i;
2185
2186         /* We can create as many RSS rings as there are EQs. */
2187         adapter->num_rx_qs = adapter->num_evt_qs;
2188
2189         /* We'll use RSS only if at least 2 RSS rings are supported.
2190          * When RSS is used, we'll need a default RXQ for non-IP traffic.
2191          */
2192         if (adapter->num_rx_qs > 1)
2193                 adapter->num_rx_qs++;
2194
2195         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2196         for_all_rx_queues(adapter, rxo, i) {
2197                 rxo->adapter = adapter;
2198                 cq = &rxo->cq;
2199                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2200                                 sizeof(struct be_eth_rx_compl));
2201                 if (rc)
2202                         return rc;
2203
2204                 u64_stats_init(&rxo->stats.sync);
2205                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2206                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2207                 if (rc)
2208                         return rc;
2209         }
2210
2211         dev_info(&adapter->pdev->dev,
2212                  "created %d RSS queue(s) and 1 default RX queue\n",
2213                  adapter->num_rx_qs - 1);
2214         return 0;
2215 }
2216
2217 static irqreturn_t be_intx(int irq, void *dev)
2218 {
2219         struct be_eq_obj *eqo = dev;
2220         struct be_adapter *adapter = eqo->adapter;
2221         int num_evts = 0;
2222
2223         /* IRQ is not expected when NAPI is scheduled as the EQ
2224          * will not be armed.
2225          * But, this can happen on Lancer INTx where it takes
2226          * a while to de-assert INTx or in BE2 where occasionally
2227          * an interrupt may be raised even when EQ is unarmed.
2228          * If NAPI is already scheduled, then counting & notifying
2229          * events will orphan them.
2230          */
2231         if (napi_schedule_prep(&eqo->napi)) {
2232                 num_evts = events_get(eqo);
2233                 __napi_schedule(&eqo->napi);
2234                 if (num_evts)
2235                         eqo->spurious_intr = 0;
2236         }
2237         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2238
2239         /* Return IRQ_HANDLED only for the first spurious intr
2240          * after a valid intr to stop the kernel from branding
2241          * this irq as a bad one!
2242          */
2243         if (num_evts || eqo->spurious_intr++ == 0)
2244                 return IRQ_HANDLED;
2245         else
2246                 return IRQ_NONE;
2247 }
2248
2249 static irqreturn_t be_msix(int irq, void *dev)
2250 {
2251         struct be_eq_obj *eqo = dev;
2252
2253         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2254         napi_schedule(&eqo->napi);
2255         return IRQ_HANDLED;
2256 }
2257
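/* GRO is attempted only for error-free TCP frames whose L4 checksum
 * passed hardware validation.
 */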
2258 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2259 {
2260         return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
2261 }
2262
2263 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2264                         int budget, int polling)
2265 {
2266         struct be_adapter *adapter = rxo->adapter;
2267         struct be_queue_info *rx_cq = &rxo->cq;
2268         struct be_rx_compl_info *rxcp;
2269         u32 work_done;
2270
2271         for (work_done = 0; work_done < budget; work_done++) {
2272                 rxcp = be_rx_compl_get(rxo);
2273                 if (!rxcp)
2274                         break;
2275
2276                 /* Is it a flush compl that has no data? */
2277                 if (unlikely(rxcp->num_rcvd == 0))
2278                         goto loop_continue;
2279
2280                 /* Discard compl with partial DMA Lancer B0 */
2281                 if (unlikely(!rxcp->pkt_size)) {
2282                         be_rx_compl_discard(rxo, rxcp);
2283                         goto loop_continue;
2284                 }
2285
2286                 /* On BE drop pkts that arrive due to imperfect filtering in
2287                  * promiscuous mode on some SKUs
2288                  */
2289                 if (unlikely(rxcp->port != adapter->port_num &&
2290                                 !lancer_chip(adapter))) {
2291                         be_rx_compl_discard(rxo, rxcp);
2292                         goto loop_continue;
2293                 }
2294
2295                 /* Don't do gro when we're busy_polling */
2296                 if (do_gro(rxcp) && polling != BUSY_POLLING)
2297                         be_rx_compl_process_gro(rxo, napi, rxcp);
2298                 else
2299                         be_rx_compl_process(rxo, napi, rxcp);
2300
2301 loop_continue:
2302                 be_rx_stats_update(rxo, rxcp);
2303         }
2304
2305         if (work_done) {
2306                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2307
2308                 /* When an rx-obj gets into post_starved state, just
2309                  * let be_worker do the posting.
2310                  */
2311                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2312                     !rxo->rx_post_starved)
2313                         be_post_rx_frags(rxo, GFP_ATOMIC);
2314         }
2315
2316         return work_done;
2317 }
2318
2319 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2320                           int budget, int idx)
2321 {
2322         struct be_eth_tx_compl *txcp;
2323         int num_wrbs = 0, work_done;
2324
2325         for (work_done = 0; work_done < budget; work_done++) {
2326                 txcp = be_tx_compl_get(&txo->cq);
2327                 if (!txcp)
2328                         break;
2329                 num_wrbs += be_tx_compl_process(adapter, txo,
2330                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2331                                         wrb_index, txcp));
2332         }
2333
2334         if (work_done) {
2335                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2336                 atomic_sub(num_wrbs, &txo->q.used);
2337
2338                 /* As Tx wrbs have been freed up, wake up the netdev queue
2339                  * if it was stopped due to lack of tx wrbs. */
2340                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2341                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2342                         netif_wake_subqueue(adapter->netdev, idx);
2343                 }
2344
2345                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2346                 tx_stats(txo)->tx_compl += work_done;
2347                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2348         }
2349         return (work_done < budget); /* Done */
2350 }
2351
2352 int be_poll(struct napi_struct *napi, int budget)
2353 {
2354         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2355         struct be_adapter *adapter = eqo->adapter;
2356         int max_work = 0, work, i, num_evts;
2357         struct be_rx_obj *rxo;
2358         bool tx_done;
2359
2360         num_evts = events_get(eqo);
2361
2362         /* Process all TXQs serviced by this EQ */
2363         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2364                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2365                                         eqo->tx_budget, i);
2366                 if (!tx_done)
2367                         max_work = budget;
2368         }
2369
2370         if (be_lock_napi(eqo)) {
2371                 /* This loop will iterate twice for EQ0 in which
2372                  * completions of the last RXQ (default one) are also processed
2373                  * For other EQs the loop iterates only once
2374                  */
2375                 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2376                         work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2377                         max_work = max(work, max_work);
2378                 }
2379                 be_unlock_napi(eqo);
2380         } else {
2381                 max_work = budget;
2382         }
2383
2384         if (is_mcc_eqo(eqo))
2385                 be_process_mcc(adapter);
2386
2387         if (max_work < budget) {
2388                 napi_complete(napi);
2389                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2390         } else {
2391                 /* As we'll continue in polling mode, count and clear events */
2392                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2393         }
2394         return max_work;
2395 }
2396
2397 #ifdef CONFIG_NET_RX_BUSY_POLL
2398 static int be_busy_poll(struct napi_struct *napi)
2399 {
2400         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2401         struct be_adapter *adapter = eqo->adapter;
2402         struct be_rx_obj *rxo;
2403         int i, work = 0;
2404
2405         if (!be_lock_busy_poll(eqo))
2406                 return LL_FLUSH_BUSY;
2407
2408         for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2409                 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2410                 if (work)
2411                         break;
2412         }
2413
2414         be_unlock_busy_poll(eqo);
2415         return work;
2416 }
2417 #endif
2418
2419 void be_detect_error(struct be_adapter *adapter)
2420 {
2421         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2422         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2423         u32 i;
2424
2425         if (be_hw_error(adapter))
2426                 return;
2427
2428         if (lancer_chip(adapter)) {
2429                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2430                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2431                         sliport_err1 = ioread32(adapter->db +
2432                                         SLIPORT_ERROR1_OFFSET);
2433                         sliport_err2 = ioread32(adapter->db +
2434                                         SLIPORT_ERROR2_OFFSET);
2435                 }
2436         } else {
2437                 pci_read_config_dword(adapter->pdev,
2438                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2439                 pci_read_config_dword(adapter->pdev,
2440                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2441                 pci_read_config_dword(adapter->pdev,
2442                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2443                 pci_read_config_dword(adapter->pdev,
2444                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2445
2446                 ue_lo = (ue_lo & ~ue_lo_mask);
2447                 ue_hi = (ue_hi & ~ue_hi_mask);
2448         }
2449
2450         /* On certain platforms BE hardware can indicate spurious UEs.
2451          * A real UE will make the h/w stop working completely anyway,
2452          * so hw_error is not set here on UE detection.
2453          */
2454         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2455                 adapter->hw_error = true;
2456                 /* Do not log error messages if it's a FW reset */
2457                 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2458                     sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2459                         dev_info(&adapter->pdev->dev,
2460                                  "Firmware update in progress\n");
2461                         return;
2462                 } else {
2463                         dev_err(&adapter->pdev->dev,
2464                                 "Error detected in the card\n");
2465                 }
2466         }
2467
2468         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2469                 dev_err(&adapter->pdev->dev,
2470                         "ERR: sliport status 0x%x\n", sliport_status);
2471                 dev_err(&adapter->pdev->dev,
2472                         "ERR: sliport error1 0x%x\n", sliport_err1);
2473                 dev_err(&adapter->pdev->dev,
2474                         "ERR: sliport error2 0x%x\n", sliport_err2);
2475         }
2476
2477         if (ue_lo) {
2478                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2479                         if (ue_lo & 1)
2480                                 dev_err(&adapter->pdev->dev,
2481                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2482                 }
2483         }
2484
2485         if (ue_hi) {
2486                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2487                         if (ue_hi & 1)
2488                                 dev_err(&adapter->pdev->dev,
2489                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2490                 }
2491         }
2493 }
2494
2495 static void be_msix_disable(struct be_adapter *adapter)
2496 {
2497         if (msix_enabled(adapter)) {
2498                 pci_disable_msix(adapter->pdev);
2499                 adapter->num_msix_vec = 0;
2500                 adapter->num_msix_roce_vec = 0;
2501         }
2502 }
2503
2504 static int be_msix_enable(struct be_adapter *adapter)
2505 {
2506         int i, status, num_vec;
2507         struct device *dev = &adapter->pdev->dev;
2508
2509         /* If RoCE is supported, program the max number of NIC vectors that
2510          * may be configured via set-channels, along with vectors needed for
2511          * RoCE. Else, just program the number we'll use initially.
2512          */
2513         if (be_roce_supported(adapter))
2514                 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2515                                 2 * num_online_cpus());
2516         else
2517                 num_vec = adapter->cfg_num_qs;
2518
2519         for (i = 0; i < num_vec; i++)
2520                 adapter->msix_entries[i].entry = i;
2521
2522         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
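        /* pci_enable_msix() returns 0 on success or, when fewer vectors
         * are available, a positive count that the request can be
         * retried with.
         */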
2523         if (status == 0) {
2524                 goto done;
2525         } else if (status >= MIN_MSIX_VECTORS) {
2526                 num_vec = status;
2527                 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2528                                          num_vec);
2529                 if (!status)
2530                         goto done;
2531         }
2532
2533         dev_warn(dev, "MSIx enable failed\n");
2534
2535         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2536         if (!be_physfn(adapter))
2537                 return status;
2538         return 0;
2539 done:
2540         if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2541                 adapter->num_msix_roce_vec = num_vec / 2;
2542                 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2543                          adapter->num_msix_roce_vec);
2544         }
2545
2546         adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2547
2548         dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2549                  adapter->num_msix_vec);
2550         return 0;
2551 }
2552
2553 static inline int be_msix_vec_get(struct be_adapter *adapter,
2554                                 struct be_eq_obj *eqo)
2555 {
2556         return adapter->msix_entries[eqo->msix_idx].vector;
2557 }
2558
2559 static int be_msix_register(struct be_adapter *adapter)
2560 {
2561         struct net_device *netdev = adapter->netdev;
2562         struct be_eq_obj *eqo;
2563         int status, i, vec;
2564
2565         for_all_evt_queues(adapter, eqo, i) {
2566                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2567                 vec = be_msix_vec_get(adapter, eqo);
2568                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2569                 if (status)
2570                         goto err_msix;
2571         }
2572
2573         return 0;
2574 err_msix:
2575         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2576                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2577         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2578                 status);
2579         be_msix_disable(adapter);
2580         return status;
2581 }
2582
2583 static int be_irq_register(struct be_adapter *adapter)
2584 {
2585         struct net_device *netdev = adapter->netdev;
2586         int status;
2587
2588         if (msix_enabled(adapter)) {
2589                 status = be_msix_register(adapter);
2590                 if (status == 0)
2591                         goto done;
2592                 /* INTx is not supported for VF */
2593                 if (!be_physfn(adapter))
2594                         return status;
2595         }
2596
2597         /* INTx: only the first EQ is used */
2598         netdev->irq = adapter->pdev->irq;
2599         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2600                              &adapter->eq_obj[0]);
2601         if (status) {
2602                 dev_err(&adapter->pdev->dev,
2603                         "INTx request IRQ failed - err %d\n", status);
2604                 return status;
2605         }
2606 done:
2607         adapter->isr_registered = true;
2608         return 0;
2609 }
2610
2611 static void be_irq_unregister(struct be_adapter *adapter)
2612 {
2613         struct net_device *netdev = adapter->netdev;
2614         struct be_eq_obj *eqo;
2615         int i;
2616
2617         if (!adapter->isr_registered)
2618                 return;
2619
2620         /* INTx */
2621         if (!msix_enabled(adapter)) {
2622                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2623                 goto done;
2624         }
2625
2626         /* MSIx */
2627         for_all_evt_queues(adapter, eqo, i)
2628                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2629
2630 done:
2631         adapter->isr_registered = false;
2632 }
2633
2634 static void be_rx_qs_destroy(struct be_adapter *adapter)
2635 {
2636         struct be_queue_info *q;
2637         struct be_rx_obj *rxo;
2638         int i;
2639
2640         for_all_rx_queues(adapter, rxo, i) {
2641                 q = &rxo->q;
2642                 if (q->created) {
2643                         be_cmd_rxq_destroy(adapter, q);
2644                         be_rx_cq_clean(rxo);
2645                 }
2646                 be_queue_free(adapter, q);
2647         }
2648 }
2649
2650 static int be_close(struct net_device *netdev)
2651 {
2652         struct be_adapter *adapter = netdev_priv(netdev);
2653         struct be_eq_obj *eqo;
2654         int i;
2655
2656         be_roce_dev_close(adapter);
2657
2658         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2659                 for_all_evt_queues(adapter, eqo, i) {
2660                         napi_disable(&eqo->napi);
2661                         be_disable_busy_poll(eqo);
2662                 }
2663                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2664         }
2665
2666         be_async_mcc_disable(adapter);
2667
2668         /* Wait for all pending tx completions to arrive so that
2669          * all tx skbs are freed.
2670          */
2671         netif_tx_disable(netdev);
2672         be_tx_compl_clean(adapter);
2673
2674         be_rx_qs_destroy(adapter);
2675
2676         for (i = 1; i < (adapter->uc_macs + 1); i++)
2677                 be_cmd_pmac_del(adapter, adapter->if_handle,
2678                                 adapter->pmac_id[i], 0);
2679         adapter->uc_macs = 0;
2680
2681         for_all_evt_queues(adapter, eqo, i) {
2682                 if (msix_enabled(adapter))
2683                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2684                 else
2685                         synchronize_irq(netdev->irq);
2686                 be_eq_clean(eqo);
2687         }
2688
2689         be_irq_unregister(adapter);
2690
2691         return 0;
2692 }
2693
2694 static int be_rx_qs_create(struct be_adapter *adapter)
2695 {
2696         struct be_rx_obj *rxo;
2697         int rc, i, j;
2698         u8 rsstable[128];
2699
2700         for_all_rx_queues(adapter, rxo, i) {
2701                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2702                                     sizeof(struct be_eth_rx_d));
2703                 if (rc)
2704                         return rc;
2705         }
2706
2707         /* The FW would like the default RXQ to be created first */
2708         rxo = default_rxo(adapter);
2709         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2710                                adapter->if_handle, false, &rxo->rss_id);
2711         if (rc)
2712                 return rc;
2713
2714         for_all_rss_queues(adapter, rxo, i) {
2715                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2716                                        rx_frag_size, adapter->if_handle,
2717                                        true, &rxo->rss_id);
2718                 if (rc)
2719                         return rc;
2720         }
2721
2722         if (be_multi_rxq(adapter)) {
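                /* Fill the 128-entry RSS indirection table round-robin
                 * across the RSS rings; e.g. with 4 rings, their rss_ids
                 * repeat in groups of four over all 128 slots.
                 */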
2723                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2724                         for_all_rss_queues(adapter, rxo, i) {
2725                                 if ((j + i) >= 128)
2726                                         break;
2727                                 rsstable[j + i] = rxo->rss_id;
2728                         }
2729                 }
2730                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2731                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2732
2733                 if (!BEx_chip(adapter))
2734                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2735                                                 RSS_ENABLE_UDP_IPV6;
2736         } else {
2737                 /* Disable RSS, if only default RX Q is created */
2738                 adapter->rss_flags = RSS_ENABLE_NONE;
2739         }
2740
2741         rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2742                                128);
2743         if (rc) {
2744                 adapter->rss_flags = RSS_ENABLE_NONE;
2745                 return rc;
2746         }
2747
2748         /* First time posting */
2749         for_all_rx_queues(adapter, rxo, i)
2750                 be_post_rx_frags(rxo, GFP_KERNEL);
2751         return 0;
2752 }
2753
2754 static int be_open(struct net_device *netdev)
2755 {
2756         struct be_adapter *adapter = netdev_priv(netdev);
2757         struct be_eq_obj *eqo;
2758         struct be_rx_obj *rxo;
2759         struct be_tx_obj *txo;
2760         u8 link_status;
2761         int status, i;
2762
2763         status = be_rx_qs_create(adapter);
2764         if (status)
2765                 goto err;
2766
2767         status = be_irq_register(adapter);
2768         if (status)
2769                 goto err;
2770
2771         for_all_rx_queues(adapter, rxo, i)
2772                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2773
2774         for_all_tx_queues(adapter, txo, i)
2775                 be_cq_notify(adapter, txo->cq.id, true, 0);
2776
2777         be_async_mcc_enable(adapter);
2778
2779         for_all_evt_queues(adapter, eqo, i) {
2780                 napi_enable(&eqo->napi);
2781                 be_enable_busy_poll(eqo);
2782                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2783         }
2784         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2785
2786         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2787         if (!status)
2788                 be_link_status_update(adapter, link_status);
2789
2790         netif_tx_start_all_queues(netdev);
2791         be_roce_dev_open(adapter);
2792         return 0;
2793 err:
2794         be_close(adapter->netdev);
2795         return -EIO;
2796 }
2797
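     /* Program (or clear) the magic-packet Wake-on-LAN filter in the FW.
      * When enabling, also arm PCI PM wake for the D3hot/D3cold states;
      * when disabling, pass a zeroed MAC to clear the magic pattern and
      * disarm PCI wake.
      */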
2798 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2799 {
2800         struct be_dma_mem cmd;
2801         int status = 0;
2802         u8 mac[ETH_ALEN];
2803
2804         eth_zero_addr(mac);
2805
2806         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2807         cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2808                                      GFP_KERNEL);
2809         if (cmd.va == NULL)
2810                 return -ENOMEM;
2811
2812         if (enable) {
2813                 status = pci_write_config_dword(adapter->pdev,
2814                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2815                 if (status) {
2816                         dev_err(&adapter->pdev->dev,
2817                                 "Could not enable Wake-on-LAN\n");
2818                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2819                                           cmd.dma);
2820                         return status;
2821                 }
2822                 status = be_cmd_enable_magic_wol(adapter,
2823                                 adapter->netdev->dev_addr, &cmd);
2824                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2825                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2826         } else {
2827                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2828                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2829                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2830         }
2831
2832         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2833         return status;
2834 }
2835
2836 /*
2837  * Generate a seed MAC address from the PF MAC Address using jhash.
2838  * MAC addresses for the VFs are assigned incrementally, starting from the seed.
2839  * These addresses are programmed in the ASIC by the PF and the VF driver
2840  * queries for the MAC address during its probe.
2841  */
2842 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2843 {
2844         u32 vf;
2845         int status = 0;
2846         u8 mac[ETH_ALEN];
2847         struct be_vf_cfg *vf_cfg;
2848
2849         be_vf_eth_addr_generate(adapter, mac);
2850
2851         for_all_vfs(adapter, vf_cfg, vf) {
2852                 if (BEx_chip(adapter))
2853                         status = be_cmd_pmac_add(adapter, mac,
2854                                                  vf_cfg->if_handle,
2855                                                  &vf_cfg->pmac_id, vf + 1);
2856                 else
2857                         status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2858                                                 vf + 1);
2859
2860                 if (status)
2861                         dev_err(&adapter->pdev->dev,
2862                                 "MAC address assignment failed for VF %d\n", vf);
2863                 else
2864                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2865
2866                 mac[5] += 1;
2867         }
2868         return status;
2869 }
2870
2871 static int be_vfs_mac_query(struct be_adapter *adapter)
2872 {
2873         int status, vf;
2874         u8 mac[ETH_ALEN];
2875         struct be_vf_cfg *vf_cfg;
2876
2877         for_all_vfs(adapter, vf_cfg, vf) {
2878                 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2879                                                mac, vf_cfg->if_handle,
2880                                                false, vf + 1);
2881                 if (status)
2882                         return status;
2883                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2884         }
2885         return 0;
2886 }
2887
2888 static void be_vf_clear(struct be_adapter *adapter)
2889 {
2890         struct be_vf_cfg *vf_cfg;
2891         u32 vf;
2892
2893         if (pci_vfs_assigned(adapter->pdev)) {
2894                 dev_warn(&adapter->pdev->dev,
2895                          "VFs are assigned to VMs: not disabling VFs\n");
2896                 goto done;
2897         }
2898
2899         pci_disable_sriov(adapter->pdev);
2900
2901         for_all_vfs(adapter, vf_cfg, vf) {
2902                 if (BEx_chip(adapter))
2903                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2904                                         vf_cfg->pmac_id, vf + 1);
2905                 else
2906                         be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2907                                        vf + 1);
2908
2909                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2910         }
2911 done:
2912         kfree(adapter->vf_cfg);
2913         adapter->num_vfs = 0;
2914 }
2915
2916 static void be_clear_queues(struct be_adapter *adapter)
2917 {
2918         be_mcc_queues_destroy(adapter);
2919         be_rx_cqs_destroy(adapter);
2920         be_tx_queues_destroy(adapter);
2921         be_evt_queues_destroy(adapter);
2922 }
2923
2924 static void be_cancel_worker(struct be_adapter *adapter)
2925 {
2926         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2927                 cancel_delayed_work_sync(&adapter->work);
2928                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2929         }
2930 }
2931
2932 static void be_mac_clear(struct be_adapter *adapter)
2933 {
2934         int i;
2935
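             /* pmac_id[0] holds the primary MAC and entries 1..uc_macs hold
              * the programmed uc-list; delete all uc_macs + 1 entries.
              */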
2936         if (adapter->pmac_id) {
2937                 for (i = 0; i < (adapter->uc_macs + 1); i++)
2938                         be_cmd_pmac_del(adapter, adapter->if_handle,
2939                                         adapter->pmac_id[i], 0);
2940                 adapter->uc_macs = 0;
2941
2942                 kfree(adapter->pmac_id);
2943                 adapter->pmac_id = NULL;
2944         }
2945 }
2946
2947 static int be_clear(struct be_adapter *adapter)
2948 {
2949         be_cancel_worker(adapter);
2950
2951         if (sriov_enabled(adapter))
2952                 be_vf_clear(adapter);
2953
2954         /* delete the primary mac along with the uc-mac list */
2955         be_mac_clear(adapter);
2956
2957         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2958
2959         be_clear_queues(adapter);
2960
2961         be_msix_disable(adapter);
2962         return 0;
2963 }
2964
2965 static int be_vfs_if_create(struct be_adapter *adapter)
2966 {
2967         struct be_resources res = {0};
2968         struct be_vf_cfg *vf_cfg;
2969         u32 cap_flags, en_flags, vf;
2970         int status = 0;
2971
2972         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2973                     BE_IF_FLAGS_MULTICAST;
2974
2975         for_all_vfs(adapter, vf_cfg, vf) {
2976                 if (!BE3_chip(adapter)) {
2977                         status = be_cmd_get_profile_config(adapter, &res,
2978                                                            vf + 1);
2979                         if (!status)
2980                                 cap_flags = res.if_cap_flags;
2981                 }
2982
2983                 /* If a FW profile exists, then cap_flags are updated */
2984                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2985                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2986                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2987                                           &vf_cfg->if_handle, vf + 1);
2988                 if (status)
2989                         goto err;
2990         }
2991 err:
2992         return status;
2993 }
2994
2995 static int be_vf_setup_init(struct be_adapter *adapter)
2996 {
2997         struct be_vf_cfg *vf_cfg;
2998         int vf;
2999
3000         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3001                                   GFP_KERNEL);
3002         if (!adapter->vf_cfg)
3003                 return -ENOMEM;
3004
3005         for_all_vfs(adapter, vf_cfg, vf) {
3006                 vf_cfg->if_handle = -1;
3007                 vf_cfg->pmac_id = -1;
3008         }
3009         return 0;
3010 }
3011
3012 static int be_vf_setup(struct be_adapter *adapter)
3013 {
3014         struct be_vf_cfg *vf_cfg;
3015         u16 def_vlan, lnk_speed;
3016         int status, old_vfs, vf;
3017         struct device *dev = &adapter->pdev->dev;
3018         u32 privileges;
3019
3020         old_vfs = pci_num_vf(adapter->pdev);
3021         if (old_vfs) {
3022                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3023                 if (old_vfs != num_vfs)
3024                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3025                 adapter->num_vfs = old_vfs;
3026         } else {
3027                 if (num_vfs > be_max_vfs(adapter))
3028                         dev_info(dev, "Device supports %d VFs and not %d\n",
3029                                  be_max_vfs(adapter), num_vfs);
3030                 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3031                 if (!adapter->num_vfs)
3032                         return 0;
3033         }
3034
3035         status = be_vf_setup_init(adapter);
3036         if (status)
3037                 goto err;
3038
3039         if (old_vfs) {
3040                 for_all_vfs(adapter, vf_cfg, vf) {
3041                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3042                         if (status)
3043                                 goto err;
3044                 }
3045         } else {
3046                 status = be_vfs_if_create(adapter);
3047                 if (status)
3048                         goto err;
3049         }
3050
3051         if (old_vfs) {
3052                 status = be_vfs_mac_query(adapter);
3053                 if (status)
3054                         goto err;
3055         } else {
3056                 status = be_vf_eth_addr_config(adapter);
3057                 if (status)
3058                         goto err;
3059         }
3060
3061         for_all_vfs(adapter, vf_cfg, vf) {
3062                 /* Allow VFs to program MAC/VLAN filters */
3063                 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3064                 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3065                         status = be_cmd_set_fn_privileges(adapter,
3066                                                           privileges |
3067                                                           BE_PRIV_FILTMGMT,
3068                                                           vf + 1);
3069                         if (!status)
3070                                 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3071                                          vf);
3072                 }
3073
3074                 /* The BE3 FW, by default, caps VF TX-rate to 100 Mbps.
3075                  * Allow the full available bandwidth.
3076                  */
3077                 if (BE3_chip(adapter) && !old_vfs)
3078                         be_cmd_set_qos(adapter, 1000, vf + 1);
3079
3080                 status = be_cmd_link_status_query(adapter, &lnk_speed,
3081                                                   NULL, vf + 1);
3082                 if (!status)
3083                         vf_cfg->tx_rate = lnk_speed;
3084
3085                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
3086                                                vf + 1, vf_cfg->if_handle, NULL);
3087                 if (status)
3088                         goto err;
3089                 vf_cfg->def_vid = def_vlan;
3090
3091                 if (!old_vfs)
3092                         be_cmd_enable_vf(adapter, vf + 1);
3093         }
3094
3095         if (!old_vfs) {
3096                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3097                 if (status) {
3098                         dev_err(dev, "SRIOV enable failed\n");
3099                         adapter->num_vfs = 0;
3100                         goto err;
3101                 }
3102         }
3103         return 0;
3104 err:
3105         dev_err(dev, "VF setup failed\n");
3106         be_vf_clear(adapter);
3107         return status;
3108 }
3109
3110 /* On BE2/BE3 the FW does not advertise the supported resource limits */
3111 static void BEx_get_resources(struct be_adapter *adapter,
3112                               struct be_resources *res)
3113 {
3114         struct pci_dev *pdev = adapter->pdev;
3115         bool use_sriov = false;
3116         int max_vfs;
3117
3118         max_vfs = pci_sriov_get_totalvfs(pdev);
3119
3120         if (BE3_chip(adapter) && sriov_want(adapter)) {
3121                 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3122                 use_sriov = res->max_vfs;
3123         }
3124
3125         if (be_physfn(adapter))
3126                 res->max_uc_mac = BE_UC_PMAC_COUNT;
3127         else
3128                 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3129
3130         if (adapter->function_mode & FLEX10_MODE)
3131                 res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
3132         else if (adapter->function_mode & UMC_ENABLED)
3133                 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
3134         else
3135                 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3136         res->max_mcast_mac = BE_MAX_MC;
3137
3138         /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
3139         if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
3140             !be_physfn(adapter) || (adapter->port_num > 1))
3141                 res->max_tx_qs = 1;
3142         else
3143                 res->max_tx_qs = BE3_MAX_TX_QS;
3144
3145         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3146             !use_sriov && be_physfn(adapter))
3147                 res->max_rss_qs = (adapter->be3_native) ?
3148                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3149         res->max_rx_qs = res->max_rss_qs + 1;
3150
3151         if (be_physfn(adapter))
3152                 res->max_evt_qs = (max_vfs > 0) ?
3153                                         BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3154         else
3155                 res->max_evt_qs = 1;
3156
3157         res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3158         if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3159                 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3160 }
3161
3162 static void be_setup_init(struct be_adapter *adapter)
3163 {
3164         adapter->vlan_prio_bmap = 0xff;
3165         adapter->phy.link_speed = -1;
3166         adapter->if_handle = -1;
3167         adapter->be3_native = false;
3168         adapter->promiscuous = false;
3169         if (be_physfn(adapter))
3170                 adapter->cmd_privileges = MAX_PRIVILEGES;
3171         else
3172                 adapter->cmd_privileges = MIN_PRIVILEGES;
3173 }
3174
3175 static int be_get_resources(struct be_adapter *adapter)
3176 {
3177         struct device *dev = &adapter->pdev->dev;
3178         struct be_resources res = {0};
3179         int status;
3180
3181         if (BEx_chip(adapter)) {
3182                 BEx_get_resources(adapter, &res);
3183                 adapter->res = res;
3184         }
3185
3186         /* For Lancer, SH etc., read per-function resource limits from FW.
3187          * GET_FUNC_CONFIG returns per-function guaranteed limits.
3188          * GET_PROFILE_CONFIG returns the PCI-E related PF-pool limits.
3189          */
3190         if (!BEx_chip(adapter)) {
3191                 status = be_cmd_get_func_config(adapter, &res);
3192                 if (status)
3193                         return status;
3194
3195                 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3196                 if (be_roce_supported(adapter))
3197                         res.max_evt_qs /= 2;
3198                 adapter->res = res;
3199
3200                 if (be_physfn(adapter)) {
3201                         status = be_cmd_get_profile_config(adapter, &res, 0);
3202                         if (status)
3203                                 return status;
3204                         adapter->res.max_vfs = res.max_vfs;
3205                 }
3206
3207                 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3208                          be_max_txqs(adapter), be_max_rxqs(adapter),
3209                          be_max_rss(adapter), be_max_eqs(adapter),
3210                          be_max_vfs(adapter));
3211                 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3212                          be_max_uc(adapter), be_max_mc(adapter),
3213                          be_max_vlans(adapter));
3214         }
3215
3216         return 0;
3217 }
3218
3219 /* Routine to query per-function resource limits */
3220 static int be_get_config(struct be_adapter *adapter)
3221 {
3222         u16 profile_id;
3223         int status;
3224
3225         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3226                                      &adapter->function_mode,
3227                                      &adapter->function_caps,
3228                                      &adapter->asic_rev);
3229         if (status)
3230                 return status;
3231
3232         if (be_physfn(adapter)) {
3233                 status = be_cmd_get_active_profile(adapter, &profile_id);
3234                 if (!status)
3235                         dev_info(&adapter->pdev->dev,
3236                                  "Using profile 0x%x\n", profile_id);
3237         }
3238
3239         status = be_get_resources(adapter);
3240         if (status)
3241                 return status;
3242
3243         /* primary mac needs 1 pmac entry */
3244         adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3245                                    GFP_KERNEL);
3246         if (!adapter->pmac_id)
3247                 return -ENOMEM;
3248
3249         /* Sanitize cfg_num_qs based on HW and platform limits */
3250         adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3251
3252         return 0;
3253 }
3254
3255 static int be_mac_setup(struct be_adapter *adapter)
3256 {
3257         u8 mac[ETH_ALEN];
3258         int status;
3259
3260         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3261                 status = be_cmd_get_perm_mac(adapter, mac);
3262                 if (status)
3263                         return status;
3264
3265                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3266                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3267         } else {
3268                 /* Maybe the HW was reset; dev_addr must be re-programmed */
3269                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3270         }
3271
3272         /* For BE3-R VFs, the PF programs the initial MAC address */
3273         if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3274                 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3275                                 &adapter->pmac_id[0], 0);
3276         return 0;
3277 }
3278
3279 static void be_schedule_worker(struct be_adapter *adapter)
3280 {
3281         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3282         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3283 }
3284
3285 static int be_setup_queues(struct be_adapter *adapter)
3286 {
3287         struct net_device *netdev = adapter->netdev;
3288         int status;
3289
3290         status = be_evt_queues_create(adapter);
3291         if (status)
3292                 goto err;
3293
3294         status = be_tx_qs_create(adapter);
3295         if (status)
3296                 goto err;
3297
3298         status = be_rx_cqs_create(adapter);
3299         if (status)
3300                 goto err;
3301
3302         status = be_mcc_queues_create(adapter);
3303         if (status)
3304                 goto err;
3305
3306         status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3307         if (status)
3308                 goto err;
3309
3310         status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3311         if (status)
3312                 goto err;
3313
3314         return 0;
3315 err:
3316         dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3317         return status;
3318 }
3319
3320 int be_update_queues(struct be_adapter *adapter)
3321 {
3322         struct net_device *netdev = adapter->netdev;
3323         int status;
3324
3325         if (netif_running(netdev))
3326                 be_close(netdev);
3327
3328         be_cancel_worker(adapter);
3329
3330         /* If any vectors have been shared with RoCE we cannot re-program
3331          * the MSIx table.
3332          */
3333         if (!adapter->num_msix_roce_vec)
3334                 be_msix_disable(adapter);
3335
3336         be_clear_queues(adapter);
3337
3338         if (!msix_enabled(adapter)) {
3339                 status = be_msix_enable(adapter);
3340                 if (status)
3341                         return status;
3342         }
3343
3344         status = be_setup_queues(adapter);
3345         if (status)
3346                 return status;
3347
3348         be_schedule_worker(adapter);
3349
3350         if (netif_running(netdev))
3351                 status = be_open(netdev);
3352
3353         return status;
3354 }
3355
3356 static int be_setup(struct be_adapter *adapter)
3357 {
3358         struct device *dev = &adapter->pdev->dev;
3359         u32 tx_fc, rx_fc, en_flags;
3360         int status;
3361
3362         be_setup_init(adapter);
3363
3364         if (!lancer_chip(adapter))
3365                 be_cmd_req_native_mode(adapter);
3366
3367         status = be_get_config(adapter);
3368         if (status)
3369                 goto err;
3370
3371         status = be_msix_enable(adapter);
3372         if (status)
3373                 goto err;
3374
3375         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3376                    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3377         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3378                 en_flags |= BE_IF_FLAGS_RSS;
3379         en_flags &= be_if_cap_flags(adapter);
3380         status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3381                                   &adapter->if_handle, 0);
3382         if (status)
3383                 goto err;
3384
3385         /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3386         rtnl_lock();
3387         status = be_setup_queues(adapter);
3388         rtnl_unlock();
3389         if (status)
3390                 goto err;
3391
3392         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3393
3394         status = be_mac_setup(adapter);
3395         if (status)
3396                 goto err;
3397
3398         be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3399
3400         if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3401                 dev_err(dev, "Firmware on card is old (%s), IRQs may not work.\n",
3402                         adapter->fw_ver);
3403                 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3404         }
3405
3406         if (adapter->vlans_added)
3407                 be_vid_config(adapter);
3408
3409         be_set_rx_mode(adapter->netdev);
3410
3411         be_cmd_get_acpi_wol_cap(adapter);
3412
3413         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3414
3415         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3416                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3417                                         adapter->rx_fc);
3418
3419         if (sriov_want(adapter)) {
3420                 if (be_max_vfs(adapter))
3421                         be_vf_setup(adapter);
3422                 else
3423                         dev_warn(dev, "device doesn't support SRIOV\n");
3424         }
3425
3426         status = be_cmd_get_phy_info(adapter);
3427         if (!status && be_pause_supported(adapter))
3428                 adapter->phy.fc_autoneg = 1;
3429
3430         be_schedule_worker(adapter);
3431         return 0;
3432 err:
3433         be_clear(adapter);
3434         return status;
3435 }
3436
3437 #ifdef CONFIG_NET_POLL_CONTROLLER
3438 static void be_netpoll(struct net_device *netdev)
3439 {
3440         struct be_adapter *adapter = netdev_priv(netdev);
3441         struct be_eq_obj *eqo;
3442         int i;
3443
3444         for_all_evt_queues(adapter, eqo, i) {
3445                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3446                 napi_schedule(&eqo->napi);
3447         }
3450 }
3451 #endif
3452
3453 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3454 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3455
3456 static bool be_flash_redboot(struct be_adapter *adapter,
3457                         const u8 *p, u32 img_start, int image_size,
3458                         int hdr_size)
3459 {
3460         u32 crc_offset;
3461         u8 flashed_crc[4];
3462         int status;
3463
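             /* The last 4 bytes of the image hold its CRC; compare it with
              * the CRC of the image already in flash and reflash only on a
              * mismatch.
              */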
3464         crc_offset = hdr_size + img_start + image_size - 4;
3465
3466         p += crc_offset;
3467
3468         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3469                         (image_size - 4));
3470         if (status) {
3471                 dev_err(&adapter->pdev->dev,
3472                         "could not get CRC from flash, not flashing redboot\n");
3473                 return false;
3474         }
3475
3476         /* update redboot only if the crc does not match */
3477         if (!memcmp(flashed_crc, p, 4))
3478                 return false;
3479         else
3480                 return true;
3481 }
3482
3483 static bool phy_flashing_required(struct be_adapter *adapter)
3484 {
3485         return (adapter->phy.phy_type == TN_8022 &&
3486                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3487 }
3488
3489 static bool is_comp_in_ufi(struct be_adapter *adapter,
3490                            struct flash_section_info *fsec, int type)
3491 {
3492         int i = 0, img_type = 0;
3493         struct flash_section_info_g2 *fsec_g2 = NULL;
3494
3495         if (BE2_chip(adapter))
3496                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3497
3498         for (i = 0; i < MAX_FLASH_COMP; i++) {
3499                 if (fsec_g2)
3500                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3501                 else
3502                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3503
3504                 if (img_type == type)
3505                         return true;
3506         }
3507         return false;
3509 }
3510
3511 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3512                                          int header_size,
3513                                          const struct firmware *fw)
3514 {
3515         struct flash_section_info *fsec = NULL;
3516         const u8 *p = fw->data;
3517
3518         p += header_size;
3519         while (p < (fw->data + fw->size)) {
3520                 fsec = (struct flash_section_info *)p;
3521                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3522                         return fsec;
3523                 p += 32;
3524         }
3525         return NULL;
3526 }
3527
3528 static int be_flash(struct be_adapter *adapter, const u8 *img,
3529                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3530 {
3531         u32 total_bytes = 0, flash_op, num_bytes = 0;
3532         int status = 0;
3533         struct be_cmd_write_flashrom *req = flash_cmd->va;
3534
3535         total_bytes = img_size;
3536         while (total_bytes) {
3537                 num_bytes = min_t(u32, 32*1024, total_bytes);
3538
3539                 total_bytes -= num_bytes;
3540
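                     /* Intermediate 32KB chunks are staged with a SAVE op;
                      * only the final chunk issues a FLASH op, which commits
                      * the complete image.
                      */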
3541                 if (!total_bytes) {
3542                         if (optype == OPTYPE_PHY_FW)
3543                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3544                         else
3545                                 flash_op = FLASHROM_OPER_FLASH;
3546                 } else {
3547                         if (optype == OPTYPE_PHY_FW)
3548                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3549                         else
3550                                 flash_op = FLASHROM_OPER_SAVE;
3551                 }
3552
3553                 memcpy(req->data_buf, img, num_bytes);
3554                 img += num_bytes;
3555                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3556                                                 flash_op, num_bytes);
3557                 if (status) {
3558                         if (status == ILLEGAL_IOCTL_REQ &&
3559                             optype == OPTYPE_PHY_FW)
3560                                 break;
3561                         dev_err(&adapter->pdev->dev,
3562                                 "cmd to write to flash rom failed.\n");
3563                         return status;
3564                 }
3565         }
3566         return 0;
3567 }
3568
3569 /* For BE2, BE3 and BE3-R */
3570 static int be_flash_BEx(struct be_adapter *adapter,
3571                          const struct firmware *fw,
3572                          struct be_dma_mem *flash_cmd,
3573                          int num_of_images)
3575 {
3576         int status = 0, i, filehdr_size = 0;
3577         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3578         const u8 *p = fw->data;
3579         const struct flash_comp *pflashcomp;
3580         int num_comp, redboot;
3581         struct flash_section_info *fsec = NULL;
3582
3583         struct flash_comp gen3_flash_types[] = {
3584                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3585                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3586                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3587                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3588                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3589                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3590                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3591                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3592                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3593                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3594                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3595                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3596                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3597                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3598                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3599                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3600                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3601                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3602                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3603                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3604         };
3605
3606         struct flash_comp gen2_flash_types[] = {
3607                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3608                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3609                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3610                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3611                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3612                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3613                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3614                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3615                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3616                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3617                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3618                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3619                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3620                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3621                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3622                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3623         };
3624
3625         if (BE3_chip(adapter)) {
3626                 pflashcomp = gen3_flash_types;
3627                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3628                 num_comp = ARRAY_SIZE(gen3_flash_types);
3629         } else {
3630                 pflashcomp = gen2_flash_types;
3631                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3632                 num_comp = ARRAY_SIZE(gen2_flash_types);
3633         }
3634
3635         /* Get flash section info */
3636         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3637         if (!fsec) {
3638                 dev_err(&adapter->pdev->dev,
3639                         "Invalid Cookie. UFI corrupted?\n");
3640                 return -EINVAL;
3641         }
3642         for (i = 0; i < num_comp; i++) {
3643                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3644                         continue;
3645
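                     /* Flash the NCSI section only if the FW on the card is
                      * at least version 3.102.148.0.
                      */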
3646                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3647                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3648                         continue;
3649
3650                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3651                     !phy_flashing_required(adapter))
3652                         continue;
3653
3654                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3655                         redboot = be_flash_redboot(adapter, fw->data,
3656                                 pflashcomp[i].offset, pflashcomp[i].size,
3657                                 filehdr_size + img_hdrs_size);
3658                         if (!redboot)
3659                                 continue;
3660                 }
3661
3662                 p = fw->data;
3663                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3664                 if (p + pflashcomp[i].size > fw->data + fw->size)
3665                         return -1;
3666
3667                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3668                                         pflashcomp[i].size);
3669                 if (status) {
3670                         dev_err(&adapter->pdev->dev,
3671                                 "Flashing section type %d failed.\n",
3672                                 pflashcomp[i].img_type);
3673                         return status;
3674                 }
3675         }
3676         return 0;
3677 }
3678
3679 static int be_flash_skyhawk(struct be_adapter *adapter,
3680                 const struct firmware *fw,
3681                 struct be_dma_mem *flash_cmd, int num_of_images)
3682 {
3683         int status = 0, i, filehdr_size = 0;
3684         int img_offset, img_size, img_optype, redboot;
3685         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3686         const u8 *p = fw->data;
3687         struct flash_section_info *fsec = NULL;
3688
3689         filehdr_size = sizeof(struct flash_file_hdr_g3);
3690         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3691         if (!fsec) {
3692                 dev_err(&adapter->pdev->dev,
3693                         "Invalid Cookie. UFI corrupted?\n");
3694                 return -EINVAL;
3695         }
3696
3697         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3698                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3699                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3700
3701                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3702                 case IMAGE_FIRMWARE_iSCSI:
3703                         img_optype = OPTYPE_ISCSI_ACTIVE;
3704                         break;
3705                 case IMAGE_BOOT_CODE:
3706                         img_optype = OPTYPE_REDBOOT;
3707                         break;
3708                 case IMAGE_OPTION_ROM_ISCSI:
3709                         img_optype = OPTYPE_BIOS;
3710                         break;
3711                 case IMAGE_OPTION_ROM_PXE:
3712                         img_optype = OPTYPE_PXE_BIOS;
3713                         break;
3714                 case IMAGE_OPTION_ROM_FCoE:
3715                         img_optype = OPTYPE_FCOE_BIOS;
3716                         break;
3717                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3718                         img_optype = OPTYPE_ISCSI_BACKUP;
3719                         break;
3720                 case IMAGE_NCSI:
3721                         img_optype = OPTYPE_NCSI_FW;
3722                         break;
3723                 default:
3724                         continue;
3725                 }
3726
3727                 if (img_optype == OPTYPE_REDBOOT) {
3728                         redboot = be_flash_redboot(adapter, fw->data,
3729                                         img_offset, img_size,
3730                                         filehdr_size + img_hdrs_size);
3731                         if (!redboot)
3732                                 continue;
3733                 }
3734
3735                 p = fw->data;
3736                 p += filehdr_size + img_offset + img_hdrs_size;
3737                 if (p + img_size > fw->data + fw->size)
3738                         return -1;
3739
3740                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3741                 if (status) {
3742                         dev_err(&adapter->pdev->dev,
3743                                 "Flashing section type %d failed.\n",
3744                                 le32_to_cpu(fsec->fsec_entry[i].type));
3745                         return status;
3746                 }
3747         }
3748         return 0;
3749 }
3750
3751 static int lancer_fw_download(struct be_adapter *adapter,
3752                                 const struct firmware *fw)
3753 {
3754 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3755 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3756         struct be_dma_mem flash_cmd;
3757         const u8 *data_ptr = NULL;
3758         u8 *dest_image_ptr = NULL;
3759         size_t image_size = 0;
3760         u32 chunk_size = 0;
3761         u32 data_written = 0;
3762         u32 offset = 0;
3763         int status = 0;
3764         u8 add_status = 0;
3765         u8 change_status;
3766
3767         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3768                 dev_err(&adapter->pdev->dev,
3769                         "FW image not properly aligned. "
3770                         "Length must be 4-byte aligned.\n");
3771                 status = -EINVAL;
3772                 goto lancer_fw_exit;
3773         }
3774
3775         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3776                                 + LANCER_FW_DOWNLOAD_CHUNK;
3777         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3778                                           &flash_cmd.dma, GFP_KERNEL);
3779         if (!flash_cmd.va) {
3780                 status = -ENOMEM;
3781                 goto lancer_fw_exit;
3782         }
3783
3784         dest_image_ptr = flash_cmd.va +
3785                                 sizeof(struct lancer_cmd_req_write_object);
3786         image_size = fw->size;
3787         data_ptr = fw->data;
3788
3789         while (image_size) {
3790                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3791
3792                 /* Copy the image chunk content. */
3793                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3794
3795                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3796                                                  chunk_size, offset,
3797                                                  LANCER_FW_DOWNLOAD_LOCATION,
3798                                                  &data_written, &change_status,
3799                                                  &add_status);
3800                 if (status)
3801                         break;
3802
3803                 offset += data_written;
3804                 data_ptr += data_written;
3805                 image_size -= data_written;
3806         }
3807
3808         if (!status) {
3809                 /* Commit the FW written */
3810                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3811                                                  0, offset,
3812                                                  LANCER_FW_DOWNLOAD_LOCATION,
3813                                                  &data_written, &change_status,
3814                                                  &add_status);
3815         }
3816
3817         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3818                                 flash_cmd.dma);
3819         if (status) {
3820                 dev_err(&adapter->pdev->dev,
3821                         "Firmware load error. "
3822                         "Status code: 0x%x Additional Status: 0x%x\n",
3823                         status, add_status);
3824                 goto lancer_fw_exit;
3825         }
3826
3827         if (change_status == LANCER_FW_RESET_NEEDED) {
3828                 dev_info(&adapter->pdev->dev,
3829                          "Resetting adapter to activate new FW\n");
3830                 status = lancer_physdev_ctrl(adapter,
3831                                              PHYSDEV_CONTROL_FW_RESET_MASK);
3832                 if (status) {
3833                         dev_err(&adapter->pdev->dev,
3834                                 "Adapter busy for FW reset.\n"
3835                                 "New FW will not be active.\n");
3836                         goto lancer_fw_exit;
3837                 }
3838         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3839                 dev_err(&adapter->pdev->dev,
3840                         "System reboot required for new FW"
3841                         " to be active\n");
3842         }
3843
3844         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3845 lancer_fw_exit:
3846         return status;
3847 }
3848
3849 #define UFI_TYPE2               2
3850 #define UFI_TYPE3               3
3851 #define UFI_TYPE3R              10
3852 #define UFI_TYPE4               4
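     /* Infer the UFI image type from the flash file header: build[0] names
      * the ASIC generation ('2'/'3'/'4' => BE2/BE3/Skyhawk) and
      * asic_type_rev 0x10 distinguishes a BE3-R image from a plain BE3 one.
      */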
3853 static int be_get_ufi_type(struct be_adapter *adapter,
3854                            struct flash_file_hdr_g3 *fhdr)
3855 {
3856         if (fhdr == NULL)
3857                 goto be_get_ufi_exit;
3858
3859         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3860                 return UFI_TYPE4;
3861         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3862                 if (fhdr->asic_type_rev == 0x10)
3863                         return UFI_TYPE3R;
3864                 else
3865                         return UFI_TYPE3;
3866         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3867                 return UFI_TYPE2;
3868
3869 be_get_ufi_exit:
3870         dev_err(&adapter->pdev->dev,
3871                 "UFI and Interface are not compatible for flashing\n");
3872         return -1;
3873 }
3874
3875 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3876 {
3877         struct flash_file_hdr_g3 *fhdr3;
3878         struct image_hdr *img_hdr_ptr = NULL;
3879         struct be_dma_mem flash_cmd;
3880         const u8 *p;
3881         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3882
3883         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3884         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3885                                           &flash_cmd.dma, GFP_KERNEL);
3886         if (!flash_cmd.va) {
3887                 status = -ENOMEM;
3888                 goto be_fw_exit;
3889         }
3890
3891         p = fw->data;
3892         fhdr3 = (struct flash_file_hdr_g3 *)p;
3893
3894         ufi_type = be_get_ufi_type(adapter, fhdr3);
3895
3896         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3897         for (i = 0; i < num_imgs; i++) {
3898                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3899                                 (sizeof(struct flash_file_hdr_g3) +
3900                                  i * sizeof(struct image_hdr)));
3901                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3902                         switch (ufi_type) {
3903                         case UFI_TYPE4:
3904                                 status = be_flash_skyhawk(adapter, fw,
3905                                                         &flash_cmd, num_imgs);
3906                                 break;
3907                         case UFI_TYPE3R:
3908                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3909                                                       num_imgs);
3910                                 break;
3911                         case UFI_TYPE3:
3912                                 /* Do not flash this ufi on BE3-R cards */
3913                                 if (adapter->asic_rev < 0x10)
3914                                         status = be_flash_BEx(adapter, fw,
3915                                                               &flash_cmd,
3916                                                               num_imgs);
3917                                 else {
3918                                         status = -1;
3919                                         dev_err(&adapter->pdev->dev,
3920                                                 "Can't load BE3 UFI on BE3R\n");
3921                                 }
3922                         }
3923                 }
3924         }
3925
3926         if (ufi_type == UFI_TYPE2)
3927                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3928         else if (ufi_type == -1)
3929                 status = -1;
3930
3931         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3932                           flash_cmd.dma);
3933         if (status) {
3934                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3935                 goto be_fw_exit;
3936         }
3937
3938         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3939
3940 be_fw_exit:
3941         return status;
3942 }
3943
3944 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3945 {
3946         const struct firmware *fw;
3947         int status;
3948
3949         if (!netif_running(adapter->netdev)) {
3950                 dev_err(&adapter->pdev->dev,
3951                         "Firmware load not allowed (interface is down)\n");
3952                 return -ENETDOWN;
3953         }
3954
3955         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3956         if (status)
3957                 goto fw_exit;
3958
3959         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3960
3961         if (lancer_chip(adapter))
3962                 status = lancer_fw_download(adapter, fw);
3963         else
3964                 status = be_fw_download(adapter, fw);
3965
3966         if (!status)
3967                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3968                                   adapter->fw_on_flash);
3969
3970 fw_exit:
3971         release_firmware(fw);
3972         return status;
3973 }
3974
3975 static int be_ndo_bridge_setlink(struct net_device *dev,
3976                                     struct nlmsghdr *nlh)
3977 {
3978         struct be_adapter *adapter = netdev_priv(dev);
3979         struct nlattr *attr, *br_spec;
3980         int rem;
3981         int status = 0;
3982         u16 mode = 0;
3983
3984         if (!sriov_enabled(adapter))
3985                 return -EOPNOTSUPP;
3986
3987         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
             if (!br_spec)
                     return -EINVAL;
3988
3989         nla_for_each_nested(attr, br_spec, rem) {
3990                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3991                         continue;
3992
3993                 mode = nla_get_u16(attr);
3994                 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3995                         return -EINVAL;
3996
3997                 status = be_cmd_set_hsw_config(adapter, 0, 0,
3998                                                adapter->if_handle,
3999                                                mode == BRIDGE_MODE_VEPA ?
4000                                                PORT_FWD_TYPE_VEPA :
4001                                                PORT_FWD_TYPE_VEB);
4002                 if (status)
4003                         goto err;
4004
4005                 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4006                          mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4007
4008                 return status;
4009         }
4010 err:
4011         dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4012                 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4013
4014         return status;
4015 }
4016
4017 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4018                                     struct net_device *dev,
4019                                     u32 filter_mask)
4020 {
4021         struct be_adapter *adapter = netdev_priv(dev);
4022         int status = 0;
4023         u8 hsw_mode;
4024
4025         if (!sriov_enabled(adapter))
4026                 return 0;
4027
4028         /* BE and Lancer chips support VEB mode only */
4029         if (BEx_chip(adapter) || lancer_chip(adapter)) {
4030                 hsw_mode = PORT_FWD_TYPE_VEB;
4031         } else {
4032                 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4033                                                adapter->if_handle, &hsw_mode);
4034                 if (status)
4035                         return 0;
4036         }
4037
4038         return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4039                                        hsw_mode == PORT_FWD_TYPE_VEPA ?
4040                                        BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4041 }
4042
4043 static const struct net_device_ops be_netdev_ops = {
4044         .ndo_open               = be_open,
4045         .ndo_stop               = be_close,
4046         .ndo_start_xmit         = be_xmit,
4047         .ndo_set_rx_mode        = be_set_rx_mode,
4048         .ndo_set_mac_address    = be_mac_addr_set,
4049         .ndo_change_mtu         = be_change_mtu,
4050         .ndo_get_stats64        = be_get_stats64,
4051         .ndo_validate_addr      = eth_validate_addr,
4052         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
4053         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
4054         .ndo_set_vf_mac         = be_set_vf_mac,
4055         .ndo_set_vf_vlan        = be_set_vf_vlan,
4056         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
4057         .ndo_get_vf_config      = be_get_vf_config,
4058 #ifdef CONFIG_NET_POLL_CONTROLLER
4059         .ndo_poll_controller    = be_netpoll,
4060 #endif
4061         .ndo_bridge_setlink     = be_ndo_bridge_setlink,
4062         .ndo_bridge_getlink     = be_ndo_bridge_getlink,
4063 #ifdef CONFIG_NET_RX_BUSY_POLL
4064         .ndo_busy_poll          = be_busy_poll
4065 #endif
4066 };
4067
4068 static void be_netdev_init(struct net_device *netdev)
4069 {
4070         struct be_adapter *adapter = netdev_priv(netdev);
4071
4072         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4073                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
4074                 NETIF_F_HW_VLAN_CTAG_TX;
4075         if (be_multi_rxq(adapter))
4076                 netdev->hw_features |= NETIF_F_RXHASH;
4077
4078         netdev->features |= netdev->hw_features |
4079                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4080
4081         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4082                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4083
4084         netdev->priv_flags |= IFF_UNICAST_FLT;
4085
4086         netdev->flags |= IFF_MULTICAST;
4087
4088         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
4089
4090         netdev->netdev_ops = &be_netdev_ops;
4091
4092         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
4093 }
4094
4095 static void be_unmap_pci_bars(struct be_adapter *adapter)
4096 {
4097         if (adapter->csr)
4098                 pci_iounmap(adapter->pdev, adapter->csr);
4099         if (adapter->db)
4100                 pci_iounmap(adapter->pdev, adapter->db);
4101 }
4102
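     /* Doorbell BAR: Lancer chips and VFs use BAR 0; BE2/BE3/Skyhawk PFs
      * use BAR 4.
      */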
4103 static int db_bar(struct be_adapter *adapter)
4104 {
4105         if (lancer_chip(adapter) || !be_physfn(adapter))
4106                 return 0;
4107         else
4108                 return 4;
4109 }
4110
4111 static int be_roce_map_pci_bars(struct be_adapter *adapter)
4112 {
4113         if (skyhawk_chip(adapter)) {
4114                 adapter->roce_db.size = 4096;
4115                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4116                                                               db_bar(adapter));
4117                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4118                                                                db_bar(adapter));
4119         }
4120         return 0;
4121 }
4122
4123 static int be_map_pci_bars(struct be_adapter *adapter)
4124 {
4125         u8 __iomem *addr;
4126
4127         if (BEx_chip(adapter) && be_physfn(adapter)) {
4128                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4129                 if (adapter->csr == NULL)
4130                         return -ENOMEM;
4131         }
4132
4133         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
4134         if (addr == NULL)
4135                 goto pci_map_err;
4136         adapter->db = addr;
4137
4138         be_roce_map_pci_bars(adapter);
4139         return 0;
4140
4141 pci_map_err:
4142         be_unmap_pci_bars(adapter);
4143         return -ENOMEM;
4144 }
4145
4146 static void be_ctrl_cleanup(struct be_adapter *adapter)
4147 {
4148         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
4149
4150         be_unmap_pci_bars(adapter);
4151
4152         if (mem->va)
4153                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4154                                   mem->dma);
4155
4156         mem = &adapter->rx_filter;
4157         if (mem->va)
4158                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4159                                   mem->dma);
4160 }
4161
4162 static int be_ctrl_init(struct be_adapter *adapter)
4163 {
4164         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4165         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
4166         struct be_dma_mem *rx_filter = &adapter->rx_filter;
4167         u32 sli_intf;
4168         int status;
4169
4170         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4171         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4172                                  SLI_INTF_FAMILY_SHIFT;
4173         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4174
4175         status = be_map_pci_bars(adapter);
4176         if (status)
4177                 goto done;
4178
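             /* Keep the FW mailbox 16-byte aligned: over-allocate by 16
              * bytes and align the va/dma pointers within the allocation.
              */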
4179         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4180         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4181                                                 mbox_mem_alloc->size,
4182                                                 &mbox_mem_alloc->dma,
4183                                                 GFP_KERNEL);
4184         if (!mbox_mem_alloc->va) {
4185                 status = -ENOMEM;
4186                 goto unmap_pci_bars;
4187         }
4188         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4189         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4190         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4191         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
4192
4193         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4194         rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4195                                             rx_filter->size, &rx_filter->dma,
4196                                             GFP_KERNEL);
4197         if (!rx_filter->va) {
4198                 status = -ENOMEM;
4199                 goto free_mbox;
4200         }
4201
4202         mutex_init(&adapter->mbox_lock);
4203         spin_lock_init(&adapter->mcc_lock);
4204         spin_lock_init(&adapter->mcc_cq_lock);
4205
4206         init_completion(&adapter->et_cmd_compl);
4207         pci_save_state(adapter->pdev);
4208         return 0;
4209
4210 free_mbox:
4211         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4212                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
4213
4214 unmap_pci_bars:
4215         be_unmap_pci_bars(adapter);
4216
4217 done:
4218         return status;
4219 }
4220
4221 static void be_stats_cleanup(struct be_adapter *adapter)
4222 {
4223         struct be_dma_mem *cmd = &adapter->stats_cmd;
4224
4225         if (cmd->va)
4226                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4227                                   cmd->va, cmd->dma);
4228 }
4229
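     /* Allocate the DMA buffer for the stats command; its size depends
      * on the stats-command version the chip family supports.
      */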
4230 static int be_stats_init(struct be_adapter *adapter)
4231 {
4232         struct be_dma_mem *cmd = &adapter->stats_cmd;
4233
4234         if (lancer_chip(adapter))
4235                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4236         else if (BE2_chip(adapter))
4237                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4238         else if (BE3_chip(adapter))
4239                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4240         else
4241                 /* ALL non-BE ASICs */
4242                 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
4243
4244         cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4245                                       GFP_KERNEL);
4246         if (!cmd->va)
4247                 return -ENOMEM;
4248         return 0;
4249 }
4250
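     /* Device removal: undo everything done in be_probe() */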
4251 static void be_remove(struct pci_dev *pdev)
4252 {
4253         struct be_adapter *adapter = pci_get_drvdata(pdev);
4254
4255         if (!adapter)
4256                 return;
4257
4258         be_roce_dev_remove(adapter);
4259         be_intr_set(adapter, false);
4260
4261         cancel_delayed_work_sync(&adapter->func_recovery_work);
4262
4263         unregister_netdev(adapter->netdev);
4264
4265         be_clear(adapter);
4266
4267         /* tell fw we're done with firing cmds */
4268         be_cmd_fw_clean(adapter);
4269
4270         be_stats_cleanup(adapter);
4271
4272         be_ctrl_cleanup(adapter);
4273
4274         pci_disable_pcie_error_reporting(pdev);
4275
4276         pci_release_regions(pdev);
4277         pci_disable_device(pdev);
4278
4279         free_netdev(adapter->netdev);
4280 }
4281
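     /* Query controller attributes and set initial defaults: temperature
      * polling frequency, FW log level and the default queue count.
      */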
4282 static int be_get_initial_config(struct be_adapter *adapter)
4283 {
4284         int status, level;
4285
4286         status = be_cmd_get_cntl_attributes(adapter);
4287         if (status)
4288                 return status;
4289
4290         /* Must be a power of 2 or else MODULO will BUG_ON */
4291         adapter->be_get_temp_freq = 64;
4292
4293         if (BEx_chip(adapter)) {
4294                 level = be_cmd_get_fw_log_level(adapter);
4295                 adapter->msg_enable =
4296                         level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4297         }
4298
4299         adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4300         return 0;
4301 }
4302
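     /* Recover a Lancer function after a firmware error: wait for the
      * chip to become ready again, then tear down and rebuild NIC state.
      */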
4303 static int lancer_recover_func(struct be_adapter *adapter)
4304 {
4305         struct device *dev = &adapter->pdev->dev;
4306         int status;
4307
4308         status = lancer_test_and_set_rdy_state(adapter);
4309         if (status)
4310                 goto err;
4311
4312         if (netif_running(adapter->netdev))
4313                 be_close(adapter->netdev);
4314
4315         be_clear(adapter);
4316
4317         be_clear_all_error(adapter);
4318
4319         status = be_setup(adapter);
4320         if (status)
4321                 goto err;
4322
4323         if (netif_running(adapter->netdev)) {
4324                 status = be_open(adapter->netdev);
4325                 if (status)
4326                         goto err;
4327         }
4328
4329         dev_info(dev, "Adapter recovery successful\n");
4330         return 0;
4331 err:
4332         if (status == -EAGAIN)
4333                 dev_err(dev, "Waiting for resource provisioning\n");
4334         else
4335                 dev_err(dev, "Adapter recovery failed\n");
4336
4337         return status;
4338 }
4339
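     /* Runs every second to detect HW errors; on Lancer an in-place
      * recovery of the function is attempted.
      */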
4340 static void be_func_recovery_task(struct work_struct *work)
4341 {
4342         struct be_adapter *adapter =
4343                 container_of(work, struct be_adapter, func_recovery_work.work);
4344         int status = 0;
4345
4346         be_detect_error(adapter);
4347
4348         if (adapter->hw_error && lancer_chip(adapter)) {
4350                 rtnl_lock();
4351                 netif_device_detach(adapter->netdev);
4352                 rtnl_unlock();
4353
4354                 status = lancer_recover_func(adapter);
4355                 if (!status)
4356                         netif_device_attach(adapter->netdev);
4357         }
4358
4359         /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4360          * no need to attempt further recovery.
4361          */
4362         if (!status || status == -EAGAIN)
4363                 schedule_delayed_work(&adapter->func_recovery_work,
4364                                       msecs_to_jiffies(1000));
4365 }
4366
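     /* Periodic (1 second) housekeeping: reap MCC completions, refresh
      * stats, read the die temperature and replenish starved RX queues.
      */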
4367 static void be_worker(struct work_struct *work)
4368 {
4369         struct be_adapter *adapter =
4370                 container_of(work, struct be_adapter, work.work);
4371         struct be_rx_obj *rxo;
4372         int i;
4373
4374         /* when interrupts are not yet enabled, just reap any pending
4375          * mcc completions */
4376         if (!netif_running(adapter->netdev)) {
4377                 local_bh_disable();
4378                 be_process_mcc(adapter);
4379                 local_bh_enable();
4380                 goto reschedule;
4381         }
4382
4383         if (!adapter->stats_cmd_sent) {
4384                 if (lancer_chip(adapter))
4385                         lancer_cmd_get_pport_stats(adapter,
4386                                                 &adapter->stats_cmd);
4387                 else
4388                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4389         }
4390
4391         if (be_physfn(adapter) &&
4392             MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4393                 be_cmd_get_die_temperature(adapter);
4394
4395         for_all_rx_queues(adapter, rxo, i) {
4396                 /* Replenish RX-queues starved due to memory
4397                  * allocation failures.
4398                  */
4399                 if (rxo->rx_post_starved)
4400                         be_post_rx_frags(rxo, GFP_KERNEL);
4401         }
4402
4403         be_eqd_update(adapter);
4404
4405 reschedule:
4406         adapter->work_counter++;
4407         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4408 }
4409
4410 /* If any VFs are already enabled don't FLR the PF */
4411 static bool be_reset_required(struct be_adapter *adapter)
4412 {
4413         return pci_num_vf(adapter->pdev) == 0;
4414 }
4415
4416 static char *mc_name(struct be_adapter *adapter)
4417 {
4418         if (adapter->function_mode & FLEX10_MODE)
4419                 return "FLEX10";
4420         else if (adapter->function_mode & VNIC_MODE)
4421                 return "vNIC";
4422         else if (adapter->function_mode & UMC_ENABLED)
4423                 return "UMC";
4424         else
4425                 return "";
4426 }
4427
4428 static inline char *func_name(struct be_adapter *adapter)
4429 {
4430         return be_physfn(adapter) ? "PF" : "VF";
4431 }
4432
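     /* PCI probe: enable the device, map BARs, sync with the firmware,
      * allocate resources and register the netdev.
      */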
4433 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4434 {
4435         int status = 0;
4436         struct be_adapter *adapter;
4437         struct net_device *netdev;
4438         char port_name;
4439
4440         status = pci_enable_device(pdev);
4441         if (status)
4442                 goto do_none;
4443
4444         status = pci_request_regions(pdev, DRV_NAME);
4445         if (status)
4446                 goto disable_dev;
4447         pci_set_master(pdev);
4448
4449         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4450         if (!netdev) {
4451                 status = -ENOMEM;
4452                 goto rel_reg;
4453         }
4454         adapter = netdev_priv(netdev);
4455         adapter->pdev = pdev;
4456         pci_set_drvdata(pdev, adapter);
4457         adapter->netdev = netdev;
4458         SET_NETDEV_DEV(netdev, &pdev->dev);
4459
4460         status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4461         if (!status) {
4462                 netdev->features |= NETIF_F_HIGHDMA;
4463         } else {
4464                 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4465                 if (status) {
4466                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4467                         goto free_netdev;
4468                 }
4469         }
4470
4471         if (be_physfn(adapter)) {
4472                 status = pci_enable_pcie_error_reporting(pdev);
4473                 if (!status)
4474                         dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4475         }
4476
4477         status = be_ctrl_init(adapter);
4478         if (status)
4479                 goto free_netdev;
4480
4481         /* sync up with fw's ready state */
4482         if (be_physfn(adapter)) {
4483                 status = be_fw_wait_ready(adapter);
4484                 if (status)
4485                         goto ctrl_clean;
4486         }
4487
4488         if (be_reset_required(adapter)) {
4489                 status = be_cmd_reset_function(adapter);
4490                 if (status)
4491                         goto ctrl_clean;
4492
4493                 /* Wait for interrupts to quiesce after an FLR */
4494                 msleep(100);
4495         }
4496
4497         /* Allow interrupts for other ULPs running on NIC function */
4498         be_intr_set(adapter, true);
4499
4500         /* tell fw we're ready to fire cmds */
4501         status = be_cmd_fw_init(adapter);
4502         if (status)
4503                 goto ctrl_clean;
4504
4505         status = be_stats_init(adapter);
4506         if (status)
4507                 goto ctrl_clean;
4508
4509         status = be_get_initial_config(adapter);
4510         if (status)
4511                 goto stats_clean;
4512
4513         INIT_DELAYED_WORK(&adapter->work, be_worker);
4514         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4515         adapter->rx_fc = adapter->tx_fc = true;
4516
4517         status = be_setup(adapter);
4518         if (status)
4519                 goto stats_clean;
4520
4521         be_netdev_init(netdev);
4522         status = register_netdev(netdev);
4523         if (status)
4524                 goto unsetup;
4525
4526         be_roce_dev_add(adapter);
4527
4528         schedule_delayed_work(&adapter->func_recovery_work,
4529                               msecs_to_jiffies(1000));
4530
4531         be_cmd_query_port_name(adapter, &port_name);
4532
4533         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4534                  func_name(adapter), mc_name(adapter), port_name);
4535
4536         return 0;
4537
4538 unsetup:
4539         be_clear(adapter);
4540 stats_clean:
4541         be_stats_cleanup(adapter);
4542 ctrl_clean:
4543         be_ctrl_cleanup(adapter);
4544 free_netdev:
4545         free_netdev(netdev);
4546 rel_reg:
4547         pci_release_regions(pdev);
4548 disable_dev:
4549         pci_disable_device(pdev);
4550 do_none:
4551         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4552         return status;
4553 }
4554
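     /* Legacy PCI PM suspend hook: arm wake-on-lan if enabled, quiesce
      * the function and enter the requested low-power state.
      */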
4555 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4556 {
4557         struct be_adapter *adapter = pci_get_drvdata(pdev);
4558         struct net_device *netdev = adapter->netdev;
4559
4560         if (adapter->wol_en)
4561                 be_setup_wol(adapter, true);
4562
4563         be_intr_set(adapter, false);
4564         cancel_delayed_work_sync(&adapter->func_recovery_work);
4565
4566         netif_device_detach(netdev);
4567         if (netif_running(netdev)) {
4568                 rtnl_lock();
4569                 be_close(netdev);
4570                 rtnl_unlock();
4571         }
4572         be_clear(adapter);
4573
4574         pci_save_state(pdev);
4575         pci_disable_device(pdev);
4576         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4577         return 0;
4578 }
4579
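     /* Legacy PCI PM resume hook: return to D0, wait for the firmware
      * and restore the pre-suspend configuration.
      */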
4580 static int be_resume(struct pci_dev *pdev)
4581 {
4582         int status = 0;
4583         struct be_adapter *adapter = pci_get_drvdata(pdev);
4584         struct net_device *netdev = adapter->netdev;
4585
4586         netif_device_detach(netdev);
4587
4588         status = pci_enable_device(pdev);
4589         if (status)
4590                 return status;
4591
4592         pci_set_power_state(pdev, PCI_D0);
4593         pci_restore_state(pdev);
4594
4595         status = be_fw_wait_ready(adapter);
4596         if (status)
4597                 return status;
4598
4599         be_intr_set(adapter, true);
4600         /* tell fw we're ready to fire cmds */
4601         status = be_cmd_fw_init(adapter);
4602         if (status)
4603                 return status;
4604
4605         status = be_setup(adapter);
             if (status)
                     return status;
4606         if (netif_running(netdev)) {
4607                 rtnl_lock();
4608                 be_open(netdev);
4609                 rtnl_unlock();
4610         }
4611
4612         schedule_delayed_work(&adapter->func_recovery_work,
4613                               msecs_to_jiffies(1000));
4614         netif_device_attach(netdev);
4615
4616         if (adapter->wol_en)
4617                 be_setup_wol(adapter, false);
4618
4619         return 0;
4620 }
4621
4622 /*
4623  * An FLR will stop BE from DMAing any data.
4624  */
4625 static void be_shutdown(struct pci_dev *pdev)
4626 {
4627         struct be_adapter *adapter = pci_get_drvdata(pdev);
4628
4629         if (!adapter)
4630                 return;
4631
4632         cancel_delayed_work_sync(&adapter->work);
4633         cancel_delayed_work_sync(&adapter->func_recovery_work);
4634
4635         netif_device_detach(adapter->netdev);
4636
4637         be_cmd_reset_function(adapter);
4638
4639         pci_disable_device(pdev);
4640 }
4641
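     /* EEH/AER callback: quiesce the function on a PCI channel error and
      * tell the EEH core whether a slot reset may recover the device.
      */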
4642 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4643                                 pci_channel_state_t state)
4644 {
4645         struct be_adapter *adapter = pci_get_drvdata(pdev);
4646         struct net_device *netdev = adapter->netdev;
4647
4648         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4649
4650         if (!adapter->eeh_error) {
4651                 adapter->eeh_error = true;
4652
4653                 cancel_delayed_work_sync(&adapter->func_recovery_work);
4654
4655                 rtnl_lock();
4656                 netif_device_detach(netdev);
4657                 if (netif_running(netdev))
4658                         be_close(netdev);
4659                 rtnl_unlock();
4660
4661                 be_clear(adapter);
4662         }
4663
4664         if (state == pci_channel_io_perm_failure)
4665                 return PCI_ERS_RESULT_DISCONNECT;
4666
4667         pci_disable_device(pdev);
4668
4669         /* The error could cause the FW to trigger a flash debug dump.
4670          * Resetting the card while flash dump is in progress
4671          * can cause it not to recover; wait for it to finish.
4672          * Wait only for first function as it is needed only once per
4673          * adapter.
4674          */
4675         if (pdev->devfn == 0)
4676                 ssleep(30);
4677
4678         return PCI_ERS_RESULT_NEED_RESET;
4679 }
4680
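     /* EEH/AER callback: the slot has been reset; re-enable the device
      * and wait for the firmware to become ready again.
      */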
4681 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4682 {
4683         struct be_adapter *adapter = pci_get_drvdata(pdev);
4684         int status;
4685
4686         dev_info(&adapter->pdev->dev, "EEH reset\n");
4687
4688         status = pci_enable_device(pdev);
4689         if (status)
4690                 return PCI_ERS_RESULT_DISCONNECT;
4691
4692         pci_set_master(pdev);
4693         pci_set_power_state(pdev, PCI_D0);
4694         pci_restore_state(pdev);
4695
4696         /* Check if card is ok and fw is ready */
4697         dev_info(&adapter->pdev->dev,
4698                  "Waiting for FW to be ready after EEH reset\n");
4699         status = be_fw_wait_ready(adapter);
4700         if (status)
4701                 return PCI_ERS_RESULT_DISCONNECT;
4702
4703         pci_cleanup_aer_uncorrect_error_status(pdev);
4704         be_clear_all_error(adapter);
4705         return PCI_ERS_RESULT_RECOVERED;
4706 }
4707
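     /* EEH/AER callback: traffic may resume; rebuild NIC state and
      * re-attach the netdev.
      */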
4708 static void be_eeh_resume(struct pci_dev *pdev)
4709 {
4710         int status = 0;
4711         struct be_adapter *adapter = pci_get_drvdata(pdev);
4712         struct net_device *netdev = adapter->netdev;
4713
4714         dev_info(&adapter->pdev->dev, "EEH resume\n");
4715
4716         pci_save_state(pdev);
4717
4718         status = be_cmd_reset_function(adapter);
4719         if (status)
4720                 goto err;
4721
4722         /* tell fw we're ready to fire cmds */
4723         status = be_cmd_fw_init(adapter);
4724         if (status)
4725                 goto err;
4726
4727         status = be_setup(adapter);
4728         if (status)
4729                 goto err;
4730
4731         if (netif_running(netdev)) {
4732                 status = be_open(netdev);
4733                 if (status)
4734                         goto err;
4735         }
4736
4737         schedule_delayed_work(&adapter->func_recovery_work,
4738                               msecs_to_jiffies(1000));
4739         netif_device_attach(netdev);
4740         return;
4741 err:
4742         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4743 }
4744
4745 static const struct pci_error_handlers be_eeh_handlers = {
4746         .error_detected = be_eeh_err_detected,
4747         .slot_reset = be_eeh_reset,
4748         .resume = be_eeh_resume,
4749 };
4750
4751 static struct pci_driver be_driver = {
4752         .name = DRV_NAME,
4753         .id_table = be_dev_ids,
4754         .probe = be_probe,
4755         .remove = be_remove,
4756         .suspend = be_suspend,
4757         .resume = be_resume,
4758         .shutdown = be_shutdown,
4759         .err_handler = &be_eeh_handlers
4760 };
4761
4762 static int __init be_init_module(void)
4763 {
4764         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4765             rx_frag_size != 2048) {
4766                 pr_warn(DRV_NAME
4767                         " : Module param rx_frag_size must be 2048/4096/8192."
4768                         " Using 2048\n");
4769                 rx_frag_size = 2048;
4770         }
4771
4772         return pci_register_driver(&be_driver);
4773 }
4774 module_init(be_init_module);
4775
4776 static void __exit be_exit_module(void)
4777 {
4778         pci_unregister_driver(&be_driver);
4779 }
4780 module_exit(be_exit_module);