be2net: Add support for setting and getting rx flow hash options
[cascardo/linux.git] drivers/net/ethernet/emulex/benet/be_main.c
1 /*
2  * Copyright (C) 2005 - 2013 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24
25 MODULE_VERSION(DRV_VER);
27 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28 MODULE_AUTHOR("Emulex Corporation");
29 MODULE_LICENSE("GPL");
30
31 static unsigned int num_vfs;
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static ushort rx_frag_size = 2048;
36 module_param(rx_frag_size, ushort, S_IRUGO);
37 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
46         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
47         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
48         { 0 }
49 };
50 MODULE_DEVICE_TABLE(pci, be_dev_ids);
51 /* UE Status Low CSR */
52 static const char * const ue_status_low_desc[] = {
53         "CEV",
54         "CTX",
55         "DBUF",
56         "ERX",
57         "Host",
58         "MPU",
59         "NDMA",
60         "PTC ",
61         "RDMA ",
62         "RXF ",
63         "RXIPS ",
64         "RXULP0 ",
65         "RXULP1 ",
66         "RXULP2 ",
67         "TIM ",
68         "TPOST ",
69         "TPRE ",
70         "TXIPS ",
71         "TXULP0 ",
72         "TXULP1 ",
73         "UC ",
74         "WDMA ",
75         "TXULP2 ",
76         "HOST1 ",
77         "P0_OB_LINK ",
78         "P1_OB_LINK ",
79         "HOST_GPIO ",
80         "MBOX ",
81         "AXGMAC0",
82         "AXGMAC1",
83         "JTAG",
84         "MPU_INTPEND"
85 };
86 /* UE Status High CSR */
87 static const char * const ue_status_hi_desc[] = {
88         "LPCMEMHOST",
89         "MGMT_MAC",
90         "PCS0ONLINE",
91         "MPU_IRAM",
92         "PCS1ONLINE",
93         "PCTL0",
94         "PCTL1",
95         "PMEM",
96         "RR",
97         "TXPB",
98         "RXPP",
99         "XAUI",
100         "TXP",
101         "ARM",
102         "IPC",
103         "HOST2",
104         "HOST3",
105         "HOST4",
106         "HOST5",
107         "HOST6",
108         "HOST7",
109         "HOST8",
110         "HOST9",
111         "NETC",
112         "Unknown",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown",
117         "Unknown",
118         "Unknown",
119         "Unknown"
120 };
121
122 /* Is BE in multi-channel mode? */
123 static inline bool be_is_mc(struct be_adapter *adapter) {
124         return (adapter->function_mode & FLEX10_MODE ||
125                 adapter->function_mode & VNIC_MODE ||
126                 adapter->function_mode & UMC_ENABLED);
127 }
128
129 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130 {
131         struct be_dma_mem *mem = &q->dma_mem;
132         if (mem->va) {
133                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134                                   mem->dma);
135                 mem->va = NULL;
136         }
137 }
138
139 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140                 u16 len, u16 entry_size)
141 {
142         struct be_dma_mem *mem = &q->dma_mem;
143
144         memset(q, 0, sizeof(*q));
145         q->len = len;
146         q->entry_size = entry_size;
147         mem->size = len * entry_size;
148         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149                                      GFP_KERNEL | __GFP_ZERO);
150         if (!mem->va)
151                 return -ENOMEM;
152         return 0;
153 }
154
155 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
156 {
157         u32 reg, enabled;
158
159         pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160                                 &reg);
161         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
163         if (!enabled && enable)
164                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165         else if (enabled && !enable)
166                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167         else
168                 return;
169
170         pci_write_config_dword(adapter->pdev,
171                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
172 }
173
174 static void be_intr_set(struct be_adapter *adapter, bool enable)
175 {
176         int status = 0;
177
178         /* On Lancer, interrupts can't be controlled via this register */
179         if (lancer_chip(adapter))
180                 return;
181
182         if (adapter->eeh_error)
183                 return;
184
185         status = be_cmd_intr_set(adapter, enable);
186         if (status)
187                 be_reg_intr_set(adapter, enable);
188 }
189
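/* Ring the RQ doorbell: notify HW of 'posted' new receive buffers on
 * queue 'qid'.
 */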
190 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
191 {
192         u32 val = 0;
193         val |= qid & DB_RQ_RING_ID_MASK;
194         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
195
196         wmb();
197         iowrite32(val, adapter->db + DB_RQ_OFFSET);
198 }
199
200 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201                           u16 posted)
202 {
203         u32 val = 0;
204         val |= txo->q.id & DB_TXULP_RING_ID_MASK;
205         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
206
207         wmb();
208         iowrite32(val, adapter->db + txo->db_offset);
209 }
210
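/* Ring the EQ doorbell: ack 'num_popped' events and optionally re-arm
 * the EQ and clear the interrupt.
 */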
211 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
212                 bool arm, bool clear_int, u16 num_popped)
213 {
214         u32 val = 0;
215         val |= qid & DB_EQ_RING_ID_MASK;
216         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
218
219         if (adapter->eeh_error)
220                 return;
221
222         if (arm)
223                 val |= 1 << DB_EQ_REARM_SHIFT;
224         if (clear_int)
225                 val |= 1 << DB_EQ_CLR_SHIFT;
226         val |= 1 << DB_EQ_EVNT_SHIFT;
227         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
228         iowrite32(val, adapter->db + DB_EQ_OFFSET);
229 }
230
231 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
232 {
233         u32 val = 0;
234         val |= qid & DB_CQ_RING_ID_MASK;
235         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
237
238         if (adapter->eeh_error)
239                 return;
240
241         if (arm)
242                 val |= 1 << DB_CQ_REARM_SHIFT;
243         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
244         iowrite32(val, adapter->db + DB_CQ_OFFSET);
245 }
246
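/* Set a new primary MAC address: add the new MAC as a pmac entry on the
 * interface, then delete the previously active entry. For BE VFs the MAC
 * is managed by the PF, so only netdev->dev_addr is updated.
 */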
247 static int be_mac_addr_set(struct net_device *netdev, void *p)
248 {
249         struct be_adapter *adapter = netdev_priv(netdev);
250         struct sockaddr *addr = p;
251         int status = 0;
252         u8 current_mac[ETH_ALEN];
253         u32 pmac_id = adapter->pmac_id[0];
254         bool active_mac = true;
255
256         if (!is_valid_ether_addr(addr->sa_data))
257                 return -EADDRNOTAVAIL;
258
259         /* For a BE VF, the MAC address is already activated by the PF.
260          * Hence the only operation left is updating netdev->dev_addr.
261          * Update it only if the user passes the same MAC that the PF
262          * (hypervisor) configured for this VF.
263          */
264         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
265                 status = be_cmd_mac_addr_query(adapter, current_mac,
266                                                false, adapter->if_handle, 0);
267                 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
268                         goto done;
269                 else
270                         goto err;
271         }
272
273         if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
274                 goto done;
275
276         /* For Lancer, check if any MAC is active.
277          * If active, get its pmac id.
278          */
279         if (lancer_chip(adapter) && !be_physfn(adapter))
280                 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
281                                          &pmac_id, 0);
282
283         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
284                                  adapter->if_handle,
285                                  &adapter->pmac_id[0], 0);
286
287         if (status)
288                 goto err;
289
290         if (active_mac)
291                 be_cmd_pmac_del(adapter, adapter->if_handle,
292                                 pmac_id, 0);
293 done:
294         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
295         return 0;
296 err:
297         dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
298         return status;
299 }
300
301 /* BE2 supports only v0 cmd */
302 static void *hw_stats_from_cmd(struct be_adapter *adapter)
303 {
304         if (BE2_chip(adapter)) {
305                 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307                 return &cmd->hw_stats;
308         } else  {
309                 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311                 return &cmd->hw_stats;
312         }
313 }
314
315 /* BE2 supports only v0 cmd */
316 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317 {
318         if (BE2_chip(adapter)) {
319                 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321                 return &hw_stats->erx;
322         } else {
323                 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325                 return &hw_stats->erx;
326         }
327 }
328
329 static void populate_be_v0_stats(struct be_adapter *adapter)
330 {
331         struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
333         struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
334         struct be_port_rxf_stats_v0 *port_stats =
335                                         &rxf_stats->port[adapter->port_num];
336         struct be_drv_stats *drvs = &adapter->drv_stats;
337
338         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
339         drvs->rx_pause_frames = port_stats->rx_pause_frames;
340         drvs->rx_crc_errors = port_stats->rx_crc_errors;
341         drvs->rx_control_frames = port_stats->rx_control_frames;
342         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
343         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
344         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
345         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
346         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
347         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
348         drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
349         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
350         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
351         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
352         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
353         drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
354         drvs->rx_dropped_header_too_small =
355                 port_stats->rx_dropped_header_too_small;
356         drvs->rx_address_mismatch_drops =
357                                         port_stats->rx_address_mismatch_drops +
358                                         port_stats->rx_vlan_mismatch_drops;
359         drvs->rx_alignment_symbol_errors =
360                 port_stats->rx_alignment_symbol_errors;
361
362         drvs->tx_pauseframes = port_stats->tx_pauseframes;
363         drvs->tx_controlframes = port_stats->tx_controlframes;
364
365         if (adapter->port_num)
366                 drvs->jabber_events = rxf_stats->port1_jabber_events;
367         else
368                 drvs->jabber_events = rxf_stats->port0_jabber_events;
369         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
370         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
371         drvs->forwarded_packets = rxf_stats->forwarded_packets;
372         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
373         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
374         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
375         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
376 }
377
378 static void populate_be_v1_stats(struct be_adapter *adapter)
379 {
380         struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
381         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
382         struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
383         struct be_port_rxf_stats_v1 *port_stats =
384                                         &rxf_stats->port[adapter->port_num];
385         struct be_drv_stats *drvs = &adapter->drv_stats;
386
387         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
388         drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
389         drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
390         drvs->rx_pause_frames = port_stats->rx_pause_frames;
391         drvs->rx_crc_errors = port_stats->rx_crc_errors;
392         drvs->rx_control_frames = port_stats->rx_control_frames;
393         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
394         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
395         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
396         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
397         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
398         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
399         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
400         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
401         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
402         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
403         drvs->rx_dropped_header_too_small =
404                 port_stats->rx_dropped_header_too_small;
405         drvs->rx_input_fifo_overflow_drop =
406                 port_stats->rx_input_fifo_overflow_drop;
407         drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
408         drvs->rx_alignment_symbol_errors =
409                 port_stats->rx_alignment_symbol_errors;
410         drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
411         drvs->tx_pauseframes = port_stats->tx_pauseframes;
412         drvs->tx_controlframes = port_stats->tx_controlframes;
413         drvs->jabber_events = port_stats->jabber_events;
414         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
415         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
416         drvs->forwarded_packets = rxf_stats->forwarded_packets;
417         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
418         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
419         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
420         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
421 }
422
423 static void populate_lancer_stats(struct be_adapter *adapter)
424 {
426         struct be_drv_stats *drvs = &adapter->drv_stats;
427         struct lancer_pport_stats *pport_stats =
428                                         pport_stats_from_cmd(adapter);
429
430         be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
431         drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
432         drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
433         drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
434         drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
435         drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
436         drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
437         drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
438         drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
439         drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
440         drvs->rx_dropped_tcp_length =
441                                 pport_stats->rx_dropped_invalid_tcp_length;
442         drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
443         drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
444         drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
445         drvs->rx_dropped_header_too_small =
446                                 pport_stats->rx_dropped_header_too_small;
447         drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
448         drvs->rx_address_mismatch_drops =
449                                         pport_stats->rx_address_mismatch_drops +
450                                         pport_stats->rx_vlan_mismatch_drops;
451         drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
452         drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
453         drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
454         drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
455         drvs->jabber_events = pport_stats->rx_jabbers;
456         drvs->forwarded_packets = pport_stats->num_forwards_lo;
457         drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
458         drvs->rx_drops_too_many_frags =
459                                 pport_stats->rx_drops_too_many_frags_lo;
460 }
461
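/* The erx HW counter is only 16 bits wide and wraps at 65535. Fold each
 * new 16-bit reading into a 32-bit SW accumulator: the low half mirrors
 * the HW value, and 65536 is added whenever a wrap is detected.
 */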
462 static void accumulate_16bit_val(u32 *acc, u16 val)
463 {
464 #define lo(x)                   ((x) & 0xFFFF)
465 #define hi(x)                   ((x) & 0xFFFF0000)
466         bool wrapped = val < lo(*acc);
467         u32 newacc = hi(*acc) + val;
468
469         if (wrapped)
470                 newacc += 65536;
471         ACCESS_ONCE(*acc) = newacc;
472 }
473
474 void be_parse_stats(struct be_adapter *adapter)
475 {
476         struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
477         struct be_rx_obj *rxo;
478         int i;
479
480         if (lancer_chip(adapter)) {
481                 populate_lancer_stats(adapter);
482         } else {
483                 if (BE2_chip(adapter))
484                         populate_be_v0_stats(adapter);
485                 else
486                         /* for BE3 and Skyhawk */
487                         populate_be_v1_stats(adapter);
488
489                 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
490                 for_all_rx_queues(adapter, rxo, i) {
491                         /* the erx HW counter below can wrap around after
492                          * 65535; the driver accumulates it in a 32-bit value
493                          */
494                         accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
495                                              (u16)erx->rx_drops_no_fragments[rxo->q.id]);
497                 }
498         }
499 }
500
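/* Aggregate the per-queue SW stats (read under u64_stats retry loops to
 * avoid torn 64-bit reads on 32-bit hosts) and the per-port HW error
 * counters into the rtnl_link_stats64 snapshot.
 */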
501 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
502                                         struct rtnl_link_stats64 *stats)
503 {
504         struct be_adapter *adapter = netdev_priv(netdev);
505         struct be_drv_stats *drvs = &adapter->drv_stats;
506         struct be_rx_obj *rxo;
507         struct be_tx_obj *txo;
508         u64 pkts, bytes;
509         unsigned int start;
510         int i;
511
512         for_all_rx_queues(adapter, rxo, i) {
513                 const struct be_rx_stats *rx_stats = rx_stats(rxo);
514                 do {
515                         start = u64_stats_fetch_begin_bh(&rx_stats->sync);
516                         pkts = rx_stats(rxo)->rx_pkts;
517                         bytes = rx_stats(rxo)->rx_bytes;
518                 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
519                 stats->rx_packets += pkts;
520                 stats->rx_bytes += bytes;
521                 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
522                 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
523                                         rx_stats(rxo)->rx_drops_no_frags;
524         }
525
526         for_all_tx_queues(adapter, txo, i) {
527                 const struct be_tx_stats *tx_stats = tx_stats(txo);
528                 do {
529                         start = u64_stats_fetch_begin_bh(&tx_stats->sync);
530                         pkts = tx_stats(txo)->tx_pkts;
531                         bytes = tx_stats(txo)->tx_bytes;
532                 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
533                 stats->tx_packets += pkts;
534                 stats->tx_bytes += bytes;
535         }
536
537         /* bad pkts received */
538         stats->rx_errors = drvs->rx_crc_errors +
539                 drvs->rx_alignment_symbol_errors +
540                 drvs->rx_in_range_errors +
541                 drvs->rx_out_range_errors +
542                 drvs->rx_frame_too_long +
543                 drvs->rx_dropped_too_small +
544                 drvs->rx_dropped_too_short +
545                 drvs->rx_dropped_header_too_small +
546                 drvs->rx_dropped_tcp_length +
547                 drvs->rx_dropped_runt;
548
549         /* detailed rx errors */
550         stats->rx_length_errors = drvs->rx_in_range_errors +
551                 drvs->rx_out_range_errors +
552                 drvs->rx_frame_too_long;
553
554         stats->rx_crc_errors = drvs->rx_crc_errors;
555
556         /* frame alignment errors */
557         stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
558
559         /* receiver fifo overrun */
560         /* drops_no_pbuf is not per i/f; it's per BE card */
561         stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
562                                 drvs->rx_input_fifo_overflow_drop +
563                                 drvs->rx_drops_no_pbuf;
564         return stats;
565 }
566
567 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
568 {
569         struct net_device *netdev = adapter->netdev;
570
571         if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
572                 netif_carrier_off(netdev);
573                 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
574         }
575
576         if ((link_status & LINK_STATUS_MASK) == LINK_UP)
577                 netif_carrier_on(netdev);
578         else
579                 netif_carrier_off(netdev);
580 }
581
582 static void be_tx_stats_update(struct be_tx_obj *txo,
583                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
584 {
585         struct be_tx_stats *stats = tx_stats(txo);
586
587         u64_stats_update_begin(&stats->sync);
588         stats->tx_reqs++;
589         stats->tx_wrbs += wrb_cnt;
590         stats->tx_bytes += copied;
591         stats->tx_pkts += (gso_segs ? gso_segs : 1);
592         if (stopped)
593                 stats->tx_stops++;
594         u64_stats_update_end(&stats->sync);
595 }
596
597 /* Determine number of WRB entries needed to xmit data in an skb */
598 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
599                                                                 bool *dummy)
600 {
601         int cnt = (skb->len > skb->data_len);
602
603         cnt += skb_shinfo(skb)->nr_frags;
604
605         /* to account for hdr wrb */
606         cnt++;
607         if (lancer_chip(adapter) || !(cnt & 1)) {
608                 *dummy = false;
609         } else {
610                 /* add a dummy WRB to make the count even */
611                 cnt++;
612                 *dummy = true;
613         }
614         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
615         return cnt;
616 }
617
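/* Fill a WRB with a fragment's DMA address (split into hi/lo 32 bits)
 * and its length.
 */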
618 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
619 {
620         wrb->frag_pa_hi = upper_32_bits(addr);
621         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
622         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
623         wrb->rsvd0 = 0;
624 }
625
626 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
627                                         struct sk_buff *skb)
628 {
629         u8 vlan_prio;
630         u16 vlan_tag;
631
632         vlan_tag = vlan_tx_tag_get(skb);
633         vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
634         /* If vlan priority provided by OS is NOT in available bmap */
635         if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
636                 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
637                                 adapter->recommended_prio;
638
639         return vlan_tag;
640 }
641
642 static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
643 {
644         return vlan_tx_tag_present(skb) || adapter->pvid;
645 }
646
647 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
648                 struct sk_buff *skb, u32 wrb_cnt, u32 len)
649 {
650         u16 vlan_tag;
651
652         memset(hdr, 0, sizeof(*hdr));
653
654         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
655
656         if (skb_is_gso(skb)) {
657                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
658                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
659                         hdr, skb_shinfo(skb)->gso_size);
660                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
661                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
662         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
663                 if (is_tcp_pkt(skb))
664                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
665                 else if (is_udp_pkt(skb))
666                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
667         }
668
669         if (vlan_tx_tag_present(skb)) {
670                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
671                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
672                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
673         }
674
675         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
676         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
677         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
678         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
679 }
680
681 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
682                 bool unmap_single)
683 {
684         dma_addr_t dma;
685
686         be_dws_le_to_cpu(wrb, sizeof(*wrb));
687
688         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
689         if (wrb->frag_len) {
690                 if (unmap_single)
691                         dma_unmap_single(dev, dma, wrb->frag_len,
692                                          DMA_TO_DEVICE);
693                 else
694                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
695         }
696 }
697
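/* Reserve the first WRB for the header, DMA-map the skb head and each
 * page frag posting one WRB per mapping, optionally add a dummy WRB to
 * even out the count, and finally fill the header WRB. On a mapping
 * error, unwind all WRBs filled so far and return 0.
 */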
698 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
699                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
700 {
701         dma_addr_t busaddr;
702         int i, copied = 0;
703         struct device *dev = &adapter->pdev->dev;
704         struct sk_buff *first_skb = skb;
705         struct be_eth_wrb *wrb;
706         struct be_eth_hdr_wrb *hdr;
707         bool map_single = false;
708         u16 map_head;
709
710         hdr = queue_head_node(txq);
711         queue_head_inc(txq);
712         map_head = txq->head;
713
714         if (skb->len > skb->data_len) {
715                 int len = skb_headlen(skb);
716                 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
717                 if (dma_mapping_error(dev, busaddr))
718                         goto dma_err;
719                 map_single = true;
720                 wrb = queue_head_node(txq);
721                 wrb_fill(wrb, busaddr, len);
722                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
723                 queue_head_inc(txq);
724                 copied += len;
725         }
726
727         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
728                 const struct skb_frag_struct *frag =
729                         &skb_shinfo(skb)->frags[i];
730                 busaddr = skb_frag_dma_map(dev, frag, 0,
731                                            skb_frag_size(frag), DMA_TO_DEVICE);
732                 if (dma_mapping_error(dev, busaddr))
733                         goto dma_err;
734                 wrb = queue_head_node(txq);
735                 wrb_fill(wrb, busaddr, skb_frag_size(frag));
736                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
737                 queue_head_inc(txq);
738                 copied += skb_frag_size(frag);
739         }
740
741         if (dummy_wrb) {
742                 wrb = queue_head_node(txq);
743                 wrb_fill(wrb, 0, 0);
744                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
745                 queue_head_inc(txq);
746         }
747
748         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
749         be_dws_cpu_to_le(hdr, sizeof(*hdr));
750
751         return copied;
752 dma_err:
753         txq->head = map_head;
754         while (copied) {
755                 wrb = queue_head_node(txq);
756                 unmap_tx_frag(dev, wrb, map_single);
757                 map_single = false;
758                 copied -= wrb->frag_len;
759                 queue_head_inc(txq);
760         }
761         return 0;
762 }
763
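/* Insert the VLAN tag into the packet data itself instead of passing it
 * out-of-band in the TX descriptor; used as a workaround for the VLAN
 * csum HW bug noted in be_xmit().
 */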
764 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
765                                              struct sk_buff *skb)
766 {
767         u16 vlan_tag = 0;
768
769         skb = skb_share_check(skb, GFP_ATOMIC);
770         if (unlikely(!skb))
771                 return skb;
772
773         if (vlan_tx_tag_present(skb)) {
774                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
775                 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
776                 if (skb)
777                         skb->vlan_tci = 0;
778         }
779
780         return skb;
781 }
782
783 static netdev_tx_t be_xmit(struct sk_buff *skb,
784                         struct net_device *netdev)
785 {
786         struct be_adapter *adapter = netdev_priv(netdev);
787         struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
788         struct be_queue_info *txq = &txo->q;
789         struct iphdr *ip = NULL;
790         u32 wrb_cnt = 0, copied = 0;
791         u32 start = txq->head, eth_hdr_len;
792         bool dummy_wrb, stopped = false;
793
794         eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
795                 VLAN_ETH_HLEN : ETH_HLEN;
796
797         /* HW bug: padding bytes in short frames are treated as payload,
798          * corrupting the IPv4 hdr's 'tot_len' field; trim them off
799          */
800         if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
801                         is_ipv4_pkt(skb)) {
802                 ip = (struct iphdr *)ip_hdr(skb);
803                 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
804         }
805
806         /* HW bug: CSUM is calculated for VLAN pkts even when csum
807          * offload is disabled.
808          * As a workaround, manually insert the VLAN tag in the pkt.
809          */
810         if (skb->ip_summed != CHECKSUM_PARTIAL &&
811                         be_vlan_tag_chk(adapter, skb)) {
812                 skb = be_insert_vlan_in_pkt(adapter, skb);
813                 if (unlikely(!skb))
814                         goto tx_drop;
815         }
816
817         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
818
819         copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
820         if (copied) {
821                 int gso_segs = skb_shinfo(skb)->gso_segs;
822
823                 /* record the sent skb in the sent_skb table */
824                 BUG_ON(txo->sent_skb_list[start]);
825                 txo->sent_skb_list[start] = skb;
826
827                 /* Ensure txq has space for the next skb; Else stop the queue
828                  * *BEFORE* ringing the tx doorbell, so that we serialize the
829                  * tx compls of the current transmit which'll wake up the queue
830                  */
831                 atomic_add(wrb_cnt, &txq->used);
832                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
833                                                                 txq->len) {
834                         netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
835                         stopped = true;
836                 }
837
838                 be_txq_notify(adapter, txo, wrb_cnt);
839
840                 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
841         } else {
842                 txq->head = start;
843                 dev_kfree_skb_any(skb);
844         }
845 tx_drop:
846         return NETDEV_TX_OK;
847 }
848
849 static int be_change_mtu(struct net_device *netdev, int new_mtu)
850 {
851         struct be_adapter *adapter = netdev_priv(netdev);
852         if (new_mtu < BE_MIN_MTU ||
853                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
854                                         (ETH_HLEN + ETH_FCS_LEN))) {
855                 dev_info(&adapter->pdev->dev,
856                         "MTU must be between %d and %d bytes\n",
857                         BE_MIN_MTU,
858                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
859                 return -EINVAL;
860         }
861         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
862                         netdev->mtu, new_mtu);
863         netdev->mtu = new_mtu;
864         return 0;
865 }
866
867 /*
868  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
869  * If the user configures more, place BE in vlan promiscuous mode.
870  */
871 static int be_vid_config(struct be_adapter *adapter)
872 {
873         u16 vids[BE_NUM_VLANS_SUPPORTED];
874         u16 num = 0, i;
875         int status = 0;
876
877         /* No need to further configure vids if in promiscuous mode */
878         if (adapter->promiscuous)
879                 return 0;
880
881         if (adapter->vlans_added > adapter->max_vlans)
882                 goto set_vlan_promisc;
883
884         /* Construct VLAN Table to give to HW */
885         for (i = 0; i < VLAN_N_VID; i++)
886                 if (adapter->vlan_tag[i])
887                         vids[num++] = cpu_to_le16(i);
888
889         status = be_cmd_vlan_config(adapter, adapter->if_handle,
890                                     vids, num, 1, 0);
891
892         /* Set to VLAN promisc mode as setting VLAN filter failed */
893         if (status) {
894                 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
895                 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
896                 goto set_vlan_promisc;
897         }
898
899         return status;
900
901 set_vlan_promisc:
902         status = be_cmd_vlan_config(adapter, adapter->if_handle,
903                                     NULL, 0, 1, 1);
904         return status;
905 }
906
907 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
908 {
909         struct be_adapter *adapter = netdev_priv(netdev);
910         int status = 0;
911
912         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
913                 status = -EINVAL;
914                 goto ret;
915         }
916
917         /* Packets with VID 0 are always received by Lancer by default */
918         if (lancer_chip(adapter) && vid == 0)
919                 goto ret;
920
921         adapter->vlan_tag[vid] = 1;
922         if (adapter->vlans_added <= (adapter->max_vlans + 1))
923                 status = be_vid_config(adapter);
924
925         if (!status)
926                 adapter->vlans_added++;
927         else
928                 adapter->vlan_tag[vid] = 0;
929 ret:
930         return status;
931 }
932
933 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
934 {
935         struct be_adapter *adapter = netdev_priv(netdev);
936         int status = 0;
937
938         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
939                 status = -EINVAL;
940                 goto ret;
941         }
942
943         /* Packets with VID 0 are always received by Lancer by default */
944         if (lancer_chip(adapter) && vid == 0)
945                 goto ret;
946
947         adapter->vlan_tag[vid] = 0;
948         if (adapter->vlans_added <= adapter->max_vlans)
949                 status = be_vid_config(adapter);
950
951         if (!status)
952                 adapter->vlans_added--;
953         else
954                 adapter->vlan_tag[vid] = 1;
955 ret:
956         return status;
957 }
958
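/* Program the RX filters from netdev state: promiscuous, multicast and
 * unicast MAC lists. Falls back to (multicast) promiscuous mode when the
 * HW filter tables are exhausted.
 */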
959 static void be_set_rx_mode(struct net_device *netdev)
960 {
961         struct be_adapter *adapter = netdev_priv(netdev);
962         int status;
963
964         if (netdev->flags & IFF_PROMISC) {
965                 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
966                 adapter->promiscuous = true;
967                 goto done;
968         }
969
970         /* BE was previously in promiscuous mode; disable it */
971         if (adapter->promiscuous) {
972                 adapter->promiscuous = false;
973                 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
974
975                 if (adapter->vlans_added)
976                         be_vid_config(adapter);
977         }
978
979         /* Enable multicast promisc if num configured exceeds what we support */
980         if (netdev->flags & IFF_ALLMULTI ||
981             netdev_mc_count(netdev) > adapter->max_mcast_mac) {
982                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
983                 goto done;
984         }
985
986         if (netdev_uc_count(netdev) != adapter->uc_macs) {
987                 struct netdev_hw_addr *ha;
988                 int i = 1; /* First slot is claimed by the Primary MAC */
989
990                 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
991                         be_cmd_pmac_del(adapter, adapter->if_handle,
992                                         adapter->pmac_id[i], 0);
993                 }
994
995                 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
996                         be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
997                         adapter->promiscuous = true;
998                         goto done;
999                 }
1000
1001                 netdev_for_each_uc_addr(ha, adapter->netdev) {
1002                         adapter->uc_macs++; /* First slot is for Primary MAC */
1003                         be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1004                                         adapter->if_handle,
1005                                         &adapter->pmac_id[adapter->uc_macs], 0);
1006                 }
1007         }
1008
1009         status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1010
1011         /* Set to MCAST promisc mode if setting MULTICAST address fails */
1012         if (status) {
1013                 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1014                 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1015                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1016         }
1017 done:
1018         return;
1019 }
1020
1021 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1022 {
1023         struct be_adapter *adapter = netdev_priv(netdev);
1024         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1025         int status;
1026         bool active_mac = false;
1027         u32 pmac_id;
1028         u8 old_mac[ETH_ALEN];
1029
1030         if (!sriov_enabled(adapter))
1031                 return -EPERM;
1032
1033         if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1034                 return -EINVAL;
1035
1036         if (lancer_chip(adapter)) {
1037                 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
1038                                                   &pmac_id, vf + 1);
1039                 if (!status && active_mac)
1040                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1041                                         pmac_id, vf + 1);
1042
1043                 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
1044         } else {
1045                 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1046                                          vf_cfg->pmac_id, vf + 1);
1047
1048                 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1049                                          &vf_cfg->pmac_id, vf + 1);
1050         }
1051
1052         if (status)
1053                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
1054                                 mac, vf);
1055         else
1056                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1057
1058         return status;
1059 }
1060
1061 static int be_get_vf_config(struct net_device *netdev, int vf,
1062                         struct ifla_vf_info *vi)
1063 {
1064         struct be_adapter *adapter = netdev_priv(netdev);
1065         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1066
1067         if (!sriov_enabled(adapter))
1068                 return -EPERM;
1069
1070         if (vf >= adapter->num_vfs)
1071                 return -EINVAL;
1072
1073         vi->vf = vf;
1074         vi->tx_rate = vf_cfg->tx_rate;
1075         vi->vlan = vf_cfg->vlan_tag;
1076         vi->qos = 0;
1077         memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1078
1079         return 0;
1080 }
1081
1082 static int be_set_vf_vlan(struct net_device *netdev,
1083                         int vf, u16 vlan, u8 qos)
1084 {
1085         struct be_adapter *adapter = netdev_priv(netdev);
1086         int status = 0;
1087
1088         if (!sriov_enabled(adapter))
1089                 return -EPERM;
1090
1091         if (vf >= adapter->num_vfs || vlan > 4095)
1092                 return -EINVAL;
1093
1094         if (vlan) {
1095                 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1096                         /* If this is a new value, program it. Else skip. */
1097                         adapter->vf_cfg[vf].vlan_tag = vlan;
1098
1099                         status = be_cmd_set_hsw_config(adapter, vlan,
1100                                 vf + 1, adapter->vf_cfg[vf].if_handle);
1101                 }
1102         } else {
1103                 /* Reset Transparent Vlan Tagging. */
1104                 adapter->vf_cfg[vf].vlan_tag = 0;
1105                 vlan = adapter->vf_cfg[vf].def_vid;
1106                 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1107                         adapter->vf_cfg[vf].if_handle);
1108         }
1109
1111         if (status)
1112                 dev_info(&adapter->pdev->dev,
1113                                 "VLAN %d config on VF %d failed\n", vlan, vf);
1114         return status;
1115 }
1116
1117 static int be_set_vf_tx_rate(struct net_device *netdev,
1118                         int vf, int rate)
1119 {
1120         struct be_adapter *adapter = netdev_priv(netdev);
1121         int status = 0;
1122
1123         if (!sriov_enabled(adapter))
1124                 return -EPERM;
1125
1126         if (vf >= adapter->num_vfs)
1127                 return -EINVAL;
1128
1129         if (rate < 100 || rate > 10000) {
1130                 dev_err(&adapter->pdev->dev,
1131                         "tx rate must be between 100 and 10000 Mbps\n");
1132                 return -EINVAL;
1133         }
1134
1135         if (lancer_chip(adapter))
1136                 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1137         else
1138                 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1139
1140         if (status)
1141                 dev_err(&adapter->pdev->dev,
1142                                 "tx rate %d on VF %d failed\n", rate, vf);
1143         else
1144                 adapter->vf_cfg[vf].tx_rate = rate;
1145         return status;
1146 }
1147
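/* Walk the PCI bus and count this adapter's virtual functions; with
 * vf_state == ASSIGNED, count only the VFs currently assigned to a guest.
 */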
1148 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1149 {
1150         struct pci_dev *dev, *pdev = adapter->pdev;
1151         int vfs = 0, assigned_vfs = 0, pos;
1152         u16 offset, stride;
1153
1154         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1155         if (!pos)
1156                 return 0;
1157         pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1158         pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1159
1160         dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1161         while (dev) {
1162                 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
1163                         vfs++;
1164                         if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1165                                 assigned_vfs++;
1166                 }
1167                 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1168         }
1169         return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1170 }
1171
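/* Adaptive interrupt coalescing: at most once a second, recompute the EQ
 * delay from the observed RX packet rate and program it via
 * be_cmd_modify_eqd() when it changes. With AIC disabled, the static
 * eqo->eqd value is used instead.
 */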
1172 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1173 {
1174         struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1175         ulong now = jiffies;
1176         ulong delta = now - stats->rx_jiffies;
1177         u64 pkts;
1178         unsigned int start, eqd;
1179
1180         if (!eqo->enable_aic) {
1181                 eqd = eqo->eqd;
1182                 goto modify_eqd;
1183         }
1184
1185         if (eqo->idx >= adapter->num_rx_qs)
1186                 return;
1187
1188         stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1189
1190         /* Wrapped around */
1191         if (time_before(now, stats->rx_jiffies)) {
1192                 stats->rx_jiffies = now;
1193                 return;
1194         }
1195
1196         /* Update once a second */
1197         if (delta < HZ)
1198                 return;
1199
1200         do {
1201                 start = u64_stats_fetch_begin_bh(&stats->sync);
1202                 pkts = stats->rx_pkts;
1203         } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1204
1205         stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1206         stats->rx_pkts_prev = pkts;
1207         stats->rx_jiffies = now;
1208         eqd = (stats->rx_pps / 110000) << 3;
1209         eqd = min(eqd, eqo->max_eqd);
1210         eqd = max(eqd, eqo->min_eqd);
1211         if (eqd < 10)
1212                 eqd = 0;
1213
1214 modify_eqd:
1215         if (eqd != eqo->cur_eqd) {
1216                 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1217                 eqo->cur_eqd = eqd;
1218         }
1219 }
1220
1221 static void be_rx_stats_update(struct be_rx_obj *rxo,
1222                 struct be_rx_compl_info *rxcp)
1223 {
1224         struct be_rx_stats *stats = rx_stats(rxo);
1225
1226         u64_stats_update_begin(&stats->sync);
1227         stats->rx_compl++;
1228         stats->rx_bytes += rxcp->pkt_size;
1229         stats->rx_pkts++;
1230         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1231                 stats->rx_mcast_pkts++;
1232         if (rxcp->err)
1233                 stats->rx_compl_err++;
1234         u64_stats_update_end(&stats->sync);
1235 }
1236
1237 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1238 {
1239         /* L4 checksum is not reliable for non-TCP/UDP packets.
1240          * Also ignore ipcksm for ipv6 pkts */
1241         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1242                                 (rxcp->ip_csum || rxcp->ipv6);
1243 }
1244
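/* Look up the page_info backing an RX frag and release its DMA mapping
 * once the last frag sharing the page has been consumed.
 */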
1245 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1246                                                 u16 frag_idx)
1247 {
1248         struct be_adapter *adapter = rxo->adapter;
1249         struct be_rx_page_info *rx_page_info;
1250         struct be_queue_info *rxq = &rxo->q;
1251
1252         rx_page_info = &rxo->page_info_tbl[frag_idx];
1253         BUG_ON(!rx_page_info->page);
1254
1255         if (rx_page_info->last_page_user) {
1256                 dma_unmap_page(&adapter->pdev->dev,
1257                                dma_unmap_addr(rx_page_info, bus),
1258                                adapter->big_page_size, DMA_FROM_DEVICE);
1259                 rx_page_info->last_page_user = false;
1260         }
1261
1262         atomic_dec(&rxq->used);
1263         return rx_page_info;
1264 }
1265
1266 /* Throw away the data in the Rx completion */
1267 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1268                                 struct be_rx_compl_info *rxcp)
1269 {
1270         struct be_queue_info *rxq = &rxo->q;
1271         struct be_rx_page_info *page_info;
1272         u16 i, num_rcvd = rxcp->num_rcvd;
1273
1274         for (i = 0; i < num_rcvd; i++) {
1275                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1276                 put_page(page_info->page);
1277                 memset(page_info, 0, sizeof(*page_info));
1278                 index_inc(&rxcp->rxq_idx, rxq->len);
1279         }
1280 }
1281
1282 /*
1283  * skb_fill_rx_data forms a complete skb for an ether frame
1284  * indicated by rxcp.
1285  */
1286 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1287                              struct be_rx_compl_info *rxcp)
1288 {
1289         struct be_queue_info *rxq = &rxo->q;
1290         struct be_rx_page_info *page_info;
1291         u16 i, j;
1292         u16 hdr_len, curr_frag_len, remaining;
1293         u8 *start;
1294
1295         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1296         start = page_address(page_info->page) + page_info->page_offset;
1297         prefetch(start);
1298
1299         /* Copy data in the first descriptor of this completion */
1300         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1301
1302         skb->len = curr_frag_len;
1303         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1304                 memcpy(skb->data, start, curr_frag_len);
1305                 /* Complete packet has now been moved to data */
1306                 put_page(page_info->page);
1307                 skb->data_len = 0;
1308                 skb->tail += curr_frag_len;
1309         } else {
1310                 hdr_len = ETH_HLEN;
1311                 memcpy(skb->data, start, hdr_len);
1312                 skb_shinfo(skb)->nr_frags = 1;
1313                 skb_frag_set_page(skb, 0, page_info->page);
1314                 skb_shinfo(skb)->frags[0].page_offset =
1315                                         page_info->page_offset + hdr_len;
1316                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1317                 skb->data_len = curr_frag_len - hdr_len;
1318                 skb->truesize += rx_frag_size;
1319                 skb->tail += hdr_len;
1320         }
1321         page_info->page = NULL;
1322
1323         if (rxcp->pkt_size <= rx_frag_size) {
1324                 BUG_ON(rxcp->num_rcvd != 1);
1325                 return;
1326         }
1327
1328         /* More frags present for this completion */
1329         index_inc(&rxcp->rxq_idx, rxq->len);
1330         remaining = rxcp->pkt_size - curr_frag_len;
1331         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1332                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1333                 curr_frag_len = min(remaining, rx_frag_size);
1334
1335                 /* Coalesce all frags from the same physical page in one slot */
1336                 if (page_info->page_offset == 0) {
1337                         /* Fresh page */
1338                         j++;
1339                         skb_frag_set_page(skb, j, page_info->page);
1340                         skb_shinfo(skb)->frags[j].page_offset =
1341                                                         page_info->page_offset;
1342                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1343                         skb_shinfo(skb)->nr_frags++;
1344                 } else {
1345                         put_page(page_info->page);
1346                 }
1347
1348                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1349                 skb->len += curr_frag_len;
1350                 skb->data_len += curr_frag_len;
1351                 skb->truesize += rx_frag_size;
1352                 remaining -= curr_frag_len;
1353                 index_inc(&rxcp->rxq_idx, rxq->len);
1354                 page_info->page = NULL;
1355         }
1356         BUG_ON(j > MAX_SKB_FRAGS);
1357 }
1358
1359 /* Process the RX completion indicated by rxcp when GRO is disabled */
1360 static void be_rx_compl_process(struct be_rx_obj *rxo,
1361                                 struct be_rx_compl_info *rxcp)
1362 {
1363         struct be_adapter *adapter = rxo->adapter;
1364         struct net_device *netdev = adapter->netdev;
1365         struct sk_buff *skb;
1366
1367         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1368         if (unlikely(!skb)) {
1369                 rx_stats(rxo)->rx_drops_no_skbs++;
1370                 be_rx_compl_discard(rxo, rxcp);
1371                 return;
1372         }
1373
1374         skb_fill_rx_data(rxo, skb, rxcp);
1375
1376         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1377                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1378         else
1379                 skb_checksum_none_assert(skb);
1380
1381         skb->protocol = eth_type_trans(skb, netdev);
1382         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1383         if (netdev->features & NETIF_F_RXHASH)
1384                 skb->rxhash = rxcp->rss_hash;
1385
1387         if (rxcp->vlanf)
1388                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1389
1390         netif_receive_skb(skb);
1391 }
1392
1393 /* Process the RX completion indicated by rxcp when GRO is enabled */
1394 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1395                              struct be_rx_compl_info *rxcp)
1396 {
1397         struct be_adapter *adapter = rxo->adapter;
1398         struct be_rx_page_info *page_info;
1399         struct sk_buff *skb = NULL;
1400         struct be_queue_info *rxq = &rxo->q;
1401         u16 remaining, curr_frag_len;
1402         u16 i, j;
1403
1404         skb = napi_get_frags(napi);
1405         if (!skb) {
1406                 be_rx_compl_discard(rxo, rxcp);
1407                 return;
1408         }
1409
1410         remaining = rxcp->pkt_size;
1411         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1412                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1413
1414                 curr_frag_len = min(remaining, rx_frag_size);
1415
1416                 /* Coalesce all frags from the same physical page in one slot */
1417                 if (i == 0 || page_info->page_offset == 0) {
1418                         /* First frag or Fresh page */
1419                         j++;
1420                         skb_frag_set_page(skb, j, page_info->page);
1421                         skb_shinfo(skb)->frags[j].page_offset =
1422                                                         page_info->page_offset;
1423                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1424                 } else {
1425                         put_page(page_info->page);
1426                 }
1427                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1428                 skb->truesize += rx_frag_size;
1429                 remaining -= curr_frag_len;
1430                 index_inc(&rxcp->rxq_idx, rxq->len);
1431                 memset(page_info, 0, sizeof(*page_info));
1432         }
1433         BUG_ON(j > MAX_SKB_FRAGS);
1434
1435         skb_shinfo(skb)->nr_frags = j + 1;
1436         skb->len = rxcp->pkt_size;
1437         skb->data_len = rxcp->pkt_size;
1438         skb->ip_summed = CHECKSUM_UNNECESSARY;
1439         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1440         if (adapter->netdev->features & NETIF_F_RXHASH)
1441                 skb->rxhash = rxcp->rss_hash;
1442
1443         if (rxcp->vlanf)
1444                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1445
1446         napi_gro_frags(napi);
1447 }
1448
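/* Copy each bit-field of a v1 RX completion (already converted to CPU
 * order by the caller) into the rxcp cache via AMAP_GET_BITS.
 */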
1449 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1450                                  struct be_rx_compl_info *rxcp)
1451 {
1452         rxcp->pkt_size =
1453                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1454         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1455         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1456         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1457         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1458         rxcp->ip_csum =
1459                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1460         rxcp->l4_csum =
1461                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1462         rxcp->ipv6 =
1463                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1464         rxcp->rxq_idx =
1465                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1466         rxcp->num_rcvd =
1467                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1468         rxcp->pkt_type =
1469                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1470         rxcp->rss_hash =
1471                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1472         if (rxcp->vlanf) {
1473                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1474                                           compl);
1475                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1476                                                compl);
1477         }
1478         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1479 }
1480
1481 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1482                                  struct be_rx_compl_info *rxcp)
1483 {
1484         rxcp->pkt_size =
1485                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1486         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1487         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1488         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1489         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1490         rxcp->ip_csum =
1491                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1492         rxcp->l4_csum =
1493                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1494         rxcp->ipv6 =
1495                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1496         rxcp->rxq_idx =
1497                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1498         rxcp->num_rcvd =
1499                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1500         rxcp->pkt_type =
1501                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1502         rxcp->rss_hash =
1503                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1504         if (rxcp->vlanf) {
1505                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1506                                           compl);
1507                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1508                                                compl);
1509         }
1510         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1511 }
1512
1513 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1514 {
1515         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1516         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1517         struct be_adapter *adapter = rxo->adapter;
1518
1519         /* For checking the valid bit it is OK to use either definition as the
1520          * valid bit is at the same position in both v0 and v1 Rx compl */
1521         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1522                 return NULL;
1523
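             /* Ensure the rest of the completion is read only after the
              * valid bit has been observed as set.
              */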
1524         rmb();
1525         be_dws_le_to_cpu(compl, sizeof(*compl));
1526
1527         if (adapter->be3_native)
1528                 be_parse_rx_compl_v1(compl, rxcp);
1529         else
1530                 be_parse_rx_compl_v0(compl, rxcp);
1531
1532         if (rxcp->vlanf) {
1533                 /* vlanf could be wrongly set in some cards.
1534                  * Ignore it if vtm is not set */
1535                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1536                         rxcp->vlanf = 0;
1537
1538                 if (!lancer_chip(adapter))
1539                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1540
1541                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1542                     !adapter->vlan_tag[rxcp->vlan_tag])
1543                         rxcp->vlanf = 0;
1544         }
1545
1546         /* As the compl has been parsed, reset it; we won't touch it again */
1547         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1548
1549         queue_tail_inc(&rxo->cq);
1550         return rxcp;
1551 }
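/* A minimal sketch of the consumer pattern (cf. be_process_rx() below):
 *
 *      while ((rxcp = be_rx_compl_get(rxo)) != NULL)
 *              ... process or discard rxcp ...
 *      be_cq_notify(adapter, rxo->cq.id, true, work_done);
 */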
1552
1553 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1554 {
1555         u32 order = get_order(size);
1556
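             /* Higher-order allocations must be compound pages so that
              * get_page()/put_page() on individual rx fragments reference
              * the head page correctly.
              */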
1557         if (order > 0)
1558                 gfp |= __GFP_COMP;
1559         return alloc_pages(gfp, order);
1560 }
1561
1562 /*
1563  * Allocate a page, split it into fragments of size rx_frag_size and post as
1564  * receive buffers to BE
1565  */
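/* For example, assuming rx_frag_size = 2048 and a 4K big_page_size, each
 * page yields two frags at offsets 0 and 2048; the second is marked
 * last_page_user and a fresh page is allocated for the next descriptor.
 */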
1566 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1567 {
1568         struct be_adapter *adapter = rxo->adapter;
1569         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1570         struct be_queue_info *rxq = &rxo->q;
1571         struct page *pagep = NULL;
1572         struct be_eth_rx_d *rxd;
1573         u64 page_dmaaddr = 0, frag_dmaaddr;
1574         u32 posted, page_offset = 0;
1575
1576         page_info = &rxo->page_info_tbl[rxq->head];
1577         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1578                 if (!pagep) {
1579                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1580                         if (unlikely(!pagep)) {
1581                                 rx_stats(rxo)->rx_post_fail++;
1582                                 break;
1583                         }
1584                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1585                                                     0, adapter->big_page_size,
1586                                                     DMA_FROM_DEVICE);
1587                         page_info->page_offset = 0;
1588                 } else {
1589                         get_page(pagep);
1590                         page_info->page_offset = page_offset + rx_frag_size;
1591                 }
1592                 page_offset = page_info->page_offset;
1593                 page_info->page = pagep;
1594                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1595                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1596
1597                 rxd = queue_head_node(rxq);
1598                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1599                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1600
1601                 /* Any space left in the current big page for another frag? */
1602                 if ((page_offset + rx_frag_size + rx_frag_size) >
1603                                         adapter->big_page_size) {
1604                         pagep = NULL;
1605                         page_info->last_page_user = true;
1606                 }
1607
1608                 prev_page_info = page_info;
1609                 queue_head_inc(rxq);
1610                 page_info = &rxo->page_info_tbl[rxq->head];
1611         }
1612         if (pagep)
1613                 prev_page_info->last_page_user = true;
1614
1615         if (posted) {
1616                 atomic_add(posted, &rxq->used);
1617                 be_rxq_notify(adapter, rxq->id, posted);
1618         } else if (atomic_read(&rxq->used) == 0) {
1619                 /* Let be_worker replenish when memory is available */
1620                 rxo->rx_post_starved = true;
1621         }
1622 }
1623
1624 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1625 {
1626         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1627
1628         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1629                 return NULL;
1630
1631         rmb();
1632         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1633
1634         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1635
1636         queue_tail_inc(tx_cq);
1637         return txcp;
1638 }
1639
1640 static u16 be_tx_compl_process(struct be_adapter *adapter,
1641                 struct be_tx_obj *txo, u16 last_index)
1642 {
1643         struct be_queue_info *txq = &txo->q;
1644         struct be_eth_wrb *wrb;
1645         struct sk_buff **sent_skbs = txo->sent_skb_list;
1646         struct sk_buff *sent_skb;
1647         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1648         bool unmap_skb_hdr = true;
1649
1650         sent_skb = sent_skbs[txq->tail];
1651         BUG_ON(!sent_skb);
1652         sent_skbs[txq->tail] = NULL;
1653
1654         /* skip header wrb */
1655         queue_tail_inc(txq);
1656
1657         do {
1658                 cur_index = txq->tail;
1659                 wrb = queue_tail_node(txq);
1660                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1661                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1662                 unmap_skb_hdr = false;
1663
1664                 num_wrbs++;
1665                 queue_tail_inc(txq);
1666         } while (cur_index != last_index);
1667
1668         kfree_skb(sent_skb);
1669         return num_wrbs;
1670 }
1671
1672 /* Return the number of events in the event queue, consuming them */
1673 static inline int events_get(struct be_eq_obj *eqo)
1674 {
1675         struct be_eq_entry *eqe;
1676         int num = 0;
1677
1678         do {
1679                 eqe = queue_tail_node(&eqo->q);
1680                 if (eqe->evt == 0)
1681                         break;
1682
1683                 rmb();
1684                 eqe->evt = 0;
1685                 num++;
1686                 queue_tail_inc(&eqo->q);
1687         } while (true);
1688
1689         return num;
1690 }
1691
1692 /* Leaves the EQ in disarmed state */
1693 static void be_eq_clean(struct be_eq_obj *eqo)
1694 {
1695         int num = events_get(eqo);
1696
1697         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1698 }
1699
1700 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1701 {
1702         struct be_rx_page_info *page_info;
1703         struct be_queue_info *rxq = &rxo->q;
1704         struct be_queue_info *rx_cq = &rxo->cq;
1705         struct be_rx_compl_info *rxcp;
1706         struct be_adapter *adapter = rxo->adapter;
1707         int flush_wait = 0;
1708         u16 tail;
1709
1710         /* Consume pending rx completions.
1711          * Wait for the flush completion (identified by zero num_rcvd)
1712          * to arrive. Notify CQ even when there are no more CQ entries
1713          * for HW to flush partially coalesced CQ entries.
1714          * In Lancer, there is no need to wait for flush compl.
1715          */
1716         for (;;) {
1717                 rxcp = be_rx_compl_get(rxo);
1718                 if (rxcp == NULL) {
1719                         if (lancer_chip(adapter))
1720                                 break;
1721
1722                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1723                                 dev_warn(&adapter->pdev->dev,
1724                                          "did not receive flush compl\n");
1725                                 break;
1726                         }
1727                         be_cq_notify(adapter, rx_cq->id, true, 0);
1728                         mdelay(1);
1729                 } else {
1730                         be_rx_compl_discard(rxo, rxcp);
1731                         be_cq_notify(adapter, rx_cq->id, true, 1);
1732                         if (rxcp->num_rcvd == 0)
1733                                 break;
1734                 }
1735         }
1736
1737         /* After cleanup, leave the CQ in unarmed state */
1738         be_cq_notify(adapter, rx_cq->id, false, 0);
1739
1740         /* Then free posted rx buffers that were not used */
1741         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1742         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1743                 page_info = get_rx_page_info(rxo, tail);
1744                 put_page(page_info->page);
1745                 memset(page_info, 0, sizeof(*page_info));
1746         }
1747         BUG_ON(atomic_read(&rxq->used));
1748         rxq->tail = rxq->head = 0;
1749 }
1750
1751 static void be_tx_compl_clean(struct be_adapter *adapter)
1752 {
1753         struct be_tx_obj *txo;
1754         struct be_queue_info *txq;
1755         struct be_eth_tx_compl *txcp;
1756         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1757         struct sk_buff *sent_skb;
1758         bool dummy_wrb;
1759         int i, pending_txqs;
1760
1761         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1762         do {
1763                 pending_txqs = adapter->num_tx_qs;
1764
1765                 for_all_tx_queues(adapter, txo, i) {
1766                         txq = &txo->q;
1767                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1768                                 end_idx =
1769                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1770                                                       wrb_index, txcp);
1771                                 num_wrbs += be_tx_compl_process(adapter, txo,
1772                                                                 end_idx);
1773                                 cmpl++;
1774                         }
1775                         if (cmpl) {
1776                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1777                                 atomic_sub(num_wrbs, &txq->used);
1778                                 cmpl = 0;
1779                                 num_wrbs = 0;
1780                         }
1781                         if (atomic_read(&txq->used) == 0)
1782                                 pending_txqs--;
1783                 }
1784
1785                 if (pending_txqs == 0 || ++timeo > 200)
1786                         break;
1787
1788                 mdelay(1);
1789         } while (true);
1790
1791         for_all_tx_queues(adapter, txo, i) {
1792                 txq = &txo->q;
1793                 if (atomic_read(&txq->used))
1794                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1795                                 atomic_read(&txq->used));
1796
1797                 /* free posted tx for which compls will never arrive */
1798                 while (atomic_read(&txq->used)) {
1799                         sent_skb = txo->sent_skb_list[txq->tail];
1800                         end_idx = txq->tail;
1801                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1802                                                    &dummy_wrb);
1803                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1804                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1805                         atomic_sub(num_wrbs, &txq->used);
1806                 }
1807         }
1808 }
1809
1810 static void be_evt_queues_destroy(struct be_adapter *adapter)
1811 {
1812         struct be_eq_obj *eqo;
1813         int i;
1814
1815         for_all_evt_queues(adapter, eqo, i) {
1816                 if (eqo->q.created) {
1817                         be_eq_clean(eqo);
1818                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1819                 }
1820                 be_queue_free(adapter, &eqo->q);
1821         }
1822 }
1823
1824 static int be_evt_queues_create(struct be_adapter *adapter)
1825 {
1826         struct be_queue_info *eq;
1827         struct be_eq_obj *eqo;
1828         int i, rc;
1829
1830         adapter->num_evt_qs = num_irqs(adapter);
1831
1832         for_all_evt_queues(adapter, eqo, i) {
1833                 eqo->adapter = adapter;
1834                 eqo->tx_budget = BE_TX_BUDGET;
1835                 eqo->idx = i;
1836                 eqo->max_eqd = BE_MAX_EQD;
1837                 eqo->enable_aic = true;
1838
1839                 eq = &eqo->q;
1840                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1841                                         sizeof(struct be_eq_entry));
1842                 if (rc)
1843                         return rc;
1844
1845                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1846                 if (rc)
1847                         return rc;
1848         }
1849         return 0;
1850 }
1851
1852 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1853 {
1854         struct be_queue_info *q;
1855
1856         q = &adapter->mcc_obj.q;
1857         if (q->created)
1858                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1859         be_queue_free(adapter, q);
1860
1861         q = &adapter->mcc_obj.cq;
1862         if (q->created)
1863                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1864         be_queue_free(adapter, q);
1865 }
1866
1867 /* Must be called only after TX qs are created as MCC shares TX EQ */
1868 static int be_mcc_queues_create(struct be_adapter *adapter)
1869 {
1870         struct be_queue_info *q, *cq;
1871
1872         cq = &adapter->mcc_obj.cq;
1873         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1874                         sizeof(struct be_mcc_compl)))
1875                 goto err;
1876
1877         /* Use the default EQ for MCC completions */
1878         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1879                 goto mcc_cq_free;
1880
1881         q = &adapter->mcc_obj.q;
1882         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1883                 goto mcc_cq_destroy;
1884
1885         if (be_cmd_mccq_create(adapter, q, cq))
1886                 goto mcc_q_free;
1887
1888         return 0;
1889
1890 mcc_q_free:
1891         be_queue_free(adapter, q);
1892 mcc_cq_destroy:
1893         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1894 mcc_cq_free:
1895         be_queue_free(adapter, cq);
1896 err:
1897         return -1;
1898 }
1899
1900 static void be_tx_queues_destroy(struct be_adapter *adapter)
1901 {
1902         struct be_queue_info *q;
1903         struct be_tx_obj *txo;
1904         u8 i;
1905
1906         for_all_tx_queues(adapter, txo, i) {
1907                 q = &txo->q;
1908                 if (q->created)
1909                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1910                 be_queue_free(adapter, q);
1911
1912                 q = &txo->cq;
1913                 if (q->created)
1914                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1915                 be_queue_free(adapter, q);
1916         }
1917 }
1918
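/* Use a single TXQ when SR-IOV is wanted on BEx, in multi-channel mode,
 * on BEx VFs, or on BE2 chips; otherwise use all TX queues the f/w allows.
 */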
1919 static int be_num_txqs_want(struct be_adapter *adapter)
1920 {
1921         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1922             be_is_mc(adapter) ||
1923             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1924             BE2_chip(adapter))
1925                 return 1;
1926         else
1927                 return adapter->max_tx_queues;
1928 }
1929
1930 static int be_tx_cqs_create(struct be_adapter *adapter)
1931 {
1932         struct be_queue_info *cq, *eq;
1933         int status;
1934         struct be_tx_obj *txo;
1935         u8 i;
1936
1937         adapter->num_tx_qs = be_num_txqs_want(adapter);
1938         if (adapter->num_tx_qs != MAX_TX_QS) {
1939                 rtnl_lock();
1940                 netif_set_real_num_tx_queues(adapter->netdev,
1941                         adapter->num_tx_qs);
1942                 rtnl_unlock();
1943         }
1944
1945         for_all_tx_queues(adapter, txo, i) {
1946                 cq = &txo->cq;
1947                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1948                                         sizeof(struct be_eth_tx_compl));
1949                 if (status)
1950                         return status;
1951
1952                 /* If num_evt_qs is less than num_tx_qs, then more than
1953                  * one txq shares an eq
1954                  */
1955                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1956                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1957                 if (status)
1958                         return status;
1959         }
1960         return 0;
1961 }
1962
1963 static int be_tx_qs_create(struct be_adapter *adapter)
1964 {
1965         struct be_tx_obj *txo;
1966         int i, status;
1967
1968         for_all_tx_queues(adapter, txo, i) {
1969                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1970                                         sizeof(struct be_eth_wrb));
1971                 if (status)
1972                         return status;
1973
1974                 status = be_cmd_txq_create(adapter, txo);
1975                 if (status)
1976                         return status;
1977         }
1978
1979         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1980                  adapter->num_tx_qs);
1981         return 0;
1982 }
1983
1984 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1985 {
1986         struct be_queue_info *q;
1987         struct be_rx_obj *rxo;
1988         int i;
1989
1990         for_all_rx_queues(adapter, rxo, i) {
1991                 q = &rxo->cq;
1992                 if (q->created)
1993                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1994                 be_queue_free(adapter, q);
1995         }
1996 }
1997
1998 static int be_rx_cqs_create(struct be_adapter *adapter)
1999 {
2000         struct be_queue_info *eq, *cq;
2001         struct be_rx_obj *rxo;
2002         int rc, i;
2003
2004         /* We'll create as many RSS rings as there are irqs.
2005          * But when there's only one irq there's no use creating RSS rings
2006          */
2007         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2008                                 num_irqs(adapter) + 1 : 1;
2009         if (adapter->num_rx_qs != MAX_RX_QS) {
2010                 rtnl_lock();
2011                 netif_set_real_num_rx_queues(adapter->netdev,
2012                                              adapter->num_rx_qs);
2013                 rtnl_unlock();
2014         }
2015
2016         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2017         for_all_rx_queues(adapter, rxo, i) {
2018                 rxo->adapter = adapter;
2019                 cq = &rxo->cq;
2020                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2021                                 sizeof(struct be_eth_rx_compl));
2022                 if (rc)
2023                         return rc;
2024
2025                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2026                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2027                 if (rc)
2028                         return rc;
2029         }
2030
2031         dev_info(&adapter->pdev->dev,
2032                  "created %d RSS queue(s) and 1 default RX queue\n",
2033                  adapter->num_rx_qs - 1);
2034         return 0;
2035 }
2036
2037 static irqreturn_t be_intx(int irq, void *dev)
2038 {
2039         struct be_eq_obj *eqo = dev;
2040         struct be_adapter *adapter = eqo->adapter;
2041         int num_evts = 0;
2042
2043         /* IRQ is not expected when NAPI is scheduled as the EQ
2044          * will not be armed.
2045          * But this can happen on Lancer INTx where it takes
2046          * a while to de-assert INTx or in BE2 where occasionally
2047          * an interrupt may be raised even when EQ is unarmed.
2048          * If NAPI is already scheduled, then counting & notifying
2049          * events will orphan them.
2050          */
2051         if (napi_schedule_prep(&eqo->napi)) {
2052                 num_evts = events_get(eqo);
2053                 __napi_schedule(&eqo->napi);
2054                 if (num_evts)
2055                         eqo->spurious_intr = 0;
2056         }
2057         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2058
2059         /* Return IRQ_HANDLED only for the first spurious intr
2060          * after a valid intr to stop the kernel from branding
2061          * this irq as a bad one!
2062          */
2063         if (num_evts || eqo->spurious_intr++ == 0)
2064                 return IRQ_HANDLED;
2065         else
2066                 return IRQ_NONE;
2067 }
2068
2069 static irqreturn_t be_msix(int irq, void *dev)
2070 {
2071         struct be_eq_obj *eqo = dev;
2072
2073         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2074         napi_schedule(&eqo->napi);
2075         return IRQ_HANDLED;
2076 }
2077
2078 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2079 {
2080         return rxcp->tcpf && !rxcp->err;
2081 }
2082
2083 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2084                         int budget)
2085 {
2086         struct be_adapter *adapter = rxo->adapter;
2087         struct be_queue_info *rx_cq = &rxo->cq;
2088         struct be_rx_compl_info *rxcp;
2089         u32 work_done;
2090
2091         for (work_done = 0; work_done < budget; work_done++) {
2092                 rxcp = be_rx_compl_get(rxo);
2093                 if (!rxcp)
2094                         break;
2095
2096                 /* Is it a flush compl that has no data? */
2097                 if (unlikely(rxcp->num_rcvd == 0))
2098                         goto loop_continue;
2099
2100                 /* Discard a compl with partial DMA (Lancer B0) */
2101                 if (unlikely(!rxcp->pkt_size)) {
2102                         be_rx_compl_discard(rxo, rxcp);
2103                         goto loop_continue;
2104                 }
2105
2106                 /* On BE drop pkts that arrive due to imperfect filtering in
2107                  * promiscuous mode on some SKUs
2108                  */
2109                 if (unlikely(rxcp->port != adapter->port_num &&
2110                                 !lancer_chip(adapter))) {
2111                         be_rx_compl_discard(rxo, rxcp);
2112                         goto loop_continue;
2113                 }
2114
2115                 if (do_gro(rxcp))
2116                         be_rx_compl_process_gro(rxo, napi, rxcp);
2117                 else
2118                         be_rx_compl_process(rxo, rxcp);
2119 loop_continue:
2120                 be_rx_stats_update(rxo, rxcp);
2121         }
2122
2123         if (work_done) {
2124                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2125
2126                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2127                         be_post_rx_frags(rxo, GFP_ATOMIC);
2128         }
2129
2130         return work_done;
2131 }
2132
2133 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2134                           int budget, int idx)
2135 {
2136         struct be_eth_tx_compl *txcp;
2137         int num_wrbs = 0, work_done;
2138
2139         for (work_done = 0; work_done < budget; work_done++) {
2140                 txcp = be_tx_compl_get(&txo->cq);
2141                 if (!txcp)
2142                         break;
2143                 num_wrbs += be_tx_compl_process(adapter, txo,
2144                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2145                                         wrb_index, txcp));
2146         }
2147
2148         if (work_done) {
2149                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2150                 atomic_sub(num_wrbs, &txo->q.used);
2151
2152                 /* As Tx wrbs have been freed up, wake up netdev queue
2153                  * if it was stopped due to lack of tx wrbs.  */
2154                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2155                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2156                         netif_wake_subqueue(adapter->netdev, idx);
2157                 }
2158
2159                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2160                 tx_stats(txo)->tx_compl += work_done;
2161                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2162         }
2163         return (work_done < budget); /* Done */
2164 }
2165
2166 int be_poll(struct napi_struct *napi, int budget)
2167 {
2168         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2169         struct be_adapter *adapter = eqo->adapter;
2170         int max_work = 0, work, i, num_evts;
2171         bool tx_done;
2172
2173         num_evts = events_get(eqo);
2174
2175         /* Process all TXQs serviced by this EQ */
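             /* e.g., with 8 TXQs and 4 EQs, EQ1 services TXQ1 and TXQ5 */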
2176         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2177                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2178                                         eqo->tx_budget, i);
2179                 if (!tx_done)
2180                         max_work = budget;
2181         }
2182
2183         /* This loop will iterate twice for EQ0 in which
2184          * completions of the last RXQ (default one) are also processed.
2185          * For other EQs the loop iterates only once
2186          */
2187         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2188                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2189                 max_work = max(work, max_work);
2190         }
2191
2192         if (is_mcc_eqo(eqo))
2193                 be_process_mcc(adapter);
2194
2195         if (max_work < budget) {
2196                 napi_complete(napi);
2197                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2198         } else {
2199                 /* As we'll continue in polling mode, count and clear events */
2200                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2201         }
2202         return max_work;
2203 }
2204
2205 void be_detect_error(struct be_adapter *adapter)
2206 {
2207         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2208         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2209         u32 i;
2210
2211         if (be_hw_error(adapter))
2212                 return;
2213
2214         if (lancer_chip(adapter)) {
2215                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2216                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2217                         sliport_err1 = ioread32(adapter->db +
2218                                         SLIPORT_ERROR1_OFFSET);
2219                         sliport_err2 = ioread32(adapter->db +
2220                                         SLIPORT_ERROR2_OFFSET);
2221                 }
2222         } else {
2223                 pci_read_config_dword(adapter->pdev,
2224                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2225                 pci_read_config_dword(adapter->pdev,
2226                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2227                 pci_read_config_dword(adapter->pdev,
2228                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2229                 pci_read_config_dword(adapter->pdev,
2230                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2231
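                     /* Ignore UE bits that are masked off in the UE status
                      * mask registers */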
2232                 ue_lo = (ue_lo & ~ue_lo_mask);
2233                 ue_hi = (ue_hi & ~ue_hi_mask);
2234         }
2235
2236         /* On certain platforms BE hardware can indicate spurious UEs.
2237          * A real UE would make the h/w stop working completely anyway;
2238          * hence hw_error is not set on UE detection.
2239          */
2240         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2241                 adapter->hw_error = true;
2242                 dev_err(&adapter->pdev->dev,
2243                         "Error detected in the card\n");
2244         }
2245
2246         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2247                 dev_err(&adapter->pdev->dev,
2248                         "ERR: sliport status 0x%x\n", sliport_status);
2249                 dev_err(&adapter->pdev->dev,
2250                         "ERR: sliport error1 0x%x\n", sliport_err1);
2251                 dev_err(&adapter->pdev->dev,
2252                         "ERR: sliport error2 0x%x\n", sliport_err2);
2253         }
2254
2255         if (ue_lo) {
2256                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2257                         if (ue_lo & 1)
2258                                 dev_err(&adapter->pdev->dev,
2259                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2260                 }
2261         }
2262
2263         if (ue_hi) {
2264                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2265                         if (ue_hi & 1)
2266                                 dev_err(&adapter->pdev->dev,
2267                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2268                 }
2269         }
2271 }
2272
2273 static void be_msix_disable(struct be_adapter *adapter)
2274 {
2275         if (msix_enabled(adapter)) {
2276                 pci_disable_msix(adapter->pdev);
2277                 adapter->num_msix_vec = 0;
2278         }
2279 }
2280
2281 static uint be_num_rss_want(struct be_adapter *adapter)
2282 {
2283         u32 num = 0;
2284
2285         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2286             (lancer_chip(adapter) ||
2287              (!sriov_want(adapter) && be_physfn(adapter)))) {
2288                 num = adapter->max_rss_queues;
2289                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2290         }
2291         return num;
2292 }
2293
2294 static void be_msix_enable(struct be_adapter *adapter)
2295 {
2296 #define BE_MIN_MSIX_VECTORS             1
2297         int i, status, num_vec, num_roce_vec = 0;
2298         struct device *dev = &adapter->pdev->dev;
2299
2300         /* If RSS queues are not used, need a vec for default RX Q */
2301         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2302         if (be_roce_supported(adapter)) {
2303                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2304                                         (num_online_cpus() + 1));
2305                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2306                 num_vec += num_roce_vec;
2307                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2308         }
2309         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2310
2311         for (i = 0; i < num_vec; i++)
2312                 adapter->msix_entries[i].entry = i;
2313
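             /* pci_enable_msix() returns 0 on success; a positive return is
              * the number of vectors actually available, in which case the
              * request is retried with that smaller count.
              */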
2314         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2315         if (status == 0) {
2316                 goto done;
2317         } else if (status >= BE_MIN_MSIX_VECTORS) {
2318                 num_vec = status;
2319                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2320                                 num_vec) == 0)
2321                         goto done;
2322         }
2323
2324         dev_warn(dev, "MSIx enable failed\n");
2325         return;
2326 done:
2327         if (be_roce_supported(adapter)) {
2328                 if (num_vec > num_roce_vec) {
2329                         adapter->num_msix_vec = num_vec - num_roce_vec;
2330                         adapter->num_msix_roce_vec =
2331                                 num_vec - adapter->num_msix_vec;
2332                 } else {
2333                         adapter->num_msix_vec = num_vec;
2334                         adapter->num_msix_roce_vec = 0;
2335                 }
2336         } else
2337                 adapter->num_msix_vec = num_vec;
2338         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2339         return;
2340 }
2341
2342 static inline int be_msix_vec_get(struct be_adapter *adapter,
2343                                 struct be_eq_obj *eqo)
2344 {
2345         return adapter->msix_entries[eqo->idx].vector;
2346 }
2347
2348 static int be_msix_register(struct be_adapter *adapter)
2349 {
2350         struct net_device *netdev = adapter->netdev;
2351         struct be_eq_obj *eqo;
2352         int status, i, vec;
2353
2354         for_all_evt_queues(adapter, eqo, i) {
2355                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2356                 vec = be_msix_vec_get(adapter, eqo);
2357                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2358                 if (status)
2359                         goto err_msix;
2360         }
2361
2362         return 0;
2363 err_msix:
2364         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2365                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2366         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2367                 status);
2368         be_msix_disable(adapter);
2369         return status;
2370 }
2371
2372 static int be_irq_register(struct be_adapter *adapter)
2373 {
2374         struct net_device *netdev = adapter->netdev;
2375         int status;
2376
2377         if (msix_enabled(adapter)) {
2378                 status = be_msix_register(adapter);
2379                 if (status == 0)
2380                         goto done;
2381                 /* INTx is not supported for VF */
2382                 if (!be_physfn(adapter))
2383                         return status;
2384         }
2385
2386         /* INTx: only the first EQ is used */
2387         netdev->irq = adapter->pdev->irq;
2388         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2389                              &adapter->eq_obj[0]);
2390         if (status) {
2391                 dev_err(&adapter->pdev->dev,
2392                         "INTx request IRQ failed - err %d\n", status);
2393                 return status;
2394         }
2395 done:
2396         adapter->isr_registered = true;
2397         return 0;
2398 }
2399
2400 static void be_irq_unregister(struct be_adapter *adapter)
2401 {
2402         struct net_device *netdev = adapter->netdev;
2403         struct be_eq_obj *eqo;
2404         int i;
2405
2406         if (!adapter->isr_registered)
2407                 return;
2408
2409         /* INTx */
2410         if (!msix_enabled(adapter)) {
2411                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2412                 goto done;
2413         }
2414
2415         /* MSIx */
2416         for_all_evt_queues(adapter, eqo, i)
2417                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2418
2419 done:
2420         adapter->isr_registered = false;
2421 }
2422
2423 static void be_rx_qs_destroy(struct be_adapter *adapter)
2424 {
2425         struct be_queue_info *q;
2426         struct be_rx_obj *rxo;
2427         int i;
2428
2429         for_all_rx_queues(adapter, rxo, i) {
2430                 q = &rxo->q;
2431                 if (q->created) {
2432                         be_cmd_rxq_destroy(adapter, q);
2433                         /* After the rxq is invalidated, wait for a grace time
2434                          * of 1ms for all dma to end and the flush compl to
2435                          * arrive
2436                          */
2437                         mdelay(1);
2438                         be_rx_cq_clean(rxo);
2439                 }
2440                 be_queue_free(adapter, q);
2441         }
2442 }
2443
2444 static int be_close(struct net_device *netdev)
2445 {
2446         struct be_adapter *adapter = netdev_priv(netdev);
2447         struct be_eq_obj *eqo;
2448         int i;
2449
2450         be_roce_dev_close(adapter);
2451
2452         for_all_evt_queues(adapter, eqo, i)
2453                 napi_disable(&eqo->napi);
2454
2455         be_async_mcc_disable(adapter);
2456
2457         /* Wait for all pending tx completions to arrive so that
2458          * all tx skbs are freed.
2459          */
2460         be_tx_compl_clean(adapter);
2461
2462         be_rx_qs_destroy(adapter);
2463
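             /* Wait for in-flight irq/napi processing on each EQ to finish,
              * then consume and ack any remaining events, leaving the EQs
              * unarmed.
              */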
2464         for_all_evt_queues(adapter, eqo, i) {
2465                 if (msix_enabled(adapter))
2466                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2467                 else
2468                         synchronize_irq(netdev->irq);
2469                 be_eq_clean(eqo);
2470         }
2471
2472         be_irq_unregister(adapter);
2473
2474         return 0;
2475 }
2476
2477 static int be_rx_qs_create(struct be_adapter *adapter)
2478 {
2479         struct be_rx_obj *rxo;
2480         int rc, i, j;
2481         u8 rsstable[128];
2482
2483         for_all_rx_queues(adapter, rxo, i) {
2484                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2485                                     sizeof(struct be_eth_rx_d));
2486                 if (rc)
2487                         return rc;
2488         }
2489
2490         /* The FW would like the default RXQ to be created first */
2491         rxo = default_rxo(adapter);
2492         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2493                                adapter->if_handle, false, &rxo->rss_id);
2494         if (rc)
2495                 return rc;
2496
2497         for_all_rss_queues(adapter, rxo, i) {
2498                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2499                                        rx_frag_size, adapter->if_handle,
2500                                        true, &rxo->rss_id);
2501                 if (rc)
2502                         return rc;
2503         }
2504
2505         if (be_multi_rxq(adapter)) {
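                     /* Stripe the RSS queue ids round-robin across the
                      * 128-entry indirection table; e.g. with 4 RSS rings the
                      * four rss_ids simply repeat in sequence.
                      */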
2506                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2507                         for_all_rss_queues(adapter, rxo, i) {
2508                                 if ((j + i) >= 128)
2509                                         break;
2510                                 rsstable[j + i] = rxo->rss_id;
2511                         }
2512                 }
2513                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2514                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2515
2516                 if (!BEx_chip(adapter))
2517                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2518                                                 RSS_ENABLE_UDP_IPV6;
2519
2520                 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2521                                        128);
2522                 if (rc) {
2523                         adapter->rss_flags = 0;
2524                         return rc;
2525                 }
2526         }
2527
2528         /* First time posting */
2529         for_all_rx_queues(adapter, rxo, i)
2530                 be_post_rx_frags(rxo, GFP_KERNEL);
2531         return 0;
2532 }
2533
2534 static int be_open(struct net_device *netdev)
2535 {
2536         struct be_adapter *adapter = netdev_priv(netdev);
2537         struct be_eq_obj *eqo;
2538         struct be_rx_obj *rxo;
2539         struct be_tx_obj *txo;
2540         u8 link_status;
2541         int status, i;
2542
2543         status = be_rx_qs_create(adapter);
2544         if (status)
2545                 goto err;
2546
2547         be_irq_register(adapter);
2548
2549         for_all_rx_queues(adapter, rxo, i)
2550                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2551
2552         for_all_tx_queues(adapter, txo, i)
2553                 be_cq_notify(adapter, txo->cq.id, true, 0);
2554
2555         be_async_mcc_enable(adapter);
2556
2557         for_all_evt_queues(adapter, eqo, i) {
2558                 napi_enable(&eqo->napi);
2559                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2560         }
2561
2562         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2563         if (!status)
2564                 be_link_status_update(adapter, link_status);
2565
2566         be_roce_dev_open(adapter);
2567         return 0;
2568 err:
2569         be_close(adapter->netdev);
2570         return -EIO;
2571 }
2572
2573 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2574 {
2575         struct be_dma_mem cmd;
2576         int status = 0;
2577         u8 mac[ETH_ALEN];
2578
2579         memset(mac, 0, ETH_ALEN);
2580
2581         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2582         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2583                                     GFP_KERNEL | __GFP_ZERO);
2584         if (cmd.va == NULL)
2585                 return -1;
2586
2587         if (enable) {
2588                 status = pci_write_config_dword(adapter->pdev,
2589                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2590                 if (status) {
2591                         dev_err(&adapter->pdev->dev,
2592                                 "Could not enable Wake-on-LAN\n");
2593                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2594                                           cmd.dma);
2595                         return status;
2596                 }
2597                 status = be_cmd_enable_magic_wol(adapter,
2598                                 adapter->netdev->dev_addr, &cmd);
2599                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2600                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2601         } else {
2602                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2603                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2604                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2605         }
2606
2607         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2608         return status;
2609 }
2610
2611 /*
2612  * Generate a seed MAC address from the PF MAC address using jhash.
2613  * MAC addresses for VFs are assigned incrementally starting from the seed.
2614  * These addresses are programmed in the ASIC by the PF and the VF driver
2615  * queries for the MAC address during its probe.
2616  */
2617 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2618 {
2619         u32 vf;
2620         int status = 0;
2621         u8 mac[ETH_ALEN];
2622         struct be_vf_cfg *vf_cfg;
2623
2624         be_vf_eth_addr_generate(adapter, mac);
2625
2626         for_all_vfs(adapter, vf_cfg, vf) {
2627                 if (lancer_chip(adapter)) {
2628                         status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2629                 } else {
2630                         status = be_cmd_pmac_add(adapter, mac,
2631                                                  vf_cfg->if_handle,
2632                                                  &vf_cfg->pmac_id, vf + 1);
2633                 }
2634
2635                 if (status)
2636                         dev_err(&adapter->pdev->dev,
2637                         "MAC address assignment failed for VF %d\n", vf);
2638                 else
2639                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2640
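                     /* Bump the last octet so the next VF gets the next
                      * address in sequence */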
2641                 mac[5] += 1;
2642         }
2643         return status;
2644 }
2645
2646 static int be_vfs_mac_query(struct be_adapter *adapter)
2647 {
2648         int status, vf;
2649         u8 mac[ETH_ALEN];
2650         struct be_vf_cfg *vf_cfg;
2651         bool active;
2652
2653         for_all_vfs(adapter, vf_cfg, vf) {
2654                 be_cmd_get_mac_from_list(adapter, mac, &active,
2655                                          &vf_cfg->pmac_id, 0);
2656
2657                 status = be_cmd_mac_addr_query(adapter, mac, false,
2658                                                vf_cfg->if_handle, 0);
2659                 if (status)
2660                         return status;
2661                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2662         }
2663         return 0;
2664 }
2665
2666 static void be_vf_clear(struct be_adapter *adapter)
2667 {
2668         struct be_vf_cfg *vf_cfg;
2669         u32 vf;
2670
2671         if (be_find_vfs(adapter, ASSIGNED)) {
2672                 dev_warn(&adapter->pdev->dev,
2673                          "VFs are assigned to VMs: not disabling VFs\n");
2674                 goto done;
2675         }
2676
2677         for_all_vfs(adapter, vf_cfg, vf) {
2678                 if (lancer_chip(adapter))
2679                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2680                 else
2681                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2682                                         vf_cfg->pmac_id, vf + 1);
2683
2684                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2685         }
2686         pci_disable_sriov(adapter->pdev);
2687 done:
2688         kfree(adapter->vf_cfg);
2689         adapter->num_vfs = 0;
2690 }
2691
2692 static int be_clear(struct be_adapter *adapter)
2693 {
2694         int i = 1;
2695
2696         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2697                 cancel_delayed_work_sync(&adapter->work);
2698                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2699         }
2700
2701         if (sriov_enabled(adapter))
2702                 be_vf_clear(adapter);
2703
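             /* pmac_id[0] holds the primary MAC; delete only the additional
              * unicast MACs programmed after it.
              */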
2704         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2705                 be_cmd_pmac_del(adapter, adapter->if_handle,
2706                         adapter->pmac_id[i], 0);
2707
2708         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2709
2710         be_mcc_queues_destroy(adapter);
2711         be_rx_cqs_destroy(adapter);
2712         be_tx_queues_destroy(adapter);
2713         be_evt_queues_destroy(adapter);
2714
2715         kfree(adapter->pmac_id);
2716         adapter->pmac_id = NULL;
2717
2718         be_msix_disable(adapter);
2719         return 0;
2720 }
2721
2722 static int be_vfs_if_create(struct be_adapter *adapter)
2723 {
2724         struct be_vf_cfg *vf_cfg;
2725         u32 cap_flags, en_flags, vf;
2726         int status = 0;
2727
2728         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2729                     BE_IF_FLAGS_MULTICAST;
2730
2731         for_all_vfs(adapter, vf_cfg, vf) {
2732                 if (!BE3_chip(adapter))
2733                         be_cmd_get_profile_config(adapter, &cap_flags,
2734                                                   NULL, vf + 1);
2735
2736                 /* If a FW profile exists, then cap_flags are updated */
2737                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2738                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2739                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2740                                           &vf_cfg->if_handle, vf + 1);
2741                 if (status)
2742                         goto err;
2743         }
2744 err:
2745         return status;
2746 }
2747
2748 static int be_vf_setup_init(struct be_adapter *adapter)
2749 {
2750         struct be_vf_cfg *vf_cfg;
2751         int vf;
2752
2753         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2754                                   GFP_KERNEL);
2755         if (!adapter->vf_cfg)
2756                 return -ENOMEM;
2757
2758         for_all_vfs(adapter, vf_cfg, vf) {
2759                 vf_cfg->if_handle = -1;
2760                 vf_cfg->pmac_id = -1;
2761         }
2762         return 0;
2763 }
2764
2765 static int be_vf_setup(struct be_adapter *adapter)
2766 {
2767         struct be_vf_cfg *vf_cfg;
2768         u16 def_vlan, lnk_speed;
2769         int status, old_vfs, vf;
2770         struct device *dev = &adapter->pdev->dev;
2771
2772         old_vfs = be_find_vfs(adapter, ENABLED);
2773         if (old_vfs) {
2774                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2775                 if (old_vfs != num_vfs)
2776                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2777                 adapter->num_vfs = old_vfs;
2778         } else {
2779                 if (num_vfs > adapter->dev_num_vfs)
2780                         dev_info(dev, "Device supports %d VFs and not %d\n",
2781                                  adapter->dev_num_vfs, num_vfs);
2782                 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2783
2784                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2785                 if (status) {
2786                         dev_err(dev, "SRIOV enable failed\n");
2787                         adapter->num_vfs = 0;
2788                         return 0;
2789                 }
2790         }
2791
2792         status = be_vf_setup_init(adapter);
2793         if (status)
2794                 goto err;
2795
2796         if (old_vfs) {
2797                 for_all_vfs(adapter, vf_cfg, vf) {
2798                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2799                         if (status)
2800                                 goto err;
2801                 }
2802         } else {
2803                 status = be_vfs_if_create(adapter);
2804                 if (status)
2805                         goto err;
2806         }
2807
2808         if (old_vfs) {
2809                 status = be_vfs_mac_query(adapter);
2810                 if (status)
2811                         goto err;
2812         } else {
2813                 status = be_vf_eth_addr_config(adapter);
2814                 if (status)
2815                         goto err;
2816         }
2817
2818         for_all_vfs(adapter, vf_cfg, vf) {
2819                 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
2820                  * Allow full available bandwidth
2821                  */
2822                 if (BE3_chip(adapter) && !old_vfs)
2823                         be_cmd_set_qos(adapter, 1000, vf + 1);
2824
2825                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2826                                                   NULL, vf + 1);
2827                 if (!status)
2828                         vf_cfg->tx_rate = lnk_speed;
2829
2830                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2831                                                vf + 1, vf_cfg->if_handle);
2832                 if (status)
2833                         goto err;
2834                 vf_cfg->def_vid = def_vlan;
2835
2836                 be_cmd_enable_vf(adapter, vf + 1);
2837         }
2838         return 0;
2839 err:
2840         dev_err(dev, "VF setup failed\n");
2841         be_vf_clear(adapter);
2842         return status;
2843 }
2844
2845 static void be_setup_init(struct be_adapter *adapter)
2846 {
2847         adapter->vlan_prio_bmap = 0xff;
2848         adapter->phy.link_speed = -1;
2849         adapter->if_handle = -1;
2850         adapter->be3_native = false;
2851         adapter->promiscuous = false;
2852         if (be_physfn(adapter))
2853                 adapter->cmd_privileges = MAX_PRIVILEGES;
2854         else
2855                 adapter->cmd_privileges = MIN_PRIVILEGES;
2856 }
2857
2858 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2859                            bool *active_mac, u32 *pmac_id)
2860 {
2861         int status = 0;
2862
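             /* If a permanent MAC has already been recorded, reuse the
              * netdev's current address instead of querying the FW.
              */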
2863         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2864                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2865                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2866                         *active_mac = true;
2867                 else
2868                         *active_mac = false;
2869
2870                 return status;
2871         }
2872
2873         if (lancer_chip(adapter)) {
2874                 status = be_cmd_get_mac_from_list(adapter, mac,
2875                                                   active_mac, pmac_id, 0);
2876                 if (*active_mac) {
2877                         status = be_cmd_mac_addr_query(adapter, mac, false,
2878                                                        if_handle, *pmac_id);
2879                 }
2880         } else if (be_physfn(adapter)) {
2881                 /* For BE3, for PF get permanent MAC */
2882                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2883                 *active_mac = false;
2884         } else {
2885                 /* For BE3, for VF get soft MAC assigned by PF*/
2886                 status = be_cmd_mac_addr_query(adapter, mac, false,
2887                                                if_handle, 0);
2888                 *active_mac = true;
2889         }
2890         return status;
2891 }
2892
2893 static void be_get_resources(struct be_adapter *adapter)
2894 {
2895         u16 dev_num_vfs;
2896         int pos, status;
2897         bool profile_present = false;
2898         u16 txq_count = 0;
2899
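             /* Post-BEx chips report per-function limits via
              * GET_FUNC_CONFIG; a BE3 PF only queries the profile for the
              * TX queue count.
              */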
2900         if (!BEx_chip(adapter)) {
2901                 status = be_cmd_get_func_config(adapter);
2902                 if (!status)
2903                         profile_present = true;
2904         } else if (BE3_chip(adapter) && be_physfn(adapter)) {
2905                 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
2906         }
2907
2908         if (profile_present) {
2909                 /* Sanity fixes for Lancer */
2910                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2911                                               BE_UC_PMAC_COUNT);
2912                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2913                                            BE_NUM_VLANS_SUPPORTED);
2914                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2915                                                BE_MAX_MC);
2916                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2917                                                MAX_TX_QS);
2918                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2919                                                 BE3_MAX_RSS_QS);
2920                 adapter->max_event_queues = min_t(u16,
2921                                                   adapter->max_event_queues,
2922                                                   BE3_MAX_RSS_QS);
2923
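                     /* Keep one RXQ free so it can serve as the default
                      * (non-RSS) queue.
                      */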
2924                 if (adapter->max_rss_queues &&
2925                     adapter->max_rss_queues == adapter->max_rx_queues)
2926                         adapter->max_rss_queues -= 1;
2927
2928                 if (adapter->max_event_queues < adapter->max_rss_queues)
2929                         adapter->max_rss_queues = adapter->max_event_queues;
2930
2931         } else {
2932                 if (be_physfn(adapter))
2933                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2934                 else
2935                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2936
2937                 if (adapter->function_mode & FLEX10_MODE)
2938                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2939                 else
2940                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2941
2942                 adapter->max_mcast_mac = BE_MAX_MC;
2943                 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
2944                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2945                                                MAX_TX_QS);
2946                 adapter->max_rss_queues = (adapter->be3_native) ?
2947                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2948                 adapter->max_event_queues = BE3_MAX_RSS_QS;
2949
2950                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2951                                         BE_IF_FLAGS_BROADCAST |
2952                                         BE_IF_FLAGS_MULTICAST |
2953                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
2954                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
2955                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
2956                                         BE_IF_FLAGS_PROMISCUOUS;
2957
2958                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2959                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2960         }
2961
2962         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2963         if (pos) {
2964                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2965                                      &dev_num_vfs);
2966                 if (BE3_chip(adapter))
2967                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2968                 adapter->dev_num_vfs = dev_num_vfs;
2969         }
2970 }
2971
2972 /* Routine to query per function resource limits */
2973 static int be_get_config(struct be_adapter *adapter)
2974 {
2975         int status;
2976
2977         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2978                                      &adapter->function_mode,
2979                                      &adapter->function_caps,
2980                                      &adapter->asic_rev);
2981         if (status)
2982                 goto err;
2983
2984         be_get_resources(adapter);
2985
2986         /* primary MAC needs 1 pmac entry */
2987         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2988                                    sizeof(u32), GFP_KERNEL);
2989         if (!adapter->pmac_id) {
2990                 status = -ENOMEM;
2991                 goto err;
2992         }
2993
2994 err:
2995         return status;
2996 }
2997
2998 static int be_setup(struct be_adapter *adapter)
2999 {
3000         struct device *dev = &adapter->pdev->dev;
3001         u32 en_flags;
3002         u32 tx_fc, rx_fc;
3003         int status;
3004         u8 mac[ETH_ALEN];
3005         bool active_mac;
3006
3007         be_setup_init(adapter);
3008
3009         if (!lancer_chip(adapter))
3010                 be_cmd_req_native_mode(adapter);
3011
3012         status = be_get_config(adapter);
3013         if (status)
3014                 goto err;
3015
3016         be_msix_enable(adapter);
3017
3018         status = be_evt_queues_create(adapter);
3019         if (status)
3020                 goto err;
3021
3022         status = be_tx_cqs_create(adapter);
3023         if (status)
3024                 goto err;
3025
3026         status = be_rx_cqs_create(adapter);
3027         if (status)
3028                 goto err;
3029
3030         status = be_mcc_queues_create(adapter);
3031         if (status)
3032                 goto err;
3033
3034         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3035         /* In UMC mode the FW does not report the right privileges.
3036          * Override with privileges equivalent to those of a PF.
3037          */
3038         if (be_is_mc(adapter))
3039                 adapter->cmd_privileges = MAX_PRIVILEGES;
3040
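             /* Enable only the flags this function is allowed to use;
              * en_flags is masked with if_cap_flags below.
              */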
3041         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3042                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3043
3044         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3045                 en_flags |= BE_IF_FLAGS_RSS;
3046
3047         en_flags &= adapter->if_cap_flags;
3048
3049         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3050                                   &adapter->if_handle, 0);
3051         if (status != 0)
3052                 goto err;
3053
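             /* Obtain a MAC for the interface; if FW has not already
              * activated one, add it as a new pmac entry.
              */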
3054         memset(mac, 0, ETH_ALEN);
3055         active_mac = false;
3056         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3057                                  &active_mac, &adapter->pmac_id[0]);
3058         if (status != 0)
3059                 goto err;
3060
3061         if (!active_mac) {
3062                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3063                                          &adapter->pmac_id[0], 0);
3064                 if (status != 0)
3065                         goto err;
3066         }
3067
3068         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3069                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3070                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3071         }
3072
3073         status = be_tx_qs_create(adapter);
3074         if (status)
3075                 goto err;
3076
3077         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3078
3079         if (adapter->vlans_added)
3080                 be_vid_config(adapter);
3081
3082         be_set_rx_mode(adapter->netdev);
3083
3084         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3085
3086         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3087                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3088                                         adapter->rx_fc);
3089
3090         if (be_physfn(adapter) && num_vfs) {
3091                 if (adapter->dev_num_vfs)
3092                         be_vf_setup(adapter);
3093                 else
3094                         dev_warn(dev, "device doesn't support SRIOV\n");
3095         }
3096
3097         status = be_cmd_get_phy_info(adapter);
3098         if (!status && be_pause_supported(adapter))
3099                 adapter->phy.fc_autoneg = 1;
3100
3101         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3102         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3103         return 0;
3104 err:
3105         be_clear(adapter);
3106         return status;
3107 }
3108
3109 #ifdef CONFIG_NET_POLL_CONTROLLER
3110 static void be_netpoll(struct net_device *netdev)
3111 {
3112         struct be_adapter *adapter = netdev_priv(netdev);
3113         struct be_eq_obj *eqo;
3114         int i;
3115
3116         for_all_evt_queues(adapter, eqo, i) {
3117                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3118                 napi_schedule(&eqo->napi);
3119         }
3122 }
3123 #endif
3124
3125 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3126 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3127
3128 static bool be_flash_redboot(struct be_adapter *adapter,
3129                         const u8 *p, u32 img_start, int image_size,
3130                         int hdr_size)
3131 {
3132         u32 crc_offset;
3133         u8 flashed_crc[4];
3134         int status;
3135
3136         crc_offset = hdr_size + img_start + image_size - 4;
3137
3138         p += crc_offset;
3139
3140         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3141                                       image_size - 4);
3142         if (status) {
3143                 dev_err(&adapter->pdev->dev,
3144                         "could not get CRC from flash; not flashing redboot\n");
3145                 return false;
3146         }
3147
3148         /* update redboot only if the CRC does not match */
3149         return memcmp(flashed_crc, p, 4) != 0;
3153 }
3154
3155 static bool phy_flashing_required(struct be_adapter *adapter)
3156 {
3157         return (adapter->phy.phy_type == TN_8022 &&
3158                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3159 }
3160
3161 static bool is_comp_in_ufi(struct be_adapter *adapter,
3162                            struct flash_section_info *fsec, int type)
3163 {
3164         int i = 0, img_type = 0;
3165         struct flash_section_info_g2 *fsec_g2 = NULL;
3166
3167         if (BE2_chip(adapter))
3168                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3169
3170         for (i = 0; i < MAX_FLASH_COMP; i++) {
3171                 if (fsec_g2)
3172                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3173                 else
3174                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3175
3176                 if (img_type == type)
3177                         return true;
3178         }
3179         return false;
3181 }
3182
3183 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3184                                                 int header_size,
3185                                                 const struct firmware *fw)
3186 {
3187         struct flash_section_info *fsec = NULL;
3188         const u8 *p = fw->data;
3189
3190         p += header_size;
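             /* Scan in 32-byte steps for the flash-directory cookie that
              * marks the start of the section info block.
              */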
3191         while (p < (fw->data + fw->size)) {
3192                 fsec = (struct flash_section_info *)p;
3193                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3194                         return fsec;
3195                 p += 32;
3196         }
3197         return NULL;
3198 }
3199
3200 static int be_flash(struct be_adapter *adapter, const u8 *img,
3201                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3202 {
3203         u32 total_bytes = 0, flash_op, num_bytes = 0;
3204         int status = 0;
3205         struct be_cmd_write_flashrom *req = flash_cmd->va;
3206
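             /* Flash in 32KB chunks: intermediate chunks use a SAVE
              * operation; the final chunk issues the actual FLASH op.
              */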
3207         total_bytes = img_size;
3208         while (total_bytes) {
3209                 num_bytes = min_t(u32, 32 * 1024, total_bytes);
3210
3211                 total_bytes -= num_bytes;
3212
3213                 if (!total_bytes) {
3214                         if (optype == OPTYPE_PHY_FW)
3215                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3216                         else
3217                                 flash_op = FLASHROM_OPER_FLASH;
3218                 } else {
3219                         if (optype == OPTYPE_PHY_FW)
3220                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3221                         else
3222                                 flash_op = FLASHROM_OPER_SAVE;
3223                 }
3224
3225                 memcpy(req->data_buf, img, num_bytes);
3226                 img += num_bytes;
3227                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3228                                                 flash_op, num_bytes);
3229                 if (status) {
3230                         if (status == ILLEGAL_IOCTL_REQ &&
3231                             optype == OPTYPE_PHY_FW)
3232                                 break;
3233                         dev_err(&adapter->pdev->dev,
3234                                 "cmd to write to flash rom failed.\n");
3235                         return status;
3236                 }
3237         }
3238         return 0;
3239 }
3240
3241 /* For BE2, BE3 and BE3-R */
3242 static int be_flash_BEx(struct be_adapter *adapter,
3243                          const struct firmware *fw,
3244                          struct be_dma_mem *flash_cmd,
3245                          int num_of_images)
3247 {
3248         int status = 0, i, filehdr_size = 0;
3249         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3250         const u8 *p = fw->data;
3251         const struct flash_comp *pflashcomp;
3252         int num_comp, redboot;
3253         struct flash_section_info *fsec = NULL;
3254
3255         static const struct flash_comp gen3_flash_types[] = {
3256                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3257                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3258                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3259                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3260                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3261                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3262                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3263                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3264                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3265                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3266                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3267                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3268                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3269                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3270                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3271                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3272                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3273                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3274                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3275                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3276         };
3277
3278         static const struct flash_comp gen2_flash_types[] = {
3279                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3280                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3281                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3282                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3283                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3284                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3285                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3286                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3287                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3288                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3289                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3290                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3291                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3292                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3293                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3294                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3295         };
3296
3297         if (BE3_chip(adapter)) {
3298                 pflashcomp = gen3_flash_types;
3299                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3300                 num_comp = ARRAY_SIZE(gen3_flash_types);
3301         } else {
3302                 pflashcomp = gen2_flash_types;
3303                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3304                 num_comp = ARRAY_SIZE(gen2_flash_types);
3305         }
3306
3307         /* Get flash section info */
3308         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3309         if (!fsec) {
3310                 dev_err(&adapter->pdev->dev,
3311                         "Invalid Cookie. UFI corrupted?\n");
3312                 return -1;
3313         }
3314         for (i = 0; i < num_comp; i++) {
3315                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3316                         continue;
3317
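                     /* NCSI FW is flashed only when the FW version on the
                      * adapter is at least 3.102.148.0.
                      */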
3318                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3319                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3320                         continue;
3321
3322                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3323                     !phy_flashing_required(adapter))
3324                         continue;
3325
3326                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3327                         redboot = be_flash_redboot(adapter, fw->data,
3328                                 pflashcomp[i].offset, pflashcomp[i].size,
3329                                 filehdr_size + img_hdrs_size);
3330                         if (!redboot)
3331                                 continue;
3332                 }
3333
3334                 p = fw->data;
3335                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3336                 if (p + pflashcomp[i].size > fw->data + fw->size)
3337                         return -1;
3338
3339                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3340                                         pflashcomp[i].size);
3341                 if (status) {
3342                         dev_err(&adapter->pdev->dev,
3343                                 "Flashing section type %d failed.\n",
3344                                 pflashcomp[i].img_type);
3345                         return status;
3346                 }
3347         }
3348         return 0;
3349 }
3350
3351 static int be_flash_skyhawk(struct be_adapter *adapter,
3352                 const struct firmware *fw,
3353                 struct be_dma_mem *flash_cmd, int num_of_images)
3354 {
3355         int status = 0, i, filehdr_size = 0;
3356         int img_offset, img_size, img_optype, redboot;
3357         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3358         const u8 *p = fw->data;
3359         struct flash_section_info *fsec = NULL;
3360
3361         filehdr_size = sizeof(struct flash_file_hdr_g3);
3362         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3363         if (!fsec) {
3364                 dev_err(&adapter->pdev->dev,
3365                         "Invalid Cookie. UFI corrupted?\n");
3366                 return -1;
3367         }
3368
3369         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3370                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3371                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3372
3373                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3374                 case IMAGE_FIRMWARE_iSCSI:
3375                         img_optype = OPTYPE_ISCSI_ACTIVE;
3376                         break;
3377                 case IMAGE_BOOT_CODE:
3378                         img_optype = OPTYPE_REDBOOT;
3379                         break;
3380                 case IMAGE_OPTION_ROM_ISCSI:
3381                         img_optype = OPTYPE_BIOS;
3382                         break;
3383                 case IMAGE_OPTION_ROM_PXE:
3384                         img_optype = OPTYPE_PXE_BIOS;
3385                         break;
3386                 case IMAGE_OPTION_ROM_FCoE:
3387                         img_optype = OPTYPE_FCOE_BIOS;
3388                         break;
3389                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3390                         img_optype = OPTYPE_ISCSI_BACKUP;
3391                         break;
3392                 case IMAGE_NCSI:
3393                         img_optype = OPTYPE_NCSI_FW;
3394                         break;
3395                 default:
3396                         continue;
3397                 }
3398
3399                 if (img_optype == OPTYPE_REDBOOT) {
3400                         redboot = be_flash_redboot(adapter, fw->data,
3401                                         img_offset, img_size,
3402                                         filehdr_size + img_hdrs_size);
3403                         if (!redboot)
3404                                 continue;
3405                 }
3406
3407                 p = fw->data;
3408                 p += filehdr_size + img_offset + img_hdrs_size;
3409                 if (p + img_size > fw->data + fw->size)
3410                         return -1;
3411
3412                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3413                 if (status) {
3414                         dev_err(&adapter->pdev->dev,
3415                                 "Flashing section type %d failed.\n",
3416                                 le32_to_cpu(fsec->fsec_entry[i].type));
3417                         return status;
3418                 }
3419         }
3420         return 0;
3421 }
3422
3423 static int lancer_wait_idle(struct be_adapter *adapter)
3424 {
3425 #define SLIPORT_IDLE_TIMEOUT 30
3426         u32 reg_val;
3427         int status = 0, i;
3428
3429         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3430                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3431                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3432                         break;
3433
3434                 ssleep(1);
3435         }
3436
3437         if (i == SLIPORT_IDLE_TIMEOUT)
3438                 status = -1;
3439
3440         return status;
3441 }
3442
3443 static int lancer_fw_reset(struct be_adapter *adapter)
3444 {
3445         int status = 0;
3446
3447         status = lancer_wait_idle(adapter);
3448         if (status)
3449                 return status;
3450
3451         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3452                   PHYSDEV_CONTROL_OFFSET);
3453
3454         return status;
3455 }
3456
3457 static int lancer_fw_download(struct be_adapter *adapter,
3458                                 const struct firmware *fw)
3459 {
3460 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3461 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3462         struct be_dma_mem flash_cmd;
3463         const u8 *data_ptr = NULL;
3464         u8 *dest_image_ptr = NULL;
3465         size_t image_size = 0;
3466         u32 chunk_size = 0;
3467         u32 data_written = 0;
3468         u32 offset = 0;
3469         int status = 0;
3470         u8 add_status = 0;
3471         u8 change_status;
3472
3473         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3474                 dev_err(&adapter->pdev->dev,
3475                         "FW Image not properly aligned. "
3476                         "Length must be 4 byte aligned.\n");
3477                 status = -EINVAL;
3478                 goto lancer_fw_exit;
3479         }
3480
3481         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3482                                 + LANCER_FW_DOWNLOAD_CHUNK;
3483         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3484                                           &flash_cmd.dma, GFP_KERNEL);
3485         if (!flash_cmd.va) {
3486                 status = -ENOMEM;
3487                 goto lancer_fw_exit;
3488         }
3489
3490         dest_image_ptr = flash_cmd.va +
3491                                 sizeof(struct lancer_cmd_req_write_object);
3492         image_size = fw->size;
3493         data_ptr = fw->data;
3494
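             /* Download the image in 32KB chunks; a zero-length
              * write_object request afterwards commits it.
              */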
3495         while (image_size) {
3496                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3497
3498                 /* Copy the image chunk content. */
3499                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3500
3501                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3502                                                  chunk_size, offset,
3503                                                  LANCER_FW_DOWNLOAD_LOCATION,
3504                                                  &data_written, &change_status,
3505                                                  &add_status);
3506                 if (status)
3507                         break;
3508
3509                 offset += data_written;
3510                 data_ptr += data_written;
3511                 image_size -= data_written;
3512         }
3513
3514         if (!status) {
3515                 /* Commit the FW written */
3516                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3517                                                  0, offset,
3518                                                  LANCER_FW_DOWNLOAD_LOCATION,
3519                                                  &data_written, &change_status,
3520                                                  &add_status);
3521         }
3522
3523         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3524                                 flash_cmd.dma);
3525         if (status) {
3526                 dev_err(&adapter->pdev->dev,
3527                         "Firmware load error. "
3528                         "Status code: 0x%x Additional Status: 0x%x\n",
3529                         status, add_status);
3530                 goto lancer_fw_exit;
3531         }
3532
3533         if (change_status == LANCER_FW_RESET_NEEDED) {
3534                 status = lancer_fw_reset(adapter);
3535                 if (status) {
3536                         dev_err(&adapter->pdev->dev,
3537                                 "Adapter busy for FW reset.\n"
3538                                 "New FW will not be active.\n");
3539                         goto lancer_fw_exit;
3540                 }
3541         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3542                 dev_err(&adapter->pdev->dev,
3543                         "System reboot required for new FW to be active\n");
3545         }
3546
3547         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3548 lancer_fw_exit:
3549         return status;
3550 }
3551
3552 #define UFI_TYPE2               2
3553 #define UFI_TYPE3               3
3554 #define UFI_TYPE3R              10
3555 #define UFI_TYPE4               4
3556 static int be_get_ufi_type(struct be_adapter *adapter,
3557                            struct flash_file_hdr_g3 *fhdr)
3558 {
3559         if (fhdr == NULL)
3560                 goto be_get_ufi_exit;
3561
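             /* The first character of the build string identifies the UFI
              * generation; asic_type_rev tells BE3 and BE3-R images apart.
              */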
3562         if (skyhawk_chip(adapter) && fhdr->build[0] == '4') {
3563                 return UFI_TYPE4;
3564         } else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3565                 if (fhdr->asic_type_rev == 0x10)
3566                         return UFI_TYPE3R;
3567                 else
3568                         return UFI_TYPE3;
3569         } else if (BE2_chip(adapter) && fhdr->build[0] == '2') {
3570                 return UFI_TYPE2;
3571         }
3571
3572 be_get_ufi_exit:
3573         dev_err(&adapter->pdev->dev,
3574                 "UFI and Interface are not compatible for flashing\n");
3575         return -1;
3576 }
3577
3578 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3579 {
3580         struct flash_file_hdr_g3 *fhdr3;
3581         struct image_hdr *img_hdr_ptr = NULL;
3582         struct be_dma_mem flash_cmd;
3583         const u8 *p;
3584         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3585
3586         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3587         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3588                                           &flash_cmd.dma, GFP_KERNEL);
3589         if (!flash_cmd.va) {
3590                 status = -ENOMEM;
3591                 goto be_fw_exit;
3592         }
3593
3594         p = fw->data;
3595         fhdr3 = (struct flash_file_hdr_g3 *)p;
3596
3597         ufi_type = be_get_ufi_type(adapter, fhdr3);
3598
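             /* Walk the image headers; flash only when an image with id 1
              * is present, using the routine matching the UFI type.
              */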
3599         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3600         for (i = 0; i < num_imgs; i++) {
3601                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3602                                 (sizeof(struct flash_file_hdr_g3) +
3603                                  i * sizeof(struct image_hdr)));
3604                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3605                         switch (ufi_type) {
3606                         case UFI_TYPE4:
3607                                 status = be_flash_skyhawk(adapter, fw,
3608                                                         &flash_cmd, num_imgs);
3609                                 break;
3610                         case UFI_TYPE3R:
3611                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3612                                                       num_imgs);
3613                                 break;
3614                         case UFI_TYPE3:
3615                                 /* Do not flash this ufi on BE3-R cards */
3616                                 if (adapter->asic_rev < 0x10)
3617                                         status = be_flash_BEx(adapter, fw,
3618                                                               &flash_cmd,
3619                                                               num_imgs);
3620                                 else {
3621                                         status = -1;
3622                                         dev_err(&adapter->pdev->dev,
3623                                                 "Can't load BE3 UFI on BE3R\n");
3624                                 }
3625                         }
3626                 }
3627         }
3628
3629         if (ufi_type == UFI_TYPE2)
3630                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3631         else if (ufi_type == -1)
3632                 status = -1;
3633
3634         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3635                           flash_cmd.dma);
3636         if (status) {
3637                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3638                 goto be_fw_exit;
3639         }
3640
3641         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3642
3643 be_fw_exit:
3644         return status;
3645 }
3646
3647 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3648 {
3649         const struct firmware *fw;
3650         int status;
3651
3652         if (!netif_running(adapter->netdev)) {
3653                 dev_err(&adapter->pdev->dev,
3654                         "Firmware load not allowed (interface is down)\n");
3655                 return -1;
3656         }
3657
3658         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3659         if (status)
3660                 goto fw_exit;
3661
3662         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3663
3664         if (lancer_chip(adapter))
3665                 status = lancer_fw_download(adapter, fw);
3666         else
3667                 status = be_fw_download(adapter, fw);
3668
3669 fw_exit:
3670         release_firmware(fw);
3671         return status;
3672 }
3673
3674 static const struct net_device_ops be_netdev_ops = {
3675         .ndo_open               = be_open,
3676         .ndo_stop               = be_close,
3677         .ndo_start_xmit         = be_xmit,
3678         .ndo_set_rx_mode        = be_set_rx_mode,
3679         .ndo_set_mac_address    = be_mac_addr_set,
3680         .ndo_change_mtu         = be_change_mtu,
3681         .ndo_get_stats64        = be_get_stats64,
3682         .ndo_validate_addr      = eth_validate_addr,
3683         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3684         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3685         .ndo_set_vf_mac         = be_set_vf_mac,
3686         .ndo_set_vf_vlan        = be_set_vf_vlan,
3687         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3688         .ndo_get_vf_config      = be_get_vf_config,
3689 #ifdef CONFIG_NET_POLL_CONTROLLER
3690         .ndo_poll_controller    = be_netpoll,
3691 #endif
3692 };
3693
3694 static void be_netdev_init(struct net_device *netdev)
3695 {
3696         struct be_adapter *adapter = netdev_priv(netdev);
3697         struct be_eq_obj *eqo;
3698         int i;
3699
3700         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3701                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3702                 NETIF_F_HW_VLAN_CTAG_TX;
3703         if (be_multi_rxq(adapter))
3704                 netdev->hw_features |= NETIF_F_RXHASH;
3705
3706         netdev->features |= netdev->hw_features |
3707                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3708
3709         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3710                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3711
3712         netdev->priv_flags |= IFF_UNICAST_FLT;
3713
3714         netdev->flags |= IFF_MULTICAST;
3715
3716         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3717
3718         netdev->netdev_ops = &be_netdev_ops;
3719
3720         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3721
3722         for_all_evt_queues(adapter, eqo, i)
3723                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3724 }
3725
3726 static void be_unmap_pci_bars(struct be_adapter *adapter)
3727 {
3728         if (adapter->csr)
3729                 pci_iounmap(adapter->pdev, adapter->csr);
3730         if (adapter->db)
3731                 pci_iounmap(adapter->pdev, adapter->db);
3732 }
3733
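             /* Doorbells live in BAR 0 on Lancer and on VFs, and in BAR 4
              * on BE-x PFs.
              */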
3734 static int db_bar(struct be_adapter *adapter)
3735 {
3736         if (lancer_chip(adapter) || !be_physfn(adapter))
3737                 return 0;
3738         else
3739                 return 4;
3740 }
3741
3742 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3743 {
3744         if (skyhawk_chip(adapter)) {
3745                 adapter->roce_db.size = 4096;
3746                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3747                                                               db_bar(adapter));
3748                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3749                                                                db_bar(adapter));
3750         }
3751         return 0;
3752 }
3753
3754 static int be_map_pci_bars(struct be_adapter *adapter)
3755 {
3756         u8 __iomem *addr;
3757         u32 sli_intf;
3758
3759         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3760         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3761                                 SLI_INTF_IF_TYPE_SHIFT;
3762
3763         if (BEx_chip(adapter) && be_physfn(adapter)) {
3764                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3765                 if (adapter->csr == NULL)
3766                         return -ENOMEM;
3767         }
3768
3769         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3770         if (addr == NULL)
3771                 goto pci_map_err;
3772         adapter->db = addr;
3773
3774         be_roce_map_pci_bars(adapter);
3775         return 0;
3776
3777 pci_map_err:
3778         be_unmap_pci_bars(adapter);
3779         return -ENOMEM;
3780 }
3781
3782 static void be_ctrl_cleanup(struct be_adapter *adapter)
3783 {
3784         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3785
3786         be_unmap_pci_bars(adapter);
3787
3788         if (mem->va)
3789                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3790                                   mem->dma);
3791
3792         mem = &adapter->rx_filter;
3793         if (mem->va)
3794                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3795                                   mem->dma);
3796 }
3797
3798 static int be_ctrl_init(struct be_adapter *adapter)
3799 {
3800         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3801         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3802         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3803         u32 sli_intf;
3804         int status;
3805
3806         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3807         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3808                                  SLI_INTF_FAMILY_SHIFT;
3809         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3810
3811         status = be_map_pci_bars(adapter);
3812         if (status)
3813                 goto done;
3814
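             /* Over-allocate by 16 bytes so both the VA and the DMA
              * address of the mailbox can be aligned to a 16-byte boundary.
              */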
3815         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3816         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3817                                                 mbox_mem_alloc->size,
3818                                                 &mbox_mem_alloc->dma,
3819                                                 GFP_KERNEL);
3820         if (!mbox_mem_alloc->va) {
3821                 status = -ENOMEM;
3822                 goto unmap_pci_bars;
3823         }
3824         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3825         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3826         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3827         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3828
3829         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3830         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3831                                            &rx_filter->dma,
3832                                            GFP_KERNEL | __GFP_ZERO);
3833         if (rx_filter->va == NULL) {
3834                 status = -ENOMEM;
3835                 goto free_mbox;
3836         }
3837
3838         mutex_init(&adapter->mbox_lock);
3839         spin_lock_init(&adapter->mcc_lock);
3840         spin_lock_init(&adapter->mcc_cq_lock);
3841
3842         init_completion(&adapter->flash_compl);
3843         pci_save_state(adapter->pdev);
3844         return 0;
3845
3846 free_mbox:
3847         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3848                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3849
3850 unmap_pci_bars:
3851         be_unmap_pci_bars(adapter);
3852
3853 done:
3854         return status;
3855 }
3856
3857 static void be_stats_cleanup(struct be_adapter *adapter)
3858 {
3859         struct be_dma_mem *cmd = &adapter->stats_cmd;
3860
3861         if (cmd->va)
3862                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3863                                   cmd->va, cmd->dma);
3864 }
3865
3866 static int be_stats_init(struct be_adapter *adapter)
3867 {
3868         struct be_dma_mem *cmd = &adapter->stats_cmd;
3869
3870         if (lancer_chip(adapter))
3871                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3872         else if (BE2_chip(adapter))
3873                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3874         else
3875                 /* BE3 and Skyhawk */
3876                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3877
3878         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3879                                      GFP_KERNEL | __GFP_ZERO);
3880         if (cmd->va == NULL)
3881                 return -1;
3882         return 0;
3883 }
3884
3885 static void be_remove(struct pci_dev *pdev)
3886 {
3887         struct be_adapter *adapter = pci_get_drvdata(pdev);
3888
3889         if (!adapter)
3890                 return;
3891
3892         be_roce_dev_remove(adapter);
3893         be_intr_set(adapter, false);
3894
3895         cancel_delayed_work_sync(&adapter->func_recovery_work);
3896
3897         unregister_netdev(adapter->netdev);
3898
3899         be_clear(adapter);
3900
3901         /* tell fw we're done with firing cmds */
3902         be_cmd_fw_clean(adapter);
3903
3904         be_stats_cleanup(adapter);
3905
3906         be_ctrl_cleanup(adapter);
3907
3908         pci_disable_pcie_error_reporting(pdev);
3909
3910         pci_set_drvdata(pdev, NULL);
3911         pci_release_regions(pdev);
3912         pci_disable_device(pdev);
3913
3914         free_netdev(adapter->netdev);
3915 }
3916
3917 bool be_is_wol_supported(struct be_adapter *adapter)
3918 {
3919         return (adapter->wol_cap & BE_WOL_CAP) &&
3920                !be_is_wol_excluded(adapter);
3921 }
3922
3923 u32 be_get_fw_log_level(struct be_adapter *adapter)
3924 {
3925         struct be_dma_mem extfat_cmd;
3926         struct be_fat_conf_params *cfgs;
3927         int status;
3928         u32 level = 0;
3929         int j;
3930
3931         if (lancer_chip(adapter))
3932                 return 0;
3933
3934         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3935         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3936         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3937                                              &extfat_cmd.dma);
3938
3939         if (!extfat_cmd.va) {
3940                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3941                         __func__);
3942                 goto err;
3943         }
3944
3945         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3946         if (!status) {
3947                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3948                                                 sizeof(struct be_cmd_resp_hdr));
3949                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3950                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3951                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3952                 }
3953         }
3954         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3955                             extfat_cmd.dma);
3956 err:
3957         return level;
3958 }
3959
3960 static int be_get_initial_config(struct be_adapter *adapter)
3961 {
3962         int status;
3963         u32 level;
3964
3965         status = be_cmd_get_cntl_attributes(adapter);
3966         if (status)
3967                 return status;
3968
3969         status = be_cmd_get_acpi_wol_cap(adapter);
3970         if (status) {
3971                 /* in case of a failure to get WOL capabilities,
3972                  * check the exclusion list to determine WOL capability */
3973                 if (!be_is_wol_excluded(adapter))
3974                         adapter->wol_cap |= BE_WOL_CAP;
3975         }
3976
3977         if (be_is_wol_supported(adapter))
3978                 adapter->wol = true;
3979
3980         /* Must be a power of 2 or else MODULO will BUG_ON */
3981         adapter->be_get_temp_freq = 64;
3982
3983         level = be_get_fw_log_level(adapter);
3984         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3985
3986         return 0;
3987 }
3988
3989 static int lancer_recover_func(struct be_adapter *adapter)
3990 {
3991         int status;
3992
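             /* Recover only once the SLIPORT reports ready; then tear the
              * function down and rebuild it from scratch.
              */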
3993         status = lancer_test_and_set_rdy_state(adapter);
3994         if (status)
3995                 goto err;
3996
3997         if (netif_running(adapter->netdev))
3998                 be_close(adapter->netdev);
3999
4000         be_clear(adapter);
4001
4002         adapter->hw_error = false;
4003         adapter->fw_timeout = false;
4004
4005         status = be_setup(adapter);
4006         if (status)
4007                 goto err;
4008
4009         if (netif_running(adapter->netdev)) {
4010                 status = be_open(adapter->netdev);
4011                 if (status)
4012                         goto err;
4013         }
4014
4015         dev_info(&adapter->pdev->dev,
4016                  "Adapter SLIPORT recovery succeeded\n");
4017         return 0;
4018 err:
4019         if (adapter->eeh_error)
4020                 dev_err(&adapter->pdev->dev,
4021                         "Adapter SLIPORT recovery failed\n");
4022
4023         return status;
4024 }
4025
4026 static void be_func_recovery_task(struct work_struct *work)
4027 {
4028         struct be_adapter *adapter =
4029                 container_of(work, struct be_adapter, func_recovery_work.work);
4030         int status;
4031
4032         be_detect_error(adapter);
4033
4034         if (adapter->hw_error && lancer_chip(adapter)) {
4036                 if (adapter->eeh_error)
4037                         goto out;
4038
4039                 rtnl_lock();
4040                 netif_device_detach(adapter->netdev);
4041                 rtnl_unlock();
4042
4043                 status = lancer_recover_func(adapter);
4044
4045                 if (!status)
4046                         netif_device_attach(adapter->netdev);
4047         }
4048
4049 out:
4050         schedule_delayed_work(&adapter->func_recovery_work,
4051                               msecs_to_jiffies(1000));
4052 }
4053
4054 static void be_worker(struct work_struct *work)
4055 {
4056         struct be_adapter *adapter =
4057                 container_of(work, struct be_adapter, work.work);
4058         struct be_rx_obj *rxo;
4059         struct be_eq_obj *eqo;
4060         int i;
4061
4062         /* when interrupts are not yet enabled, just reap any pending
4063          * mcc completions */
4064         if (!netif_running(adapter->netdev)) {
4065                 local_bh_disable();
4066                 be_process_mcc(adapter);
4067                 local_bh_enable();
4068                 goto reschedule;
4069         }
4070
4071         if (!adapter->stats_cmd_sent) {
4072                 if (lancer_chip(adapter))
4073                         lancer_cmd_get_pport_stats(adapter,
4074                                                 &adapter->stats_cmd);
4075                 else
4076                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4077         }
4078
4079         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4080                 be_cmd_get_die_temperature(adapter);
4081
4082         for_all_rx_queues(adapter, rxo, i) {
4083                 if (rxo->rx_post_starved) {
4084                         rxo->rx_post_starved = false;
4085                         be_post_rx_frags(rxo, GFP_KERNEL);
4086                 }
4087         }
4088
4089         for_all_evt_queues(adapter, eqo, i)
4090                 be_eqd_update(adapter, eqo);
4091
4092 reschedule:
4093         adapter->work_counter++;
4094         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4095 }
4096
4097 static bool be_reset_required(struct be_adapter *adapter)
4098 {
4099         return be_find_vfs(adapter, ENABLED) <= 0;
4100 }
4101
4102 static char *mc_name(struct be_adapter *adapter)
4103 {
4104         if (adapter->function_mode & FLEX10_MODE)
4105                 return "FLEX10";
4106         else if (adapter->function_mode & VNIC_MODE)
4107                 return "vNIC";
4108         else if (adapter->function_mode & UMC_ENABLED)
4109                 return "UMC";
4110         else
4111                 return "";
4112 }
4113
4114 static inline char *func_name(struct be_adapter *adapter)
4115 {
4116         return be_physfn(adapter) ? "PF" : "VF";
4117 }
4118
4119 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4120 {
4121         int status = 0;
4122         struct be_adapter *adapter;
4123         struct net_device *netdev;
4124         char port_name;
4125
4126         status = pci_enable_device(pdev);
4127         if (status)
4128                 goto do_none;
4129
4130         status = pci_request_regions(pdev, DRV_NAME);
4131         if (status)
4132                 goto disable_dev;
4133         pci_set_master(pdev);
4134
4135         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4136         if (netdev == NULL) {
4137                 status = -ENOMEM;
4138                 goto rel_reg;
4139         }
4140         adapter = netdev_priv(netdev);
4141         adapter->pdev = pdev;
4142         pci_set_drvdata(pdev, adapter);
4143         adapter->netdev = netdev;
4144         SET_NETDEV_DEV(netdev, &pdev->dev);
4145
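             /* Prefer 64-bit DMA and fall back to a 32-bit mask if the
              * platform cannot support it.
              */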
4146         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4147         if (!status) {
4148                 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4149                 if (status < 0) {
4150                         dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4151                         goto free_netdev;
4152                 }
4153                 netdev->features |= NETIF_F_HIGHDMA;
4154         } else {
4155                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4156                 if (status) {
4157                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4158                         goto free_netdev;
4159                 }
4160         }
4161
4162         status = pci_enable_pcie_error_reporting(pdev);
4163         if (status)
4164                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4165
4166         status = be_ctrl_init(adapter);
4167         if (status)
4168                 goto free_netdev;
4169
4170         /* sync up with fw's ready state */
4171         if (be_physfn(adapter)) {
4172                 status = be_fw_wait_ready(adapter);
4173                 if (status)
4174                         goto ctrl_clean;
4175         }
4176
4177         /* tell fw we're ready to fire cmds */
4178         status = be_cmd_fw_init(adapter);
4179         if (status)
4180                 goto ctrl_clean;
4181
4182         if (be_reset_required(adapter)) {
4183                 status = be_cmd_reset_function(adapter);
4184                 if (status)
4185                         goto ctrl_clean;
4186         }
4187
4188         /* Wait for interrupts to quiesce after an FLR */
4189         msleep(100);
4190
4191         /* Allow interrupts for other ULPs running on NIC function */
4192         be_intr_set(adapter, true);
4193
4194         status = be_stats_init(adapter);
4195         if (status)
4196                 goto ctrl_clean;
4197
4198         status = be_get_initial_config(adapter);
4199         if (status)
4200                 goto stats_clean;
4201
4202         INIT_DELAYED_WORK(&adapter->work, be_worker);
4203         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4204         adapter->rx_fc = adapter->tx_fc = true;
4205
4206         status = be_setup(adapter);
4207         if (status)
4208                 goto stats_clean;
4209
4210         be_netdev_init(netdev);
4211         status = register_netdev(netdev);
4212         if (status != 0)
4213                 goto unsetup;
4214
4215         be_roce_dev_add(adapter);
4216
4217         schedule_delayed_work(&adapter->func_recovery_work,
4218                               msecs_to_jiffies(1000));
4219
4220         be_cmd_query_port_name(adapter, &port_name);
4221
4222         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4223                  func_name(adapter), mc_name(adapter), port_name);
4224
4225         return 0;
4226
4227 unsetup:
4228         be_clear(adapter);
4229 stats_clean:
4230         be_stats_cleanup(adapter);
4231 ctrl_clean:
4232         be_ctrl_cleanup(adapter);
4233 free_netdev:
4234         free_netdev(netdev);
4235         pci_set_drvdata(pdev, NULL);
4236 rel_reg:
4237         pci_release_regions(pdev);
4238 disable_dev:
4239         pci_disable_device(pdev);
4240 do_none:
4241         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4242         return status;
4243 }
4244
4245 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4246 {
4247         struct be_adapter *adapter = pci_get_drvdata(pdev);
4248         struct net_device *netdev = adapter->netdev;
4249
4250         if (adapter->wol)
4251                 be_setup_wol(adapter, true);
4252
4253         cancel_delayed_work_sync(&adapter->func_recovery_work);
4254
4255         netif_device_detach(netdev);
4256         if (netif_running(netdev)) {
4257                 rtnl_lock();
4258                 be_close(netdev);
4259                 rtnl_unlock();
4260         }
4261         be_clear(adapter);
4262
4263         pci_save_state(pdev);
4264         pci_disable_device(pdev);
4265         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4266         return 0;
4267 }
4268
4269 static int be_resume(struct pci_dev *pdev)
4270 {
4271         int status = 0;
4272         struct be_adapter *adapter = pci_get_drvdata(pdev);
4273         struct net_device *netdev = adapter->netdev;
4274
4275         netif_device_detach(netdev);
4276
4277         status = pci_enable_device(pdev);
4278         if (status)
4279                 return status;
4280
4281         pci_set_power_state(pdev, PCI_D0);
4282         pci_restore_state(pdev);
4283
4284         /* tell fw we're ready to fire cmds */
4285         status = be_cmd_fw_init(adapter);
4286         if (status)
4287                 return status;
4288
4289         be_setup(adapter);
4290         if (netif_running(netdev)) {
4291                 rtnl_lock();
4292                 be_open(netdev);
4293                 rtnl_unlock();
4294         }
4295
4296         schedule_delayed_work(&adapter->func_recovery_work,
4297                               msecs_to_jiffies(1000));
4298         netif_device_attach(netdev);
4299
4300         if (adapter->wol)
4301                 be_setup_wol(adapter, false);
4302
4303         return 0;
4304 }
4305
/*
 * Quiesce the device on shutdown: the function reset (FLR) issued here
 * stops BE from DMAing any further data into host memory.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);
        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(adapter->netdev);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}

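/*
 * EEH error_detected callback: flag the error, stop the recovery worker,
 * detach and close the interface and release HW resources. Returns
 * DISCONNECT on a permanent failure, else asks the EEH core for a reset.
 */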
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                            pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_error = true;

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        rtnl_lock();
        netif_device_detach(netdev);
        rtnl_unlock();

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        /* The error could cause the FW to trigger a flash debug dump.
         * Resetting the card while the flash dump is in progress can
         * prevent it from recovering, so wait for the dump to finish.
         * Waiting on the first function only is enough, as the dump
         * happens once per adapter.
         */
        if (pdev->devfn == 0)
                ssleep(30);

        return PCI_ERS_RESULT_NEED_RESET;
}

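/*
 * EEH slot_reset callback: re-enable the device after the slot reset,
 * restore config space and wait for the firmware to become ready before
 * letting the EEH core proceed with resume.
 */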
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        be_clear_all_error(adapter);

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        dev_info(&adapter->pdev->dev,
                 "Waiting for FW to be ready after EEH reset\n");
        status = be_fw_wait_ready(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_cleanup_aer_uncorrect_error_status(pdev);
        return PCI_ERS_RESULT_RECOVERED;
}

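/*
 * EEH resume callback: re-init the firmware command path, reset the
 * function, rebuild HW resources and bring the interface back up.
 */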
static void be_eeh_resume(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
        int status;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_cmd_reset_function(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

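/* PCI driver glue: probe/remove, legacy PM hooks, shutdown and EEH recovery */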
static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

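/*
 * Module entry point: validate the rx_frag_size parameter (only 2048-,
 * 4096- and 8192-byte fragments are supported; anything else falls back
 * to 2048) and register the PCI driver. For example, with hypothetical
 * values: "modprobe be2net rx_frag_size=4096 num_vfs=2".
 */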
static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                pr_warn(DRV_NAME " : Module param rx_frag_size must be 2048/4096/8192. Using 2048\n");
                rx_frag_size = 2048;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);