be2net: Fix to fail probe if MSI-X enable fails for a VF
[cascardo/linux.git] / drivers/net/ethernet/emulex/benet/be_main.c
1 /*
2  * Copyright (C) 2005 - 2013 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24
25 MODULE_VERSION(DRV_VER);
27 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28 MODULE_AUTHOR("Emulex Corporation");
29 MODULE_LICENSE("GPL");
30
31 static unsigned int num_vfs;
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static ushort rx_frag_size = 2048;
36 module_param(rx_frag_size, ushort, S_IRUGO);
37 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
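/* Example (hypothetical values): "modprobe be2net num_vfs=2 rx_frag_size=4096"
 * asks the PF to spawn two VFs and posts 4KB buffers to the RX rings.
 * Both parameters are read-only once the module is loaded (S_IRUGO).
 */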
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
46         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
47         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
48         { 0 }
49 };
50 MODULE_DEVICE_TABLE(pci, be_dev_ids);
51 /* UE Status Low CSR */
52 static const char * const ue_status_low_desc[] = {
53         "CEV",
54         "CTX",
55         "DBUF",
56         "ERX",
57         "Host",
58         "MPU",
59         "NDMA",
60         "PTC ",
61         "RDMA ",
62         "RXF ",
63         "RXIPS ",
64         "RXULP0 ",
65         "RXULP1 ",
66         "RXULP2 ",
67         "TIM ",
68         "TPOST ",
69         "TPRE ",
70         "TXIPS ",
71         "TXULP0 ",
72         "TXULP1 ",
73         "UC ",
74         "WDMA ",
75         "TXULP2 ",
76         "HOST1 ",
77         "P0_OB_LINK ",
78         "P1_OB_LINK ",
79         "HOST_GPIO ",
80         "MBOX ",
81         "AXGMAC0",
82         "AXGMAC1",
83         "JTAG",
84         "MPU_INTPEND"
85 };
86 /* UE Status High CSR */
87 static const char * const ue_status_hi_desc[] = {
88         "LPCMEMHOST",
89         "MGMT_MAC",
90         "PCS0ONLINE",
91         "MPU_IRAM",
92         "PCS1ONLINE",
93         "PCTL0",
94         "PCTL1",
95         "PMEM",
96         "RR",
97         "TXPB",
98         "RXPP",
99         "XAUI",
100         "TXP",
101         "ARM",
102         "IPC",
103         "HOST2",
104         "HOST3",
105         "HOST4",
106         "HOST5",
107         "HOST6",
108         "HOST7",
109         "HOST8",
110         "HOST9",
111         "NETC",
112         "Unknown",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown",
117         "Unknown",
118         "Unknown",
119         "Unknown"
120 };
121
122 /* Is BE in multi-channel mode */
123 static inline bool be_is_mc(struct be_adapter *adapter)
124 {
125         return adapter->function_mode &
126                (FLEX10_MODE | VNIC_MODE | UMC_ENABLED);
127 }
128
129 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130 {
131         struct be_dma_mem *mem = &q->dma_mem;
132         if (mem->va) {
133                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134                                   mem->dma);
135                 mem->va = NULL;
136         }
137 }
138
139 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140                 u16 len, u16 entry_size)
141 {
142         struct be_dma_mem *mem = &q->dma_mem;
143
144         memset(q, 0, sizeof(*q));
145         q->len = len;
146         q->entry_size = entry_size;
147         mem->size = len * entry_size;
148         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149                                      GFP_KERNEL | __GFP_ZERO);
150         if (!mem->va)
151                 return -ENOMEM;
152         return 0;
153 }
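/* Worked example (illustrative numbers, not from a real caller):
 * be_queue_alloc(adapter, q, 256, 16) sizes the ring at 256 * 16 = 4096
 * bytes and backs it with zeroed DMA-coherent memory.
 */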
154
155 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
156 {
157         u32 reg, enabled;
158
159         pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160                                 &reg);
161         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
163         if (!enabled && enable)
164                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165         else if (enabled && !enable)
166                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167         else
168                 return;
169
170         pci_write_config_dword(adapter->pdev,
171                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
172 }
173
174 static void be_intr_set(struct be_adapter *adapter, bool enable)
175 {
176         int status = 0;
177
178         /* On lancer interrupts can't be controlled via this register */
179         if (lancer_chip(adapter))
180                 return;
181
182         if (adapter->eeh_error)
183                 return;
184
185         status = be_cmd_intr_set(adapter, enable);
186         if (status)
187                 be_reg_intr_set(adapter, enable);
188 }
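/* Note: the FW cmd (be_cmd_intr_set) is tried first; only when that cmd
 * fails does the driver fall back to toggling the HOSTINTR bit in PCI
 * config space via be_reg_intr_set().
 */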
189
190 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
191 {
192         u32 val = 0;
193         val |= qid & DB_RQ_RING_ID_MASK;
194         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
195
196         wmb();
197         iowrite32(val, adapter->db + DB_RQ_OFFSET);
198 }
199
200 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201                           u16 posted)
202 {
203         u32 val = 0;
204         val |= txo->q.id & DB_TXULP_RING_ID_MASK;
205         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
206
207         wmb();
208         iowrite32(val, adapter->db + txo->db_offset);
209 }
210
211 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
212                 bool arm, bool clear_int, u16 num_popped)
213 {
214         u32 val = 0;
215         val |= qid & DB_EQ_RING_ID_MASK;
216         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
218
219         if (adapter->eeh_error)
220                 return;
221
222         if (arm)
223                 val |= 1 << DB_EQ_REARM_SHIFT;
224         if (clear_int)
225                 val |= 1 << DB_EQ_CLR_SHIFT;
226         val |= 1 << DB_EQ_EVNT_SHIFT;
227         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
228         iowrite32(val, adapter->db + DB_EQ_OFFSET);
229 }
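/* Sketch of the EQ doorbell value built above (symbolic; the shift
 * constants live in be_hw.h): for qid = 5, arm = true, num_popped = 3,
 *   val = 5 | (1 << DB_EQ_REARM_SHIFT) | (1 << DB_EQ_EVNT_SHIFT) |
 *         (3 << DB_EQ_NUM_POPPED_SHIFT);
 * i.e. credit back three processed entries and re-arm the EQ.
 */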
230
231 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
232 {
233         u32 val = 0;
234         val |= qid & DB_CQ_RING_ID_MASK;
235         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
237
238         if (adapter->eeh_error)
239                 return;
240
241         if (arm)
242                 val |= 1 << DB_CQ_REARM_SHIFT;
243         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
244         iowrite32(val, adapter->db + DB_CQ_OFFSET);
245 }
246
247 static int be_mac_addr_set(struct net_device *netdev, void *p)
248 {
249         struct be_adapter *adapter = netdev_priv(netdev);
250         struct sockaddr *addr = p;
251         int status = 0;
252         u8 current_mac[ETH_ALEN];
253         u32 pmac_id = adapter->pmac_id[0];
254         bool active_mac = true;
255
256         if (!is_valid_ether_addr(addr->sa_data))
257                 return -EADDRNOTAVAIL;
258
259         /* For a BE VF, the MAC address is already activated by the PF.
260          * Hence the only operation left is updating netdev->dev_addr.
261          * Update it only if the user passes the same MAC that was used
262          * while configuring the VF MAC from the PF (hypervisor).
263          */
264         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
265                 status = be_cmd_mac_addr_query(adapter, current_mac,
266                                                false, adapter->if_handle, 0);
267                 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
268                         goto done;
269                 else
270                         goto err;
271         }
272
273         if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
274                 goto done;
275
276         /* For Lancer check if any MAC is active.
277          * If active, get its mac id.
278          */
279         if (lancer_chip(adapter) && !be_physfn(adapter))
280                 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
281                                          &pmac_id, 0);
282
283         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
284                                  adapter->if_handle,
285                                  &adapter->pmac_id[0], 0);
286
287         if (status)
288                 goto err;
289
290         if (active_mac)
291                 be_cmd_pmac_del(adapter, adapter->if_handle,
292                                 pmac_id, 0);
293 done:
294         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
295         return 0;
296 err:
297         dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
298         return status;
299 }
300
301 /* BE2 supports only v0 cmd */
302 static void *hw_stats_from_cmd(struct be_adapter *adapter)
303 {
304         if (BE2_chip(adapter)) {
305                 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307                 return &cmd->hw_stats;
308         } else  {
309                 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311                 return &cmd->hw_stats;
312         }
313 }
314
315 /* BE2 supports only v0 cmd */
316 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317 {
318         if (BE2_chip(adapter)) {
319                 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321                 return &hw_stats->erx;
322         } else {
323                 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325                 return &hw_stats->erx;
326         }
327 }
328
329 static void populate_be_v0_stats(struct be_adapter *adapter)
330 {
331         struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
333         struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
334         struct be_port_rxf_stats_v0 *port_stats =
335                                         &rxf_stats->port[adapter->port_num];
336         struct be_drv_stats *drvs = &adapter->drv_stats;
337
338         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
339         drvs->rx_pause_frames = port_stats->rx_pause_frames;
340         drvs->rx_crc_errors = port_stats->rx_crc_errors;
341         drvs->rx_control_frames = port_stats->rx_control_frames;
342         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
343         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
344         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
345         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
346         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
347         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
348         drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
349         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
350         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
351         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
352         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
353         drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
354         drvs->rx_dropped_header_too_small =
355                 port_stats->rx_dropped_header_too_small;
356         drvs->rx_address_filtered =
357                                         port_stats->rx_address_filtered +
358                                         port_stats->rx_vlan_filtered;
359         drvs->rx_alignment_symbol_errors =
360                 port_stats->rx_alignment_symbol_errors;
361
362         drvs->tx_pauseframes = port_stats->tx_pauseframes;
363         drvs->tx_controlframes = port_stats->tx_controlframes;
364
365         if (adapter->port_num)
366                 drvs->jabber_events = rxf_stats->port1_jabber_events;
367         else
368                 drvs->jabber_events = rxf_stats->port0_jabber_events;
369         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
370         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
371         drvs->forwarded_packets = rxf_stats->forwarded_packets;
372         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
373         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
374         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
375         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
376 }
377
378 static void populate_be_v1_stats(struct be_adapter *adapter)
379 {
380         struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
381         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
382         struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
383         struct be_port_rxf_stats_v1 *port_stats =
384                                         &rxf_stats->port[adapter->port_num];
385         struct be_drv_stats *drvs = &adapter->drv_stats;
386
387         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
388         drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
389         drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
390         drvs->rx_pause_frames = port_stats->rx_pause_frames;
391         drvs->rx_crc_errors = port_stats->rx_crc_errors;
392         drvs->rx_control_frames = port_stats->rx_control_frames;
393         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
394         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
395         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
396         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
397         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
398         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
399         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
400         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
401         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
402         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
403         drvs->rx_dropped_header_too_small =
404                 port_stats->rx_dropped_header_too_small;
405         drvs->rx_input_fifo_overflow_drop =
406                 port_stats->rx_input_fifo_overflow_drop;
407         drvs->rx_address_filtered = port_stats->rx_address_filtered;
408         drvs->rx_alignment_symbol_errors =
409                 port_stats->rx_alignment_symbol_errors;
410         drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
411         drvs->tx_pauseframes = port_stats->tx_pauseframes;
412         drvs->tx_controlframes = port_stats->tx_controlframes;
413         drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
414         drvs->jabber_events = port_stats->jabber_events;
415         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
416         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
417         drvs->forwarded_packets = rxf_stats->forwarded_packets;
418         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
419         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
420         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
421         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
422 }
423
424 static void populate_lancer_stats(struct be_adapter *adapter)
425 {
427         struct be_drv_stats *drvs = &adapter->drv_stats;
428         struct lancer_pport_stats *pport_stats =
429                                         pport_stats_from_cmd(adapter);
430
431         be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
432         drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
433         drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
434         drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
435         drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
436         drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
437         drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
438         drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
439         drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
440         drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
441         drvs->rx_dropped_tcp_length =
442                                 pport_stats->rx_dropped_invalid_tcp_length;
443         drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
444         drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
445         drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
446         drvs->rx_dropped_header_too_small =
447                                 pport_stats->rx_dropped_header_too_small;
448         drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
449         drvs->rx_address_filtered =
450                                         pport_stats->rx_address_filtered +
451                                         pport_stats->rx_vlan_filtered;
452         drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
453         drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
454         drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
455         drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
456         drvs->jabber_events = pport_stats->rx_jabbers;
457         drvs->forwarded_packets = pport_stats->num_forwards_lo;
458         drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
459         drvs->rx_drops_too_many_frags =
460                                 pport_stats->rx_drops_too_many_frags_lo;
461 }
462
463 static void accumulate_16bit_val(u32 *acc, u16 val)
464 {
465 #define lo(x)                   (x & 0xFFFF)
466 #define hi(x)                   (x & 0xFFFF0000)
467         bool wrapped = val < lo(*acc);
468         u32 newacc = hi(*acc) + val;
469
470         if (wrapped)
471                 newacc += 65536;
472         ACCESS_ONCE(*acc) = newacc;
473 }
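/* Worked example: if *acc == 0x0001FFFE (hi word 0x0001, lo word 0xFFFE)
 * and the HW now reports val == 2, the 16-bit counter must have wrapped
 * (2 < 0xFFFE), so newacc = 0x00010000 + 2 + 65536 = 0x00020002.
 */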
474
475 void populate_erx_stats(struct be_adapter *adapter,
476                         struct be_rx_obj *rxo,
477                         u32 erx_stat)
478 {
479         if (!BEx_chip(adapter))
480                 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
481         else
482                 /* this erx HW counter wraps around after 65535;
483                  * the driver accumulates it into a 32-bit value
484                  */
485                 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
486                                      (u16)erx_stat);
487 }
488
489 void be_parse_stats(struct be_adapter *adapter)
490 {
491         struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
492         struct be_rx_obj *rxo;
493         int i;
494         u32 erx_stat;
495
496         if (lancer_chip(adapter)) {
497                 populate_lancer_stats(adapter);
498         } else {
499                 if (BE2_chip(adapter))
500                         populate_be_v0_stats(adapter);
501                 else
502                         /* for BE3 and Skyhawk */
503                         populate_be_v1_stats(adapter);
504
505                 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
506                 for_all_rx_queues(adapter, rxo, i) {
507                         erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
508                         populate_erx_stats(adapter, rxo, erx_stat);
509                 }
510         }
511 }
512
513 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
514                                         struct rtnl_link_stats64 *stats)
515 {
516         struct be_adapter *adapter = netdev_priv(netdev);
517         struct be_drv_stats *drvs = &adapter->drv_stats;
518         struct be_rx_obj *rxo;
519         struct be_tx_obj *txo;
520         u64 pkts, bytes;
521         unsigned int start;
522         int i;
523
524         for_all_rx_queues(adapter, rxo, i) {
525                 const struct be_rx_stats *rx_stats = rx_stats(rxo);
526                 do {
527                         start = u64_stats_fetch_begin_bh(&rx_stats->sync);
528                         pkts = rx_stats(rxo)->rx_pkts;
529                         bytes = rx_stats(rxo)->rx_bytes;
530                 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
531                 stats->rx_packets += pkts;
532                 stats->rx_bytes += bytes;
533                 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
534                 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
535                                         rx_stats(rxo)->rx_drops_no_frags;
536         }
537
538         for_all_tx_queues(adapter, txo, i) {
539                 const struct be_tx_stats *tx_stats = tx_stats(txo);
540                 do {
541                         start = u64_stats_fetch_begin_bh(&tx_stats->sync);
542                         pkts = tx_stats(txo)->tx_pkts;
543                         bytes = tx_stats(txo)->tx_bytes;
544                 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
545                 stats->tx_packets += pkts;
546                 stats->tx_bytes += bytes;
547         }
548
549         /* bad pkts received */
550         stats->rx_errors = drvs->rx_crc_errors +
551                 drvs->rx_alignment_symbol_errors +
552                 drvs->rx_in_range_errors +
553                 drvs->rx_out_range_errors +
554                 drvs->rx_frame_too_long +
555                 drvs->rx_dropped_too_small +
556                 drvs->rx_dropped_too_short +
557                 drvs->rx_dropped_header_too_small +
558                 drvs->rx_dropped_tcp_length +
559                 drvs->rx_dropped_runt;
560
561         /* detailed rx errors */
562         stats->rx_length_errors = drvs->rx_in_range_errors +
563                 drvs->rx_out_range_errors +
564                 drvs->rx_frame_too_long;
565
566         stats->rx_crc_errors = drvs->rx_crc_errors;
567
568         /* frame alignment errors */
569         stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
570
571         /* receiver fifo overrun */
572         /* drops_no_pbuf is not per i/f, it's per BE card */
573         stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
574                                 drvs->rx_input_fifo_overflow_drop +
575                                 drvs->rx_drops_no_pbuf;
576         return stats;
577 }
578
579 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
580 {
581         struct net_device *netdev = adapter->netdev;
582
583         if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
584                 netif_carrier_off(netdev);
585                 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
586         }
587
588         if ((link_status & LINK_STATUS_MASK) == LINK_UP)
589                 netif_carrier_on(netdev);
590         else
591                 netif_carrier_off(netdev);
592 }
593
594 static void be_tx_stats_update(struct be_tx_obj *txo,
595                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
596 {
597         struct be_tx_stats *stats = tx_stats(txo);
598
599         u64_stats_update_begin(&stats->sync);
600         stats->tx_reqs++;
601         stats->tx_wrbs += wrb_cnt;
602         stats->tx_bytes += copied;
603         stats->tx_pkts += (gso_segs ? gso_segs : 1);
604         if (stopped)
605                 stats->tx_stops++;
606         u64_stats_update_end(&stats->sync);
607 }
608
609 /* Determine number of WRB entries needed to xmit data in an skb */
610 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
611                                                                 bool *dummy)
612 {
613         int cnt = (skb->len > skb->data_len);
614
615         cnt += skb_shinfo(skb)->nr_frags;
616
617         /* to account for hdr wrb */
618         cnt++;
619         if (lancer_chip(adapter) || !(cnt & 1)) {
620                 *dummy = false;
621         } else {
622                 /* add a dummy to make it an even num */
623                 cnt++;
624                 *dummy = true;
625         }
626         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
627         return cnt;
628 }
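/* Worked example: an skb with linear data and 2 page frags needs
 * 1 + 2 data WRBs plus 1 hdr WRB, i.e. cnt = 4 (even), so no dummy WRB.
 * With a single frag, cnt would be 3 and a dummy WRB is appended on
 * BEx chips to keep the count even.
 */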
629
630 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
631 {
632         wrb->frag_pa_hi = upper_32_bits(addr);
633         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
634         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
635         wrb->rsvd0 = 0;
636 }
637
638 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
639                                         struct sk_buff *skb)
640 {
641         u8 vlan_prio;
642         u16 vlan_tag;
643
644         vlan_tag = vlan_tx_tag_get(skb);
645         vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
646         /* If vlan priority provided by OS is NOT in available bmap */
647         if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
648                 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
649                                 adapter->recommended_prio;
650
651         return vlan_tag;
652 }
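/* Worked example (illustrative tag): vlan_tag 0xA005 carries priority
 * (0xA005 & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT = 5. If bit 5 is clear in
 * adapter->vlan_prio_bmap, the priority bits are rewritten with
 * adapter->recommended_prio while the VID bits are preserved.
 */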
653
654 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
655                 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
656 {
657         u16 vlan_tag;
658
659         memset(hdr, 0, sizeof(*hdr));
660
661         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
662
663         if (skb_is_gso(skb)) {
664                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
665                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
666                         hdr, skb_shinfo(skb)->gso_size);
667                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
668                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
669         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
670                 if (is_tcp_pkt(skb))
671                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
672                 else if (is_udp_pkt(skb))
673                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
674         }
675
676         if (vlan_tx_tag_present(skb)) {
677                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
678                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
679                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
680         }
681
682         /* To skip HW VLAN tagging: evt = 1, compl = 0 */
683         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
684         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
685         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
686         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
687 }
688
689 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
690                 bool unmap_single)
691 {
692         dma_addr_t dma;
693
694         be_dws_le_to_cpu(wrb, sizeof(*wrb));
695
696         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
697         if (wrb->frag_len) {
698                 if (unmap_single)
699                         dma_unmap_single(dev, dma, wrb->frag_len,
700                                          DMA_TO_DEVICE);
701                 else
702                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
703         }
704 }
705
706 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
707                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
708                 bool skip_hw_vlan)
709 {
710         dma_addr_t busaddr;
711         int i, copied = 0;
712         struct device *dev = &adapter->pdev->dev;
713         struct sk_buff *first_skb = skb;
714         struct be_eth_wrb *wrb;
715         struct be_eth_hdr_wrb *hdr;
716         bool map_single = false;
717         u16 map_head;
718
719         hdr = queue_head_node(txq);
720         queue_head_inc(txq);
721         map_head = txq->head;
722
723         if (skb->len > skb->data_len) {
724                 int len = skb_headlen(skb);
725                 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
726                 if (dma_mapping_error(dev, busaddr))
727                         goto dma_err;
728                 map_single = true;
729                 wrb = queue_head_node(txq);
730                 wrb_fill(wrb, busaddr, len);
731                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
732                 queue_head_inc(txq);
733                 copied += len;
734         }
735
736         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
737                 const struct skb_frag_struct *frag =
738                         &skb_shinfo(skb)->frags[i];
739                 busaddr = skb_frag_dma_map(dev, frag, 0,
740                                            skb_frag_size(frag), DMA_TO_DEVICE);
741                 if (dma_mapping_error(dev, busaddr))
742                         goto dma_err;
743                 wrb = queue_head_node(txq);
744                 wrb_fill(wrb, busaddr, skb_frag_size(frag));
745                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
746                 queue_head_inc(txq);
747                 copied += skb_frag_size(frag);
748         }
749
750         if (dummy_wrb) {
751                 wrb = queue_head_node(txq);
752                 wrb_fill(wrb, 0, 0);
753                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
754                 queue_head_inc(txq);
755         }
756
757         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
758         be_dws_cpu_to_le(hdr, sizeof(*hdr));
759
760         return copied;
761 dma_err:
762         txq->head = map_head;
763         while (copied) {
764                 wrb = queue_head_node(txq);
765                 unmap_tx_frag(dev, wrb, map_single);
766                 map_single = false;
767                 copied -= wrb->frag_len;
768                 queue_head_inc(txq);
769         }
770         return 0;
771 }
772
773 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
774                                              struct sk_buff *skb,
775                                              bool *skip_hw_vlan)
776 {
777         u16 vlan_tag = 0;
778
779         skb = skb_share_check(skb, GFP_ATOMIC);
780         if (unlikely(!skb))
781                 return skb;
782
783         if (vlan_tx_tag_present(skb)) {
784                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
785                 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
786                 if (skb)
787                         skb->vlan_tci = 0;
788         }
789
790         if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
791                 if (!vlan_tag)
792                         vlan_tag = adapter->pvid;
793                 if (skip_hw_vlan)
794                         *skip_hw_vlan = true;
795         }
796
797         if (vlan_tag) {
798                 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
799                 if (unlikely(!skb))
800                         return skb;
801
802                 skb->vlan_tci = 0;
803         }
804
805         /* Insert the outer VLAN, if any */
806         if (adapter->qnq_vid) {
807                 vlan_tag = adapter->qnq_vid;
808                 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
809                 if (unlikely(!skb))
810                         return skb;
811                 if (skip_hw_vlan)
812                         *skip_hw_vlan = true;
813         }
814
815         return skb;
816 }
817
818 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
819 {
820         struct ethhdr *eh = (struct ethhdr *)skb->data;
821         u16 offset = ETH_HLEN;
822
823         if (eh->h_proto == htons(ETH_P_IPV6)) {
824                 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
825
826                 offset += sizeof(struct ipv6hdr);
827                 if (ip6h->nexthdr != NEXTHDR_TCP &&
828                     ip6h->nexthdr != NEXTHDR_UDP) {
829                         struct ipv6_opt_hdr *ehdr =
830                                 (struct ipv6_opt_hdr *) (skb->data + offset);
831
832                         /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
833                         if (ehdr->hdrlen == 0xff)
834                                 return true;
835                 }
836         }
837         return false;
838 }
839
840 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
841 {
842         return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
843 }
844
845 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
846 {
847         return BE3_chip(adapter) &&
848                 be_ipv6_exthdr_check(skb);
849 }
850
851 static netdev_tx_t be_xmit(struct sk_buff *skb,
852                         struct net_device *netdev)
853 {
854         struct be_adapter *adapter = netdev_priv(netdev);
855         struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
856         struct be_queue_info *txq = &txo->q;
857         struct iphdr *ip = NULL;
858         u32 wrb_cnt = 0, copied = 0;
859         u32 start = txq->head, eth_hdr_len;
860         bool dummy_wrb, stopped = false;
861         bool skip_hw_vlan = false;
862         struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
863
864         eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
865                 VLAN_ETH_HLEN : ETH_HLEN;
866
867         /* For padded packets, BE HW modifies the tot_len field in the IP
868          * header incorrectly when a VLAN tag is inserted by HW.
869          */
870         if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
871                 ip = (struct iphdr *)ip_hdr(skb);
872                 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
873         }
874
875         /* If vlan tag is already inlined in the packet, skip HW VLAN
876          * tagging in UMC mode
877          */
878         if ((adapter->function_mode & UMC_ENABLED) &&
879             veh->h_vlan_proto == htons(ETH_P_8021Q))
880                 skip_hw_vlan = true;
881
882         /* HW has a bug wherein it computes the checksum for VLAN
883          * pkts even when checksum offload is disabled.
884          * Manually insert the VLAN tag in the pkt as a workaround.
885          */
886         if (skb->ip_summed != CHECKSUM_PARTIAL &&
887                         vlan_tx_tag_present(skb)) {
888                 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
889                 if (unlikely(!skb))
890                         goto tx_drop;
891         }
892
893         /* HW may lock up when VLAN HW tagging is requested on
894          * certain ipv6 packets. Drop such pkts if the HW workaround to
895          * skip HW tagging is not enabled by FW.
896          */
897         if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
898                      (adapter->pvid || adapter->qnq_vid) &&
899                      !qnq_async_evt_rcvd(adapter)))
900                 goto tx_drop;
901
902         /* Manual VLAN tag insertion to prevent an
903          * ASIC lockup when the ASIC inserts a VLAN tag into
904          * certain ipv6 packets. Insert VLAN tags in the driver,
905          * and set the event, completion, vlan bits accordingly
906          * in the Tx WRB.
907          */
908         if (be_ipv6_tx_stall_chk(adapter, skb) &&
909             be_vlan_tag_tx_chk(adapter, skb)) {
910                 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
911                 if (unlikely(!skb))
912                         goto tx_drop;
913         }
914
915         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
916
917         copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
918                               skip_hw_vlan);
919         if (copied) {
920                 int gso_segs = skb_shinfo(skb)->gso_segs;
921
922                 /* record the sent skb in the sent_skb table */
923                 BUG_ON(txo->sent_skb_list[start]);
924                 txo->sent_skb_list[start] = skb;
925
926                 /* Ensure txq has space for the next skb; else stop the queue
927                  * *BEFORE* ringing the tx doorbell, so that we serialize the
928                  * tx compls of the current transmit which will wake up the queue
929                  */
930                 atomic_add(wrb_cnt, &txq->used);
931                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
932                                                                 txq->len) {
933                         netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
934                         stopped = true;
935                 }
936
937                 be_txq_notify(adapter, txo, wrb_cnt);
938
939                 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
940         } else {
941                 txq->head = start;
942                 dev_kfree_skb_any(skb);
943         }
944 tx_drop:
945         return NETDEV_TX_OK;
946 }
947
948 static int be_change_mtu(struct net_device *netdev, int new_mtu)
949 {
950         struct be_adapter *adapter = netdev_priv(netdev);
951         if (new_mtu < BE_MIN_MTU ||
952                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
953                                         (ETH_HLEN + ETH_FCS_LEN))) {
954                 dev_info(&adapter->pdev->dev,
955                         "MTU must be between %d and %d bytes\n",
956                         BE_MIN_MTU,
957                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
958                 return -EINVAL;
959         }
960         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
961                         netdev->mtu, new_mtu);
962         netdev->mtu = new_mtu;
963         return 0;
964 }
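/* Worked example: assuming BE_MAX_JUMBO_FRAME_SIZE is 9018 as defined in
 * be.h, the largest accepted MTU is 9018 - (14 + 4) = 9000 bytes;
 * values outside [BE_MIN_MTU, 9000] return -EINVAL.
 */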
965
966 /*
967  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
968  * If the user configures more, place BE in vlan promiscuous mode.
969  */
970 static int be_vid_config(struct be_adapter *adapter)
971 {
972         u16 vids[BE_NUM_VLANS_SUPPORTED];
973         u16 num = 0, i;
974         int status = 0;
975
976         /* No need to further configure vids if in promiscuous mode */
977         if (adapter->promiscuous)
978                 return 0;
979
980         if (adapter->vlans_added > adapter->max_vlans)
981                 goto set_vlan_promisc;
982
983         /* Construct VLAN Table to give to HW */
984         for (i = 0; i < VLAN_N_VID; i++)
985                 if (adapter->vlan_tag[i])
986                         vids[num++] = cpu_to_le16(i);
987
988         status = be_cmd_vlan_config(adapter, adapter->if_handle,
989                                     vids, num, 1, 0);
990
991         /* Set to VLAN promisc mode as setting VLAN filter failed */
992         if (status) {
993                 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
994                 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
995                 goto set_vlan_promisc;
996         }
997
998         return status;
999
1000 set_vlan_promisc:
1001         status = be_cmd_vlan_config(adapter, adapter->if_handle,
1002                                     NULL, 0, 1, 1);
1003         return status;
1004 }
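/* Example: with VIDs 5 and 100 set in adapter->vlan_tag[], the table
 * handed to HW is vids[] = { cpu_to_le16(5), cpu_to_le16(100) }, num = 2.
 * If programming the filter fails, the port drops back to VLAN
 * promiscuous mode instead.
 */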
1005
1006 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1007 {
1008         struct be_adapter *adapter = netdev_priv(netdev);
1009         int status = 0;
1010
1011         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1012                 status = -EINVAL;
1013                 goto ret;
1014         }
1015
1016         /* Packets with VID 0 are always received by Lancer by default */
1017         if (lancer_chip(adapter) && vid == 0)
1018                 goto ret;
1019
1020         adapter->vlan_tag[vid] = 1;
1021         if (adapter->vlans_added <= (adapter->max_vlans + 1))
1022                 status = be_vid_config(adapter);
1023
1024         if (!status)
1025                 adapter->vlans_added++;
1026         else
1027                 adapter->vlan_tag[vid] = 0;
1028 ret:
1029         return status;
1030 }
1031
1032 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1033 {
1034         struct be_adapter *adapter = netdev_priv(netdev);
1035         int status = 0;
1036
1037         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1038                 status = -EINVAL;
1039                 goto ret;
1040         }
1041
1042         /* Packets with VID 0 are always received by Lancer by default */
1043         if (lancer_chip(adapter) && vid == 0)
1044                 goto ret;
1045
1046         adapter->vlan_tag[vid] = 0;
1047         if (adapter->vlans_added <= adapter->max_vlans)
1048                 status = be_vid_config(adapter);
1049
1050         if (!status)
1051                 adapter->vlans_added--;
1052         else
1053                 adapter->vlan_tag[vid] = 1;
1054 ret:
1055         return status;
1056 }
1057
1058 static void be_set_rx_mode(struct net_device *netdev)
1059 {
1060         struct be_adapter *adapter = netdev_priv(netdev);
1061         int status;
1062
1063         if (netdev->flags & IFF_PROMISC) {
1064                 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1065                 adapter->promiscuous = true;
1066                 goto done;
1067         }
1068
1069         /* BE was previously in promiscuous mode; disable it */
1070         if (adapter->promiscuous) {
1071                 adapter->promiscuous = false;
1072                 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1073
1074                 if (adapter->vlans_added)
1075                         be_vid_config(adapter);
1076         }
1077
1078         /* Enable multicast promisc if num configured exceeds what we support */
1079         if (netdev->flags & IFF_ALLMULTI ||
1080             netdev_mc_count(netdev) > adapter->max_mcast_mac) {
1081                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1082                 goto done;
1083         }
1084
1085         if (netdev_uc_count(netdev) != adapter->uc_macs) {
1086                 struct netdev_hw_addr *ha;
1087                 int i = 1; /* First slot is claimed by the Primary MAC */
1088
1089                 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1090                         be_cmd_pmac_del(adapter, adapter->if_handle,
1091                                         adapter->pmac_id[i], 0);
1092                 }
1093
1094                 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
1095                         be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1096                         adapter->promiscuous = true;
1097                         goto done;
1098                 }
1099
1100                 netdev_for_each_uc_addr(ha, adapter->netdev) {
1101                         adapter->uc_macs++; /* First slot is for Primary MAC */
1102                         be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1103                                         adapter->if_handle,
1104                                         &adapter->pmac_id[adapter->uc_macs], 0);
1105                 }
1106         }
1107
1108         status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1109
1110         /* Set to MCAST promisc mode if setting MULTICAST address fails */
1111         if (status) {
1112                 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1113                 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1114                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1115         }
1116 done:
1117         return;
1118 }
1119
1120 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1121 {
1122         struct be_adapter *adapter = netdev_priv(netdev);
1123         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1124         int status;
1125         bool active_mac = false;
1126         u32 pmac_id;
1127         u8 old_mac[ETH_ALEN];
1128
1129         if (!sriov_enabled(adapter))
1130                 return -EPERM;
1131
1132         if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1133                 return -EINVAL;
1134
1135         if (lancer_chip(adapter)) {
1136                 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
1137                                                   &pmac_id, vf + 1);
1138                 if (!status && active_mac)
1139                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1140                                         pmac_id, vf + 1);
1141
1142                 status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
1143         } else {
1144                 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1145                                          vf_cfg->pmac_id, vf + 1);
1146
1147                 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1148                                          &vf_cfg->pmac_id, vf + 1);
1149         }
1150
1151         if (status)
1152                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1153                                 mac, vf);
1154         else
1155                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1156
1157         return status;
1158 }
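/* This ndo backs the standard iproute2 flow, e.g. (hypothetical names):
 *   ip link set eth0 vf 0 mac 00:11:22:33:44:55
 * The stale pmac entry is deleted and the new MAC is programmed against
 * the VF's if_handle in domain vf + 1.
 */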
1159
1160 static int be_get_vf_config(struct net_device *netdev, int vf,
1161                         struct ifla_vf_info *vi)
1162 {
1163         struct be_adapter *adapter = netdev_priv(netdev);
1164         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1165
1166         if (!sriov_enabled(adapter))
1167                 return -EPERM;
1168
1169         if (vf >= adapter->num_vfs)
1170                 return -EINVAL;
1171
1172         vi->vf = vf;
1173         vi->tx_rate = vf_cfg->tx_rate;
1174         vi->vlan = vf_cfg->vlan_tag;
1175         vi->qos = 0;
1176         memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1177
1178         return 0;
1179 }
1180
1181 static int be_set_vf_vlan(struct net_device *netdev,
1182                         int vf, u16 vlan, u8 qos)
1183 {
1184         struct be_adapter *adapter = netdev_priv(netdev);
1185         int status = 0;
1186
1187         if (!sriov_enabled(adapter))
1188                 return -EPERM;
1189
1190         if (vf >= adapter->num_vfs || vlan > 4095)
1191                 return -EINVAL;
1192
1193         if (vlan) {
1194                 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1195                         /* If this is a new value, program it; else skip. */
1196                         adapter->vf_cfg[vf].vlan_tag = vlan;
1197
1198                         status = be_cmd_set_hsw_config(adapter, vlan,
1199                                 vf + 1, adapter->vf_cfg[vf].if_handle);
1200                 }
1201         } else {
1202                 /* Reset Transparent Vlan Tagging. */
1203                 adapter->vf_cfg[vf].vlan_tag = 0;
1204                 vlan = adapter->vf_cfg[vf].def_vid;
1205                 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1206                         adapter->vf_cfg[vf].if_handle);
1207         }
1208
1210         if (status)
1211                 dev_info(&adapter->pdev->dev,
1212                                 "VLAN %d config on VF %d failed\n", vlan, vf);
1213         return status;
1214 }
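/* Usage sketch (hypothetical device name):
 *   ip link set eth0 vf 0 vlan 100    - enable transparent tagging
 *   ip link set eth0 vf 0 vlan 0      - revert to the VF's default VID
 * Both paths end up in be_cmd_set_hsw_config() for domain vf + 1.
 */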
1215
1216 static int be_set_vf_tx_rate(struct net_device *netdev,
1217                         int vf, int rate)
1218 {
1219         struct be_adapter *adapter = netdev_priv(netdev);
1220         int status = 0;
1221
1222         if (!sriov_enabled(adapter))
1223                 return -EPERM;
1224
1225         if (vf >= adapter->num_vfs)
1226                 return -EINVAL;
1227
1228         if (rate < 100 || rate > 10000) {
1229                 dev_err(&adapter->pdev->dev,
1230                         "tx rate must be between 100 and 10000 Mbps\n");
1231                 return -EINVAL;
1232         }
1233
1234         if (lancer_chip(adapter))
1235                 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1236         else
1237                 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1238
1239         if (status)
1240                 dev_err(&adapter->pdev->dev,
1241                                 "tx rate %d on VF %d failed\n", rate, vf);
1242         else
1243                 adapter->vf_cfg[vf].tx_rate = rate;
1244         return status;
1245 }
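/* Worked example: "ip link set eth0 vf 0 rate 5000" (name hypothetical)
 * passes rate = 5000 Mbps, programmed as 5000 / 10 = 500 units of
 * 10 Mbps via be_cmd_set_qos(), or be_cmd_set_profile_config() on
 * Lancer.
 */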
1246
1247 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1248 {
1249         struct pci_dev *dev, *pdev = adapter->pdev;
1250         int vfs = 0, assigned_vfs = 0, pos;
1251         u16 offset, stride;
1252
1253         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1254         if (!pos)
1255                 return 0;
1256         pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1257         pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1258
1259         dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1260         while (dev) {
1261                 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
1262                         vfs++;
1263                         if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1264                                 assigned_vfs++;
1265                 }
1266                 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1267         }
1268         return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1269 }
1270
1271 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1272 {
1273         struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1274         ulong now = jiffies;
1275         ulong delta = now - stats->rx_jiffies;
1276         u64 pkts;
1277         unsigned int start, eqd;
1278
1279         if (!eqo->enable_aic) {
1280                 eqd = eqo->eqd;
1281                 goto modify_eqd;
1282         }
1283
1284         if (eqo->idx >= adapter->num_rx_qs)
1285                 return;
1286
1287         stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1288
1289         /* Wrapped around */
1290         if (time_before(now, stats->rx_jiffies)) {
1291                 stats->rx_jiffies = now;
1292                 return;
1293         }
1294
1295         /* Update once a second */
1296         if (delta < HZ)
1297                 return;
1298
1299         do {
1300                 start = u64_stats_fetch_begin_bh(&stats->sync);
1301                 pkts = stats->rx_pkts;
1302         } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1303
1304         stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1305         stats->rx_pkts_prev = pkts;
1306         stats->rx_jiffies = now;
1307         eqd = (stats->rx_pps / 110000) << 3;
1308         eqd = min(eqd, eqo->max_eqd);
1309         eqd = max(eqd, eqo->min_eqd);
1310         if (eqd < 10)
1311                 eqd = 0;
1312
1313 modify_eqd:
1314         if (eqd != eqo->cur_eqd) {
1315                 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1316                 eqo->cur_eqd = eqd;
1317         }
1318 }
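/* Worked example of the adaptive path: at 440,000 pkts/sec,
 * eqd = (440000 / 110000) << 3 = 32, then clamped to
 * [eqo->min_eqd, eqo->max_eqd]. Below 220 kpps the formula yields
 * eqd < 10, which (unless raised by min_eqd) is forced to 0,
 * i.e. no interrupt delay.
 */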
1319
1320 static void be_rx_stats_update(struct be_rx_obj *rxo,
1321                 struct be_rx_compl_info *rxcp)
1322 {
1323         struct be_rx_stats *stats = rx_stats(rxo);
1324
1325         u64_stats_update_begin(&stats->sync);
1326         stats->rx_compl++;
1327         stats->rx_bytes += rxcp->pkt_size;
1328         stats->rx_pkts++;
1329         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1330                 stats->rx_mcast_pkts++;
1331         if (rxcp->err)
1332                 stats->rx_compl_err++;
1333         u64_stats_update_end(&stats->sync);
1334 }
1335
1336 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1337 {
1338         /* L4 checksum is not reliable for non-TCP/UDP packets.
1339          * Also ignore ipcksm for ipv6 pkts */
1340         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1341                                 (rxcp->ip_csum || rxcp->ipv6);
1342 }
1343
1344 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1345                                                 u16 frag_idx)
1346 {
1347         struct be_adapter *adapter = rxo->adapter;
1348         struct be_rx_page_info *rx_page_info;
1349         struct be_queue_info *rxq = &rxo->q;
1350
1351         rx_page_info = &rxo->page_info_tbl[frag_idx];
1352         BUG_ON(!rx_page_info->page);
1353
1354         if (rx_page_info->last_page_user) {
1355                 dma_unmap_page(&adapter->pdev->dev,
1356                                dma_unmap_addr(rx_page_info, bus),
1357                                adapter->big_page_size, DMA_FROM_DEVICE);
1358                 rx_page_info->last_page_user = false;
1359         }
1360
1361         atomic_dec(&rxq->used);
1362         return rx_page_info;
1363 }
1364
1365 /* Throw away the data in the Rx completion */
1366 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1367                                 struct be_rx_compl_info *rxcp)
1368 {
1369         struct be_queue_info *rxq = &rxo->q;
1370         struct be_rx_page_info *page_info;
1371         u16 i, num_rcvd = rxcp->num_rcvd;
1372
1373         for (i = 0; i < num_rcvd; i++) {
1374                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1375                 put_page(page_info->page);
1376                 memset(page_info, 0, sizeof(*page_info));
1377                 index_inc(&rxcp->rxq_idx, rxq->len);
1378         }
1379 }
1380
1381 /*
1382  * skb_fill_rx_data forms a complete skb for an ether frame
1383  * indicated by rxcp.
1384  */
1385 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1386                              struct be_rx_compl_info *rxcp)
1387 {
1388         struct be_queue_info *rxq = &rxo->q;
1389         struct be_rx_page_info *page_info;
1390         u16 i, j;
1391         u16 hdr_len, curr_frag_len, remaining;
1392         u8 *start;
1393
1394         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1395         start = page_address(page_info->page) + page_info->page_offset;
1396         prefetch(start);
1397
1398         /* Copy data in the first descriptor of this completion */
1399         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1400
1401         skb->len = curr_frag_len;
1402         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1403                 memcpy(skb->data, start, curr_frag_len);
1404                 /* Complete packet has now been moved to data */
1405                 put_page(page_info->page);
1406                 skb->data_len = 0;
1407                 skb->tail += curr_frag_len;
1408         } else {
1409                 hdr_len = ETH_HLEN;
1410                 memcpy(skb->data, start, hdr_len);
1411                 skb_shinfo(skb)->nr_frags = 1;
1412                 skb_frag_set_page(skb, 0, page_info->page);
1413                 skb_shinfo(skb)->frags[0].page_offset =
1414                                         page_info->page_offset + hdr_len;
1415                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1416                 skb->data_len = curr_frag_len - hdr_len;
1417                 skb->truesize += rx_frag_size;
1418                 skb->tail += hdr_len;
1419         }
1420         page_info->page = NULL;
1421
1422         if (rxcp->pkt_size <= rx_frag_size) {
1423                 BUG_ON(rxcp->num_rcvd != 1);
1424                 return;
1425         }
1426
1427         /* More frags present for this completion */
1428         index_inc(&rxcp->rxq_idx, rxq->len);
1429         remaining = rxcp->pkt_size - curr_frag_len;
1430         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1431                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1432                 curr_frag_len = min(remaining, rx_frag_size);
1433
1434                 /* Coalesce all frags from the same physical page in one slot */
1435                 if (page_info->page_offset == 0) {
1436                         /* Fresh page */
1437                         j++;
1438                         skb_frag_set_page(skb, j, page_info->page);
1439                         skb_shinfo(skb)->frags[j].page_offset =
1440                                                         page_info->page_offset;
1441                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1442                         skb_shinfo(skb)->nr_frags++;
1443                 } else {
1444                         put_page(page_info->page);
1445                 }
1446
1447                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1448                 skb->len += curr_frag_len;
1449                 skb->data_len += curr_frag_len;
1450                 skb->truesize += rx_frag_size;
1451                 remaining -= curr_frag_len;
1452                 index_inc(&rxcp->rxq_idx, rxq->len);
1453                 page_info->page = NULL;
1454         }
1455         BUG_ON(j > MAX_SKB_FRAGS);
1456 }
1457
1458 /* Process the RX completion indicated by rxcp when GRO is disabled */
1459 static void be_rx_compl_process(struct be_rx_obj *rxo,
1460                                 struct be_rx_compl_info *rxcp)
1461 {
1462         struct be_adapter *adapter = rxo->adapter;
1463         struct net_device *netdev = adapter->netdev;
1464         struct sk_buff *skb;
1465
1466         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1467         if (unlikely(!skb)) {
1468                 rx_stats(rxo)->rx_drops_no_skbs++;
1469                 be_rx_compl_discard(rxo, rxcp);
1470                 return;
1471         }
1472
1473         skb_fill_rx_data(rxo, skb, rxcp);
1474
1475         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1476                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1477         else
1478                 skb_checksum_none_assert(skb);
1479
1480         skb->protocol = eth_type_trans(skb, netdev);
1481         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1482         if (netdev->features & NETIF_F_RXHASH)
1483                 skb->rxhash = rxcp->rss_hash;
1484
1485
1486         if (rxcp->vlanf)
1487                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1488
1489         netif_receive_skb(skb);
1490 }
1491
1492 /* Process the RX completion indicated by rxcp when GRO is enabled */
1493 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1494                              struct be_rx_compl_info *rxcp)
1495 {
1496         struct be_adapter *adapter = rxo->adapter;
1497         struct be_rx_page_info *page_info;
1498         struct sk_buff *skb = NULL;
1499         struct be_queue_info *rxq = &rxo->q;
1500         u16 remaining, curr_frag_len;
1501         u16 i, j;
1502
1503         skb = napi_get_frags(napi);
1504         if (!skb) {
1505                 be_rx_compl_discard(rxo, rxcp);
1506                 return;
1507         }
1508
1509         remaining = rxcp->pkt_size;
1510         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1511                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1512
1513                 curr_frag_len = min(remaining, rx_frag_size);
1514
1515                 /* Coalesce all frags from the same physical page in one slot */
1516                 if (i == 0 || page_info->page_offset == 0) {
1517                         /* First frag or Fresh page */
1518                         j++;
1519                         skb_frag_set_page(skb, j, page_info->page);
1520                         skb_shinfo(skb)->frags[j].page_offset =
1521                                                         page_info->page_offset;
1522                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1523                 } else {
1524                         put_page(page_info->page);
1525                 }
1526                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1527                 skb->truesize += rx_frag_size;
1528                 remaining -= curr_frag_len;
1529                 index_inc(&rxcp->rxq_idx, rxq->len);
1530                 memset(page_info, 0, sizeof(*page_info));
1531         }
1532         BUG_ON(j > MAX_SKB_FRAGS);
1533
1534         skb_shinfo(skb)->nr_frags = j + 1;
1535         skb->len = rxcp->pkt_size;
1536         skb->data_len = rxcp->pkt_size;
1537         skb->ip_summed = CHECKSUM_UNNECESSARY;
1538         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1539         if (adapter->netdev->features & NETIF_F_RXHASH)
1540                 skb->rxhash = rxcp->rss_hash;
1541
1542         if (rxcp->vlanf)
1543                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1544
1545         napi_gro_frags(napi);
1546 }
1547
1548 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1549                                  struct be_rx_compl_info *rxcp)
1550 {
1551         rxcp->pkt_size =
1552                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1553         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1554         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1555         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1556         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1557         rxcp->ip_csum =
1558                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1559         rxcp->l4_csum =
1560                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1561         rxcp->ipv6 =
1562                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1563         rxcp->rxq_idx =
1564                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1565         rxcp->num_rcvd =
1566                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1567         rxcp->pkt_type =
1568                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1569         rxcp->rss_hash =
1570                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1571         if (rxcp->vlanf) {
1572                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1573                                           compl);
1574                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1575                                                compl);
1576         }
1577         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1578 }
1579
1580 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1581                                  struct be_rx_compl_info *rxcp)
1582 {
1583         rxcp->pkt_size =
1584                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1585         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1586         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1587         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1588         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1589         rxcp->ip_csum =
1590                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1591         rxcp->l4_csum =
1592                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1593         rxcp->ipv6 =
1594                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1595         rxcp->rxq_idx =
1596                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1597         rxcp->num_rcvd =
1598                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1599         rxcp->pkt_type =
1600                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1601         rxcp->rss_hash =
1602                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1603         if (rxcp->vlanf) {
1604                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1605                                           compl);
1606                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1607                                                compl);
1608         }
1609         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1610 }
1611
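/* Consume one RX completion from the CQ, if available. The valid bit is
 * polled first; the rmb() ensures the rest of the entry is read only
 * after the valid bit is seen set. The entry is parsed into rxcp (v1
 * format on be3-native chips, v0 otherwise) and its valid bit is then
 * cleared so a stale entry is not re-processed once the CQ wraps.
 */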
1612 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1613 {
1614         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1615         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1616         struct be_adapter *adapter = rxo->adapter;
1617
1618         /* For checking the valid bit it is OK to use either definition as the
1619          * valid bit is at the same position in both v0 and v1 Rx compl */
1620         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1621                 return NULL;
1622
1623         rmb();
1624         be_dws_le_to_cpu(compl, sizeof(*compl));
1625
1626         if (adapter->be3_native)
1627                 be_parse_rx_compl_v1(compl, rxcp);
1628         else
1629                 be_parse_rx_compl_v0(compl, rxcp);
1630
1631         if (rxcp->vlanf) {
1632                 /* vlanf could be wrongly set in some cards.
1633                  * Ignore it if vtm is not set */
1634                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1635                         rxcp->vlanf = 0;
1636
1637                 if (!lancer_chip(adapter))
1638                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1639
1640                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1641                     !adapter->vlan_tag[rxcp->vlan_tag])
1642                         rxcp->vlanf = 0;
1643         }
1644
1645         /* As the compl has been parsed, reset it; we won't touch it again */
1646         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1647
1648         queue_tail_inc(&rxo->cq);
1649         return rxcp;
1650 }
1651
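/* Higher-order allocations are made compound (__GFP_COMP) so that the
 * final put_page() frees the entire allocation rather than just the
 * first PAGE_SIZE chunk.
 */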
1652 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1653 {
1654         u32 order = get_order(size);
1655
1656         if (order > 0)
1657                 gfp |= __GFP_COMP;
1658         return  alloc_pages(gfp, order);
1659 }
1660
1661 /*
1662  * Allocate a page, split it into fragments of size rx_frag_size and post
1663  * them as receive buffers to BE
1664  */
1665 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1666 {
1667         struct be_adapter *adapter = rxo->adapter;
1668         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1669         struct be_queue_info *rxq = &rxo->q;
1670         struct page *pagep = NULL;
1671         struct be_eth_rx_d *rxd;
1672         u64 page_dmaaddr = 0, frag_dmaaddr;
1673         u32 posted, page_offset = 0;
1674
1675         page_info = &rxo->page_info_tbl[rxq->head];
1676         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1677                 if (!pagep) {
1678                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1679                         if (unlikely(!pagep)) {
1680                                 rx_stats(rxo)->rx_post_fail++;
1681                                 break;
1682                         }
1683                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1684                                                     0, adapter->big_page_size,
1685                                                     DMA_FROM_DEVICE);
1686                         page_info->page_offset = 0;
1687                 } else {
1688                         get_page(pagep);
1689                         page_info->page_offset = page_offset + rx_frag_size;
1690                 }
1691                 page_offset = page_info->page_offset;
1692                 page_info->page = pagep;
1693                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1694                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1695
1696                 rxd = queue_head_node(rxq);
1697                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1698                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1699
1700                 /* Any space left in the current big page for another frag? */
1701                 if ((page_offset + rx_frag_size + rx_frag_size) >
1702                                         adapter->big_page_size) {
1703                         pagep = NULL;
1704                         page_info->last_page_user = true;
1705                 }
1706
1707                 prev_page_info = page_info;
1708                 queue_head_inc(rxq);
1709                 page_info = &rxo->page_info_tbl[rxq->head];
1710         }
1711         if (pagep)
1712                 prev_page_info->last_page_user = true;
1713
1714         if (posted) {
1715                 atomic_add(posted, &rxq->used);
1716                 be_rxq_notify(adapter, rxq->id, posted);
1717         } else if (atomic_read(&rxq->used) == 0) {
1718                 /* Let be_worker replenish when memory is available */
1719                 rxo->rx_post_starved = true;
1720         }
1721 }
1722
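/* TX CQ entries use the same valid-bit handshake as RX completions:
 * poll the valid bit, rmb(), convert from LE, then clear the bit so the
 * entry is not seen again after the ring wraps.
 */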
1723 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1724 {
1725         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1726
1727         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1728                 return NULL;
1729
1730         rmb();
1731         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1732
1733         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1734
1735         queue_tail_inc(tx_cq);
1736         return txcp;
1737 }
1738
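/* Reclaim all WRBs of one completed TX request: the header WRB carries
 * no DMA mapping and is skipped; every data WRB up to last_index is
 * unmapped, the skb is freed, and the total WRB count (header WRB
 * included) is returned so the caller can credit txq->used.
 */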
1739 static u16 be_tx_compl_process(struct be_adapter *adapter,
1740                 struct be_tx_obj *txo, u16 last_index)
1741 {
1742         struct be_queue_info *txq = &txo->q;
1743         struct be_eth_wrb *wrb;
1744         struct sk_buff **sent_skbs = txo->sent_skb_list;
1745         struct sk_buff *sent_skb;
1746         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1747         bool unmap_skb_hdr = true;
1748
1749         sent_skb = sent_skbs[txq->tail];
1750         BUG_ON(!sent_skb);
1751         sent_skbs[txq->tail] = NULL;
1752
1753         /* skip header wrb */
1754         queue_tail_inc(txq);
1755
1756         do {
1757                 cur_index = txq->tail;
1758                 wrb = queue_tail_node(txq);
1759                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1760                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1761                 unmap_skb_hdr = false;
1762
1763                 num_wrbs++;
1764                 queue_tail_inc(txq);
1765         } while (cur_index != last_index);
1766
1767         kfree_skb(sent_skb);
1768         return num_wrbs;
1769 }
1770
1771 /* Return the number of events in the event queue */
1772 static inline int events_get(struct be_eq_obj *eqo)
1773 {
1774         struct be_eq_entry *eqe;
1775         int num = 0;
1776
1777         do {
1778                 eqe = queue_tail_node(&eqo->q);
1779                 if (eqe->evt == 0)
1780                         break;
1781
1782                 rmb();
1783                 eqe->evt = 0;
1784                 num++;
1785                 queue_tail_inc(&eqo->q);
1786         } while (true);
1787
1788         return num;
1789 }
1790
1791 /* Leaves the EQ in disarmed state */
1792 static void be_eq_clean(struct be_eq_obj *eqo)
1793 {
1794         int num = events_get(eqo);
1795
1796         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1797 }
1798
1799 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1800 {
1801         struct be_rx_page_info *page_info;
1802         struct be_queue_info *rxq = &rxo->q;
1803         struct be_queue_info *rx_cq = &rxo->cq;
1804         struct be_rx_compl_info *rxcp;
1805         struct be_adapter *adapter = rxo->adapter;
1806         int flush_wait = 0;
1807         u16 tail;
1808
1809         /* Consume pending rx completions.
1810          * Wait for the flush completion (identified by zero num_rcvd)
1811          * to arrive. Notify CQ even when there are no more CQ entries
1812          * for HW to flush partially coalesced CQ entries.
1813          * In Lancer, there is no need to wait for flush compl.
1814          */
1815         for (;;) {
1816                 rxcp = be_rx_compl_get(rxo);
1817                 if (rxcp == NULL) {
1818                         if (lancer_chip(adapter))
1819                                 break;
1820
1821                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1822                                 dev_warn(&adapter->pdev->dev,
1823                                          "did not receive flush compl\n");
1824                                 break;
1825                         }
1826                         be_cq_notify(adapter, rx_cq->id, true, 0);
1827                         mdelay(1);
1828                 } else {
1829                         be_rx_compl_discard(rxo, rxcp);
1830                         be_cq_notify(adapter, rx_cq->id, true, 1);
1831                         if (rxcp->num_rcvd == 0)
1832                                 break;
1833                 }
1834         }
1835
1836         /* After cleanup, leave the CQ in unarmed state */
1837         be_cq_notify(adapter, rx_cq->id, false, 0);
1838
1839         /* Then free posted rx buffers that were not used */
1840         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1841         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1842                 page_info = get_rx_page_info(rxo, tail);
1843                 put_page(page_info->page);
1844                 memset(page_info, 0, sizeof(*page_info));
1845         }
1846         BUG_ON(atomic_read(&rxq->used));
1847         rxq->tail = rxq->head = 0;
1848 }
1849
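/* Drain TX completions for at most ~200ms across all TXQs. Any posted
 * skbs whose completions never arrive are then force-freed, using
 * wrb_cnt_for_skb() to recompute how many WRBs each skb occupies.
 */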
1850 static void be_tx_compl_clean(struct be_adapter *adapter)
1851 {
1852         struct be_tx_obj *txo;
1853         struct be_queue_info *txq;
1854         struct be_eth_tx_compl *txcp;
1855         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1856         struct sk_buff *sent_skb;
1857         bool dummy_wrb;
1858         int i, pending_txqs;
1859
1860         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1861         do {
1862                 pending_txqs = adapter->num_tx_qs;
1863
1864                 for_all_tx_queues(adapter, txo, i) {
1865                         txq = &txo->q;
1866                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1867                                 end_idx =
1868                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1869                                                       wrb_index, txcp);
1870                                 num_wrbs += be_tx_compl_process(adapter, txo,
1871                                                                 end_idx);
1872                                 cmpl++;
1873                         }
1874                         if (cmpl) {
1875                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1876                                 atomic_sub(num_wrbs, &txq->used);
1877                                 cmpl = 0;
1878                                 num_wrbs = 0;
1879                         }
1880                         if (atomic_read(&txq->used) == 0)
1881                                 pending_txqs--;
1882                 }
1883
1884                 if (pending_txqs == 0 || ++timeo > 200)
1885                         break;
1886
1887                 mdelay(1);
1888         } while (true);
1889
1890         for_all_tx_queues(adapter, txo, i) {
1891                 txq = &txo->q;
1892                 if (atomic_read(&txq->used))
1893                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1894                                 atomic_read(&txq->used));
1895
1896                 /* free posted tx for which compls will never arrive */
1897                 while (atomic_read(&txq->used)) {
1898                         sent_skb = txo->sent_skb_list[txq->tail];
1899                         end_idx = txq->tail;
1900                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1901                                                    &dummy_wrb);
1902                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1903                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1904                         atomic_sub(num_wrbs, &txq->used);
1905                 }
1906         }
1907 }
1908
1909 static void be_evt_queues_destroy(struct be_adapter *adapter)
1910 {
1911         struct be_eq_obj *eqo;
1912         int i;
1913
1914         for_all_evt_queues(adapter, eqo, i) {
1915                 if (eqo->q.created) {
1916                         be_eq_clean(eqo);
1917                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1918                 }
1919                 be_queue_free(adapter, &eqo->q);
1920         }
1921 }
1922
1923 static int be_evt_queues_create(struct be_adapter *adapter)
1924 {
1925         struct be_queue_info *eq;
1926         struct be_eq_obj *eqo;
1927         int i, rc;
1928
1929         adapter->num_evt_qs = num_irqs(adapter);
1930
1931         for_all_evt_queues(adapter, eqo, i) {
1932                 eqo->adapter = adapter;
1933                 eqo->tx_budget = BE_TX_BUDGET;
1934                 eqo->idx = i;
1935                 eqo->max_eqd = BE_MAX_EQD;
1936                 eqo->enable_aic = true;
1937
1938                 eq = &eqo->q;
1939                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1940                                         sizeof(struct be_eq_entry));
1941                 if (rc)
1942                         return rc;
1943
1944                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1945                 if (rc)
1946                         return rc;
1947         }
1948         return 0;
1949 }
1950
1951 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1952 {
1953         struct be_queue_info *q;
1954
1955         q = &adapter->mcc_obj.q;
1956         if (q->created)
1957                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1958         be_queue_free(adapter, q);
1959
1960         q = &adapter->mcc_obj.cq;
1961         if (q->created)
1962                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1963         be_queue_free(adapter, q);
1964 }
1965
1966 /* Must be called only after TX qs are created as MCC shares TX EQ */
1967 static int be_mcc_queues_create(struct be_adapter *adapter)
1968 {
1969         struct be_queue_info *q, *cq;
1970
1971         cq = &adapter->mcc_obj.cq;
1972         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1973                         sizeof(struct be_mcc_compl)))
1974                 goto err;
1975
1976         /* Use the default EQ for MCC completions */
1977         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1978                 goto mcc_cq_free;
1979
1980         q = &adapter->mcc_obj.q;
1981         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1982                 goto mcc_cq_destroy;
1983
1984         if (be_cmd_mccq_create(adapter, q, cq))
1985                 goto mcc_q_free;
1986
1987         return 0;
1988
1989 mcc_q_free:
1990         be_queue_free(adapter, q);
1991 mcc_cq_destroy:
1992         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1993 mcc_cq_free:
1994         be_queue_free(adapter, cq);
1995 err:
1996         return -1;
1997 }
1998
1999 static void be_tx_queues_destroy(struct be_adapter *adapter)
2000 {
2001         struct be_queue_info *q;
2002         struct be_tx_obj *txo;
2003         u8 i;
2004
2005         for_all_tx_queues(adapter, txo, i) {
2006                 q = &txo->q;
2007                 if (q->created)
2008                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2009                 be_queue_free(adapter, q);
2010
2011                 q = &txo->cq;
2012                 if (q->created)
2013                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2014                 be_queue_free(adapter, q);
2015         }
2016 }
2017
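/* A single TXQ is used on BE2 chips, in multi-channel mode, on
 * non-Lancer VFs, and when SR-IOV is to be enabled on non-Lancer chips;
 * otherwise the FW-reported maximum is used.
 */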
2018 static int be_num_txqs_want(struct be_adapter *adapter)
2019 {
2020         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2021             be_is_mc(adapter) ||
2022             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
2023             BE2_chip(adapter))
2024                 return 1;
2025         else
2026                 return adapter->max_tx_queues;
2027 }
2028
2029 static int be_tx_cqs_create(struct be_adapter *adapter)
2030 {
2031         struct be_queue_info *cq, *eq;
2032         int status;
2033         struct be_tx_obj *txo;
2034         u8 i;
2035
2036         adapter->num_tx_qs = be_num_txqs_want(adapter);
2037         if (adapter->num_tx_qs != MAX_TX_QS) {
2038                 rtnl_lock();
2039                 netif_set_real_num_tx_queues(adapter->netdev,
2040                         adapter->num_tx_qs);
2041                 rtnl_unlock();
2042         }
2043
2044         for_all_tx_queues(adapter, txo, i) {
2045                 cq = &txo->cq;
2046                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2047                                         sizeof(struct be_eth_tx_compl));
2048                 if (status)
2049                         return status;
2050
2051                 /* If num_evt_qs is less than num_tx_qs, then more than
2052                  * one txq shares an eq
2053                  */
2054                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2055                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2056                 if (status)
2057                         return status;
2058         }
2059         return 0;
2060 }
2061
2062 static int be_tx_qs_create(struct be_adapter *adapter)
2063 {
2064         struct be_tx_obj *txo;
2065         int i, status;
2066
2067         for_all_tx_queues(adapter, txo, i) {
2068                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2069                                         sizeof(struct be_eth_wrb));
2070                 if (status)
2071                         return status;
2072
2073                 status = be_cmd_txq_create(adapter, txo);
2074                 if (status)
2075                         return status;
2076         }
2077
2078         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2079                  adapter->num_tx_qs);
2080         return 0;
2081 }
2082
2083 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2084 {
2085         struct be_queue_info *q;
2086         struct be_rx_obj *rxo;
2087         int i;
2088
2089         for_all_rx_queues(adapter, rxo, i) {
2090                 q = &rxo->cq;
2091                 if (q->created)
2092                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2093                 be_queue_free(adapter, q);
2094         }
2095 }
2096
2097 static int be_rx_cqs_create(struct be_adapter *adapter)
2098 {
2099         struct be_queue_info *eq, *cq;
2100         struct be_rx_obj *rxo;
2101         int rc, i;
2102
2103         /* We'll create as many RSS rings as there are irqs.
2104          * But when there's only one irq there's no use in creating RSS rings.
2105          */
2106         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2107                                 num_irqs(adapter) + 1 : 1;
2108         if (adapter->num_rx_qs != MAX_RX_QS) {
2109                 rtnl_lock();
2110                 netif_set_real_num_rx_queues(adapter->netdev,
2111                                              adapter->num_rx_qs);
2112                 rtnl_unlock();
2113         }
2114
2115         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2116         for_all_rx_queues(adapter, rxo, i) {
2117                 rxo->adapter = adapter;
2118                 cq = &rxo->cq;
2119                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2120                                 sizeof(struct be_eth_rx_compl));
2121                 if (rc)
2122                         return rc;
2123
2124                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2125                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2126                 if (rc)
2127                         return rc;
2128         }
2129
2130         dev_info(&adapter->pdev->dev,
2131                  "created %d RSS queue(s) and 1 default RX queue\n",
2132                  adapter->num_rx_qs - 1);
2133         return 0;
2134 }
2135
2136 static irqreturn_t be_intx(int irq, void *dev)
2137 {
2138         struct be_eq_obj *eqo = dev;
2139         struct be_adapter *adapter = eqo->adapter;
2140         int num_evts = 0;
2141
2142         /* IRQ is not expected when NAPI is scheduled as the EQ
2143          * will not be armed.
2144          * But, this can happen on Lancer INTx where it takes
2145          * a while to de-assert INTx or in BE2 where occasionally
2146          * an interrupt may be raised even when EQ is unarmed.
2147          * If NAPI is already scheduled, then counting & notifying
2148          * events will orphan them.
2149          */
2150         if (napi_schedule_prep(&eqo->napi)) {
2151                 num_evts = events_get(eqo);
2152                 __napi_schedule(&eqo->napi);
2153                 if (num_evts)
2154                         eqo->spurious_intr = 0;
2155         }
2156         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2157
2158         /* Return IRQ_HANDLED only for the first spurious intr
2159          * after a valid intr to stop the kernel from branding
2160          * this irq as a bad one!
2161          */
2162         if (num_evts || eqo->spurious_intr++ == 0)
2163                 return IRQ_HANDLED;
2164         else
2165                 return IRQ_NONE;
2166 }
2167
2168 static irqreturn_t be_msix(int irq, void *dev)
2169 {
2170         struct be_eq_obj *eqo = dev;
2171
2172         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2173         napi_schedule(&eqo->napi);
2174         return IRQ_HANDLED;
2175 }
2176
2177 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2178 {
2179         return (rxcp->tcpf && !rxcp->err) ? true : false;
2180 }
2181
2182 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2183                         int budget)
2184 {
2185         struct be_adapter *adapter = rxo->adapter;
2186         struct be_queue_info *rx_cq = &rxo->cq;
2187         struct be_rx_compl_info *rxcp;
2188         u32 work_done;
2189
2190         for (work_done = 0; work_done < budget; work_done++) {
2191                 rxcp = be_rx_compl_get(rxo);
2192                 if (!rxcp)
2193                         break;
2194
2195                 /* Is it a flush compl that has no data */
2196                 if (unlikely(rxcp->num_rcvd == 0))
2197                         goto loop_continue;
2198
2199                 /* Discard compl with partial DMA Lancer B0 */
2200                 if (unlikely(!rxcp->pkt_size)) {
2201                         be_rx_compl_discard(rxo, rxcp);
2202                         goto loop_continue;
2203                 }
2204
2205                 /* On BE drop pkts that arrive due to imperfect filtering in
2206                  * promiscuous mode on some SKUs
2207                  */
2208                 if (unlikely(rxcp->port != adapter->port_num &&
2209                                 !lancer_chip(adapter))) {
2210                         be_rx_compl_discard(rxo, rxcp);
2211                         goto loop_continue;
2212                 }
2213
2214                 if (do_gro(rxcp))
2215                         be_rx_compl_process_gro(rxo, napi, rxcp);
2216                 else
2217                         be_rx_compl_process(rxo, rxcp);
2218 loop_continue:
2219                 be_rx_stats_update(rxo, rxcp);
2220         }
2221
2222         if (work_done) {
2223                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2224
2225                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2226                         be_post_rx_frags(rxo, GFP_ATOMIC);
2227         }
2228
2229         return work_done;
2230 }
2231
2232 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2233                           int budget, int idx)
2234 {
2235         struct be_eth_tx_compl *txcp;
2236         int num_wrbs = 0, work_done;
2237
2238         for (work_done = 0; work_done < budget; work_done++) {
2239                 txcp = be_tx_compl_get(&txo->cq);
2240                 if (!txcp)
2241                         break;
2242                 num_wrbs += be_tx_compl_process(adapter, txo,
2243                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2244                                         wrb_index, txcp));
2245         }
2246
2247         if (work_done) {
2248                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2249                 atomic_sub(num_wrbs, &txo->q.used);
2250
2251                 /* As Tx wrbs have been freed up, wake up netdev queue
2252                  * if it was stopped due to lack of tx wrbs.  */
2253                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2254                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2255                         netif_wake_subqueue(adapter->netdev, idx);
2256                 }
2257
2258                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2259                 tx_stats(txo)->tx_compl += work_done;
2260                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2261         }
2262         return (work_done < budget); /* Done */
2263 }
2264
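/* NAPI poll handler: services every TXQ and RXQ mapped to this EQ
 * (queues are striped across EQs by index) and the MCC queue when this
 * is the MCC EQ. The EQ is re-armed only if all work fit in the budget;
 * otherwise events are counted and cleared and polling continues.
 */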
2265 int be_poll(struct napi_struct *napi, int budget)
2266 {
2267         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2268         struct be_adapter *adapter = eqo->adapter;
2269         int max_work = 0, work, i, num_evts;
2270         bool tx_done;
2271
2272         num_evts = events_get(eqo);
2273
2274         /* Process all TXQs serviced by this EQ */
2275         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2276                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2277                                         eqo->tx_budget, i);
2278                 if (!tx_done)
2279                         max_work = budget;
2280         }
2281
2282         /* This loop will iterate twice for EQ0, for which completions of
2283          * the last RXQ (the default one) are also processed.
2284          * For other EQs the loop iterates only once.
2285          */
2286         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2287                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2288                 max_work = max(work, max_work);
2289         }
2290
2291         if (is_mcc_eqo(eqo))
2292                 be_process_mcc(adapter);
2293
2294         if (max_work < budget) {
2295                 napi_complete(napi);
2296                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2297         } else {
2298                 /* As we'll continue in polling mode, count and clear events */
2299                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2300         }
2301         return max_work;
2302 }
2303
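/* Lancer reports errors through the SLIPORT status/error registers,
 * while BE2/BE3 report Unrecoverable Errors (UEs) through UE status
 * registers in PCI config space; bits set in the corresponding mask
 * registers are ignored.
 */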
2304 void be_detect_error(struct be_adapter *adapter)
2305 {
2306         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2307         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2308         u32 i;
2309
2310         if (be_hw_error(adapter))
2311                 return;
2312
2313         if (lancer_chip(adapter)) {
2314                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2315                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2316                         sliport_err1 = ioread32(adapter->db +
2317                                         SLIPORT_ERROR1_OFFSET);
2318                         sliport_err2 = ioread32(adapter->db +
2319                                         SLIPORT_ERROR2_OFFSET);
2320                 }
2321         } else {
2322                 pci_read_config_dword(adapter->pdev,
2323                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2324                 pci_read_config_dword(adapter->pdev,
2325                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2326                 pci_read_config_dword(adapter->pdev,
2327                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2328                 pci_read_config_dword(adapter->pdev,
2329                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2330
2331                 ue_lo = (ue_lo & ~ue_lo_mask);
2332                 ue_hi = (ue_hi & ~ue_hi_mask);
2333         }
2334
2335         /* On certain platforms BE hardware can indicate spurious UEs.
2336          * In case of a real UE the h/w will stop working completely anyway,
2337          * so hw_error is not set for UE detection.
2338          */
2339         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2340                 adapter->hw_error = true;
2341                 dev_err(&adapter->pdev->dev,
2342                         "Error detected in the card\n");
2343         }
2344
2345         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2346                 dev_err(&adapter->pdev->dev,
2347                         "ERR: sliport status 0x%x\n", sliport_status);
2348                 dev_err(&adapter->pdev->dev,
2349                         "ERR: sliport error1 0x%x\n", sliport_err1);
2350                 dev_err(&adapter->pdev->dev,
2351                         "ERR: sliport error2 0x%x\n", sliport_err2);
2352         }
2353
2354         if (ue_lo) {
2355                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2356                         if (ue_lo & 1)
2357                                 dev_err(&adapter->pdev->dev,
2358                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2359                 }
2360         }
2361
2362         if (ue_hi) {
2363                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2364                         if (ue_hi & 1)
2365                                 dev_err(&adapter->pdev->dev,
2366                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2367                 }
2368         }
2369
2370 }
2371
2372 static void be_msix_disable(struct be_adapter *adapter)
2373 {
2374         if (msix_enabled(adapter)) {
2375                 pci_disable_msix(adapter->pdev);
2376                 adapter->num_msix_vec = 0;
2377         }
2378 }
2379
2380 static uint be_num_rss_want(struct be_adapter *adapter)
2381 {
2382         u32 num = 0;
2383
2384         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2385             (lancer_chip(adapter) ||
2386              (!sriov_want(adapter) && be_physfn(adapter)))) {
2387                 num = adapter->max_rss_queues;
2388                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2389         }
2390         return num;
2391 }
2392
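/* Size the MSI-X request to the desired RSS ring count (capped by the
 * number of online CPUs), plus RoCE vectors when RoCE is supported.
 * pci_enable_msix() returns a positive count when fewer vectors are
 * available, in which case the request is retried with that count.
 */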
2393 static int be_msix_enable(struct be_adapter *adapter)
2394 {
2395 #define BE_MIN_MSIX_VECTORS             1
2396         int i, status, num_vec, num_roce_vec = 0;
2397         struct device *dev = &adapter->pdev->dev;
2398
2399         /* If RSS queues are not used, need a vec for default RX Q */
2400         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2401         if (be_roce_supported(adapter)) {
2402                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2403                                         (num_online_cpus() + 1));
2404                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2405                 num_vec += num_roce_vec;
2406                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2407         }
2408         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2409
2410         for (i = 0; i < num_vec; i++)
2411                 adapter->msix_entries[i].entry = i;
2412
2413         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2414         if (status == 0) {
2415                 goto done;
2416         } else if (status >= BE_MIN_MSIX_VECTORS) {
2417                 num_vec = status;
2418                 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2419                                          num_vec);
2420                 if (!status)
2421                         goto done;
2422         }
2423
2424         dev_warn(dev, "MSIx enable failed\n");
2425         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2426         if (!be_physfn(adapter))
2427                 return status;
2428         return 0;
2429 done:
2430         if (be_roce_supported(adapter)) {
2431                 if (num_vec > num_roce_vec) {
2432                         adapter->num_msix_vec = num_vec - num_roce_vec;
2433                         adapter->num_msix_roce_vec =
2434                                 num_vec - adapter->num_msix_vec;
2435                 } else {
2436                         adapter->num_msix_vec = num_vec;
2437                         adapter->num_msix_roce_vec = 0;
2438                 }
2439         } else
2440                 adapter->num_msix_vec = num_vec;
2441         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2442         return 0;
2443 }
2444
2445 static inline int be_msix_vec_get(struct be_adapter *adapter,
2446                                 struct be_eq_obj *eqo)
2447 {
2448         return adapter->msix_entries[eqo->idx].vector;
2449 }
2450
2451 static int be_msix_register(struct be_adapter *adapter)
2452 {
2453         struct net_device *netdev = adapter->netdev;
2454         struct be_eq_obj *eqo;
2455         int status, i, vec;
2456
2457         for_all_evt_queues(adapter, eqo, i) {
2458                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2459                 vec = be_msix_vec_get(adapter, eqo);
2460                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2461                 if (status)
2462                         goto err_msix;
2463         }
2464
2465         return 0;
2466 err_msix:
2467         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2468                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2469         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2470                 status);
2471         be_msix_disable(adapter);
2472         return status;
2473 }
2474
2475 static int be_irq_register(struct be_adapter *adapter)
2476 {
2477         struct net_device *netdev = adapter->netdev;
2478         int status;
2479
2480         if (msix_enabled(adapter)) {
2481                 status = be_msix_register(adapter);
2482                 if (status == 0)
2483                         goto done;
2484                 /* INTx is not supported for VF */
2485                 if (!be_physfn(adapter))
2486                         return status;
2487         }
2488
2489         /* INTx: only the first EQ is used */
2490         netdev->irq = adapter->pdev->irq;
2491         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2492                              &adapter->eq_obj[0]);
2493         if (status) {
2494                 dev_err(&adapter->pdev->dev,
2495                         "INTx request IRQ failed - err %d\n", status);
2496                 return status;
2497         }
2498 done:
2499         adapter->isr_registered = true;
2500         return 0;
2501 }
2502
2503 static void be_irq_unregister(struct be_adapter *adapter)
2504 {
2505         struct net_device *netdev = adapter->netdev;
2506         struct be_eq_obj *eqo;
2507         int i;
2508
2509         if (!adapter->isr_registered)
2510                 return;
2511
2512         /* INTx */
2513         if (!msix_enabled(adapter)) {
2514                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2515                 goto done;
2516         }
2517
2518         /* MSIx */
2519         for_all_evt_queues(adapter, eqo, i)
2520                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2521
2522 done:
2523         adapter->isr_registered = false;
2524 }
2525
2526 static void be_rx_qs_destroy(struct be_adapter *adapter)
2527 {
2528         struct be_queue_info *q;
2529         struct be_rx_obj *rxo;
2530         int i;
2531
2532         for_all_rx_queues(adapter, rxo, i) {
2533                 q = &rxo->q;
2534                 if (q->created) {
2535                         be_cmd_rxq_destroy(adapter, q);
2536                         /* After the rxq is invalidated, wait for a grace time
2537                          * of 1ms for all dma to end and the flush compl to
2538                          * arrive
2539                          */
2540                         mdelay(1);
2541                         be_rx_cq_clean(rxo);
2542                 }
2543                 be_queue_free(adapter, q);
2544         }
2545 }
2546
2547 static int be_close(struct net_device *netdev)
2548 {
2549         struct be_adapter *adapter = netdev_priv(netdev);
2550         struct be_eq_obj *eqo;
2551         int i;
2552
2553         be_roce_dev_close(adapter);
2554
2555         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2556                 for_all_evt_queues(adapter, eqo, i)
2557                         napi_disable(&eqo->napi);
2558                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2559         }
2560
2561         be_async_mcc_disable(adapter);
2562
2563         /* Wait for all pending tx completions to arrive so that
2564          * all tx skbs are freed.
2565          */
2566         be_tx_compl_clean(adapter);
2567
2568         be_rx_qs_destroy(adapter);
2569
2570         for_all_evt_queues(adapter, eqo, i) {
2571                 if (msix_enabled(adapter))
2572                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2573                 else
2574                         synchronize_irq(netdev->irq);
2575                 be_eq_clean(eqo);
2576         }
2577
2578         be_irq_unregister(adapter);
2579
2580         return 0;
2581 }
2582
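/* Create the RXQs: the default (non-RSS) queue first, as the FW
 * prefers, then the RSS rings. The 128-entry RSS indirection table is
 * filled by striping the ring ids round-robin; e.g. with 3 RSS rings it
 * repeats the pattern id0, id1, id2 across all 128 slots.
 */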
2583 static int be_rx_qs_create(struct be_adapter *adapter)
2584 {
2585         struct be_rx_obj *rxo;
2586         int rc, i, j;
2587         u8 rsstable[128];
2588
2589         for_all_rx_queues(adapter, rxo, i) {
2590                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2591                                     sizeof(struct be_eth_rx_d));
2592                 if (rc)
2593                         return rc;
2594         }
2595
2596         /* The FW would like the default RXQ to be created first */
2597         rxo = default_rxo(adapter);
2598         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2599                                adapter->if_handle, false, &rxo->rss_id);
2600         if (rc)
2601                 return rc;
2602
2603         for_all_rss_queues(adapter, rxo, i) {
2604                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2605                                        rx_frag_size, adapter->if_handle,
2606                                        true, &rxo->rss_id);
2607                 if (rc)
2608                         return rc;
2609         }
2610
2611         if (be_multi_rxq(adapter)) {
2612                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2613                         for_all_rss_queues(adapter, rxo, i) {
2614                                 if ((j + i) >= 128)
2615                                         break;
2616                                 rsstable[j + i] = rxo->rss_id;
2617                         }
2618                 }
2619                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2620                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2621
2622                 if (!BEx_chip(adapter))
2623                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2624                                                 RSS_ENABLE_UDP_IPV6;
2625
2626                 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2627                                        128);
2628                 if (rc) {
2629                         adapter->rss_flags = 0;
2630                         return rc;
2631                 }
2632         }
2633
2634         /* First time posting */
2635         for_all_rx_queues(adapter, rxo, i)
2636                 be_post_rx_frags(rxo, GFP_KERNEL);
2637         return 0;
2638 }
2639
2640 static int be_open(struct net_device *netdev)
2641 {
2642         struct be_adapter *adapter = netdev_priv(netdev);
2643         struct be_eq_obj *eqo;
2644         struct be_rx_obj *rxo;
2645         struct be_tx_obj *txo;
2646         u8 link_status;
2647         int status, i;
2648
2649         status = be_rx_qs_create(adapter);
2650         if (status)
2651                 goto err;
2652
2653         status = be_irq_register(adapter);
2654         if (status)
2655                 goto err;
2656
2657         for_all_rx_queues(adapter, rxo, i)
2658                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2659
2660         for_all_tx_queues(adapter, txo, i)
2661                 be_cq_notify(adapter, txo->cq.id, true, 0);
2662
2663         be_async_mcc_enable(adapter);
2664
2665         for_all_evt_queues(adapter, eqo, i) {
2666                 napi_enable(&eqo->napi);
2667                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2668         }
2669         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2670
2671         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2672         if (!status)
2673                 be_link_status_update(adapter, link_status);
2674
2675         be_roce_dev_open(adapter);
2676         return 0;
2677 err:
2678         be_close(adapter->netdev);
2679         return -EIO;
2680 }
2681
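/* Program the magic-packet WoL filter: the netdev MAC address when
 * enabling (also arming PME for D3hot/D3cold), an all-zero MAC when
 * disabling.
 */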
2682 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2683 {
2684         struct be_dma_mem cmd;
2685         int status = 0;
2686         u8 mac[ETH_ALEN];
2687
2688         memset(mac, 0, ETH_ALEN);
2689
2690         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2691         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2692                                     GFP_KERNEL | __GFP_ZERO);
2693         if (cmd.va == NULL)
2694                 return -1;
2695
2696         if (enable) {
2697                 status = pci_write_config_dword(adapter->pdev,
2698                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2699                 if (status) {
2700                         dev_err(&adapter->pdev->dev,
2701                                 "Could not enable Wake-on-lan\n");
2702                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2703                                           cmd.dma);
2704                         return status;
2705                 }
2706                 status = be_cmd_enable_magic_wol(adapter,
2707                                 adapter->netdev->dev_addr, &cmd);
2708                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2709                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2710         } else {
2711                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2712                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2713                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2714         }
2715
2716         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2717         return status;
2718 }
2719
2720 /*
2721  * Generate a seed MAC address from the PF MAC address using jhash.
2722  * MAC addresses for VFs are assigned incrementally, starting from the seed.
2723  * These addresses are programmed into the ASIC by the PF; the VF driver
2724  * queries for its MAC address during probe.
2725  */
2726 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2727 {
2728         u32 vf;
2729         int status = 0;
2730         u8 mac[ETH_ALEN];
2731         struct be_vf_cfg *vf_cfg;
2732
2733         be_vf_eth_addr_generate(adapter, mac);
2734
2735         for_all_vfs(adapter, vf_cfg, vf) {
2736                 if (lancer_chip(adapter)) {
2737                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2738                 } else {
2739                         status = be_cmd_pmac_add(adapter, mac,
2740                                                  vf_cfg->if_handle,
2741                                                  &vf_cfg->pmac_id, vf + 1);
2742                 }
2743
2744                 if (status)
2745                         dev_err(&adapter->pdev->dev,
2746                         "MAC address assignment failed for VF %d\n", vf);
2747                 else
2748                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2749
2750                 mac[5] += 1;
2751         }
2752         return status;
2753 }
2754
2755 static int be_vfs_mac_query(struct be_adapter *adapter)
2756 {
2757         int status, vf;
2758         u8 mac[ETH_ALEN];
2759         struct be_vf_cfg *vf_cfg;
2760         bool active;
2761
2762         for_all_vfs(adapter, vf_cfg, vf) {
2763                 be_cmd_get_mac_from_list(adapter, mac, &active,
2764                                          &vf_cfg->pmac_id, 0);
2765
2766                 status = be_cmd_mac_addr_query(adapter, mac, false,
2767                                                vf_cfg->if_handle, 0);
2768                 if (status)
2769                         return status;
2770                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2771         }
2772         return 0;
2773 }
2774
2775 static void be_vf_clear(struct be_adapter *adapter)
2776 {
2777         struct be_vf_cfg *vf_cfg;
2778         u32 vf;
2779
2780         if (be_find_vfs(adapter, ASSIGNED)) {
2781                 dev_warn(&adapter->pdev->dev,
2782                          "VFs are assigned to VMs: not disabling VFs\n");
2783                 goto done;
2784         }
2785
2786         for_all_vfs(adapter, vf_cfg, vf) {
2787                 if (lancer_chip(adapter))
2788                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2789                 else
2790                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2791                                         vf_cfg->pmac_id, vf + 1);
2792
2793                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2794         }
2795         pci_disable_sriov(adapter->pdev);
2796 done:
2797         kfree(adapter->vf_cfg);
2798         adapter->num_vfs = 0;
2799 }
2800
2801 static int be_clear(struct be_adapter *adapter)
2802 {
2803         int i = 1;
2804
2805         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2806                 cancel_delayed_work_sync(&adapter->work);
2807                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2808         }
2809
2810         if (sriov_enabled(adapter))
2811                 be_vf_clear(adapter);
2812
2813         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2814                 be_cmd_pmac_del(adapter, adapter->if_handle,
2815                         adapter->pmac_id[i], 0);
2816
2817         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2818
2819         be_mcc_queues_destroy(adapter);
2820         be_rx_cqs_destroy(adapter);
2821         be_tx_queues_destroy(adapter);
2822         be_evt_queues_destroy(adapter);
2823
2824         kfree(adapter->pmac_id);
2825         adapter->pmac_id = NULL;
2826
2827         be_msix_disable(adapter);
2828         return 0;
2829 }
2830
2831 static int be_vfs_if_create(struct be_adapter *adapter)
2832 {
2833         struct be_vf_cfg *vf_cfg;
2834         u32 cap_flags, en_flags, vf;
2835         int status;
2836
2837         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2838                     BE_IF_FLAGS_MULTICAST;
2839
2840         for_all_vfs(adapter, vf_cfg, vf) {
2841                 if (!BE3_chip(adapter))
2842                         be_cmd_get_profile_config(adapter, &cap_flags,
2843                                                   NULL, vf + 1);
2844
2845                 /* If a FW profile exists, then cap_flags are updated */
2846                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2847                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2848                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2849                                           &vf_cfg->if_handle, vf + 1);
2850                 if (status)
2851                         goto err;
2852         }
2853 err:
2854         return status;
2855 }
2856
2857 static int be_vf_setup_init(struct be_adapter *adapter)
2858 {
2859         struct be_vf_cfg *vf_cfg;
2860         int vf;
2861
2862         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2863                                   GFP_KERNEL);
2864         if (!adapter->vf_cfg)
2865                 return -ENOMEM;
2866
2867         for_all_vfs(adapter, vf_cfg, vf) {
2868                 vf_cfg->if_handle = -1;
2869                 vf_cfg->pmac_id = -1;
2870         }
2871         return 0;
2872 }
2873
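/* Main SR-IOV bring-up path, called from be_setup() when num_vfs is set,
 * e.g. (hypothetical host, assuming the usual module-parameter syntax):
 *
 *   modprobe be2net num_vfs=4
 *
 * If VFs were left enabled by a previous driver load that count wins, and
 * the num_vfs parameter is ignored with a warning. Otherwise the request
 * is clamped to what the device reports before SR-IOV is enabled.
 */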
2874 static int be_vf_setup(struct be_adapter *adapter)
2875 {
2876         struct be_vf_cfg *vf_cfg;
2877         u16 def_vlan, lnk_speed;
2878         int status, old_vfs, vf;
2879         struct device *dev = &adapter->pdev->dev;
2880
2881         old_vfs = be_find_vfs(adapter, ENABLED);
2882         if (old_vfs) {
2883                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2884                 if (old_vfs != num_vfs)
2885                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2886                 adapter->num_vfs = old_vfs;
2887         } else {
2888                 if (num_vfs > adapter->dev_num_vfs)
2889                         dev_info(dev, "Device supports %d VFs and not %d\n",
2890                                  adapter->dev_num_vfs, num_vfs);
2891                 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2892
2893                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2894                 if (status) {
2895                         dev_err(dev, "SRIOV enable failed\n");
2896                         adapter->num_vfs = 0;
2897                         return 0;
2898                 }
2899         }
2900
2901         status = be_vf_setup_init(adapter);
2902         if (status)
2903                 goto err;
2904
2905         if (old_vfs) {
2906                 for_all_vfs(adapter, vf_cfg, vf) {
2907                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2908                         if (status)
2909                                 goto err;
2910                 }
2911         } else {
2912                 status = be_vfs_if_create(adapter);
2913                 if (status)
2914                         goto err;
2915         }
2916
2917         if (old_vfs) {
2918                 status = be_vfs_mac_query(adapter);
2919                 if (status)
2920                         goto err;
2921         } else {
2922                 status = be_vf_eth_addr_config(adapter);
2923                 if (status)
2924                         goto err;
2925         }
2926
2927         for_all_vfs(adapter, vf_cfg, vf) {
2928                 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2929                  * Allow full available bandwidth
2930                  */
2931                 if (BE3_chip(adapter) && !old_vfs)
2932                         be_cmd_set_qos(adapter, 1000, vf + 1);
2933
2934                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2935                                                   NULL, vf + 1);
2936                 if (!status)
2937                         vf_cfg->tx_rate = lnk_speed;
2938
2939                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2940                                                vf + 1, vf_cfg->if_handle);
2941                 if (status)
2942                         goto err;
2943                 vf_cfg->def_vid = def_vlan;
2944
2945                 be_cmd_enable_vf(adapter, vf + 1);
2946         }
2947         return 0;
2948 err:
2949         dev_err(dev, "VF setup failed\n");
2950         be_vf_clear(adapter);
2951         return status;
2952 }
2953
2954 static void be_setup_init(struct be_adapter *adapter)
2955 {
2956         adapter->vlan_prio_bmap = 0xff;
2957         adapter->phy.link_speed = -1;
2958         adapter->if_handle = -1;
2959         adapter->be3_native = false;
2960         adapter->promiscuous = false;
2961         if (be_physfn(adapter))
2962                 adapter->cmd_privileges = MAX_PRIVILEGES;
2963         else
2964                 adapter->cmd_privileges = MIN_PRIVILEGES;
2965 }
2966
2967 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2968                            bool *active_mac, u32 *pmac_id)
2969 {
2970         int status = 0;
2971
2972         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2973                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2974                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2975                         *active_mac = true;
2976                 else
2977                         *active_mac = false;
2978
2979                 return status;
2980         }
2981
2982         if (lancer_chip(adapter)) {
2983                 status = be_cmd_get_mac_from_list(adapter, mac,
2984                                                   active_mac, pmac_id, 0);
2985                 if (*active_mac) {
2986                         status = be_cmd_mac_addr_query(adapter, mac, false,
2987                                                        if_handle, *pmac_id);
2988                 }
2989         } else if (be_physfn(adapter)) {
2990                 /* For BE3, for PF get permanent MAC */
2991                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2992                 *active_mac = false;
2993         } else {
2994                 /* For BE3, for VF get soft MAC assigned by PF*/
2995                 status = be_cmd_mac_addr_query(adapter, mac, false,
2996                                                if_handle, 0);
2997                 *active_mac = true;
2998         }
2999         return status;
3000 }
3001
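/* Discover per-function resource limits. Chips with a FW config profile
 * (non-BEx) report their own limits, which are then clamped to driver
 * maxima; BEx chips fall back to hard-coded defaults. The SR-IOV TotalVFs
 * value is also cached from PCI config space.
 */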
3002 static void be_get_resources(struct be_adapter *adapter)
3003 {
3004         u16 dev_num_vfs;
3005         int pos, status;
3006         bool profile_present = false;
3007         u16 txq_count = 0;
3008
3009         if (!BEx_chip(adapter)) {
3010                 status = be_cmd_get_func_config(adapter);
3011                 if (!status)
3012                         profile_present = true;
3013         } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3014                 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
3015         }
3016
3017         if (profile_present) {
3018                 /* Sanity fixes for Lancer */
3019                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3020                                               BE_UC_PMAC_COUNT);
3021                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3022                                            BE_NUM_VLANS_SUPPORTED);
3023                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3024                                                BE_MAX_MC);
3025                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3026                                                MAX_TX_QS);
3027                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3028                                                 BE3_MAX_RSS_QS);
3029                 adapter->max_event_queues = min_t(u16,
3030                                                   adapter->max_event_queues,
3031                                                   BE3_MAX_RSS_QS);
3032
3033                 if (adapter->max_rss_queues &&
3034                     adapter->max_rss_queues == adapter->max_rx_queues)
3035                         adapter->max_rss_queues -= 1;
3036
3037                 if (adapter->max_event_queues < adapter->max_rss_queues)
3038                         adapter->max_rss_queues = adapter->max_event_queues;
3039
3040         } else {
3041                 if (be_physfn(adapter))
3042                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3043                 else
3044                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3045
3046                 if (adapter->function_mode & FLEX10_MODE)
3047                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3048                 else
3049                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3050
3051                 adapter->max_mcast_mac = BE_MAX_MC;
3052                 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3053                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3054                                                MAX_TX_QS);
3055                 adapter->max_rss_queues = (adapter->be3_native) ?
3056                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3057                 adapter->max_event_queues = BE3_MAX_RSS_QS;
3058
3059                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3060                                         BE_IF_FLAGS_BROADCAST |
3061                                         BE_IF_FLAGS_MULTICAST |
3062                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
3063                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
3064                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
3065                                         BE_IF_FLAGS_PROMISCUOUS;
3066
3067                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3068                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3069         }
3070
3071         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3072         if (pos) {
3073                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3074                                      &dev_num_vfs);
3075                 if (BE3_chip(adapter))
3076                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3077                 adapter->dev_num_vfs = dev_num_vfs;
3078         }
3079 }
3080
3081 /* Routine to query per function resource limits */
3082 static int be_get_config(struct be_adapter *adapter)
3083 {
3084         int status;
3085
3086         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3087                                      &adapter->function_mode,
3088                                      &adapter->function_caps,
3089                                      &adapter->asic_rev);
3090         if (status)
3091                 goto err;
3092
3093         be_get_resources(adapter);
3094
3095         /* primary mac needs 1 pmac entry */
3096         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3097                                    sizeof(u32), GFP_KERNEL);
3098         if (!adapter->pmac_id) {
3099                 status = -ENOMEM;
3100                 goto err;
3101         }
3102
3103 err:
3104         return status;
3105 }
3106
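/* One-shot bring-up: query config, enable MSI-X, create EQ/CQ/MCC/TX/RX
 * queues, create the interface, program the primary MAC, then (on a PF
 * with num_vfs set) attempt SR-IOV setup. Any failure unwinds through
 * be_clear().
 */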
3107 static int be_setup(struct be_adapter *adapter)
3108 {
3109         struct device *dev = &adapter->pdev->dev;
3110         u32 en_flags;
3111         u32 tx_fc, rx_fc;
3112         int status;
3113         u8 mac[ETH_ALEN];
3114         bool active_mac;
3115
3116         be_setup_init(adapter);
3117
3118         if (!lancer_chip(adapter))
3119                 be_cmd_req_native_mode(adapter);
3120
3121         status = be_get_config(adapter);
3122         if (status)
3123                 goto err;
3124
3125         status = be_msix_enable(adapter);
3126         if (status)
3127                 goto err;
3128
3129         status = be_evt_queues_create(adapter);
3130         if (status)
3131                 goto err;
3132
3133         status = be_tx_cqs_create(adapter);
3134         if (status)
3135                 goto err;
3136
3137         status = be_rx_cqs_create(adapter);
3138         if (status)
3139                 goto err;
3140
3141         status = be_mcc_queues_create(adapter);
3142         if (status)
3143                 goto err;
3144
3145         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3146         /* In UMC mode FW does not return right privileges.
3147          * Override with correct privilege equivalent to PF.
3148          */
3149         if (be_is_mc(adapter))
3150                 adapter->cmd_privileges = MAX_PRIVILEGES;
3151
3152         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3153                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3154
3155         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3156                 en_flags |= BE_IF_FLAGS_RSS;
3157
3158         en_flags &= adapter->if_cap_flags;
3159
3160         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3161                                   &adapter->if_handle, 0);
3162         if (status != 0)
3163                 goto err;
3164
3165         memset(mac, 0, ETH_ALEN);
3166         active_mac = false;
3167         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3168                                  &active_mac, &adapter->pmac_id[0]);
3169         if (status != 0)
3170                 goto err;
3171
3172         if (!active_mac) {
3173                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3174                                          &adapter->pmac_id[0], 0);
3175                 if (status != 0)
3176                         goto err;
3177         }
3178
3179         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3180                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3181                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3182         }
3183
3184         status = be_tx_qs_create(adapter);
3185         if (status)
3186                 goto err;
3187
3188         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3189
3190         if (adapter->vlans_added)
3191                 be_vid_config(adapter);
3192
3193         be_set_rx_mode(adapter->netdev);
3194
3195         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3196
3197         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3198                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3199                                         adapter->rx_fc);
3200
3201         if (be_physfn(adapter) && num_vfs) {
3202                 if (adapter->dev_num_vfs)
3203                         be_vf_setup(adapter);
3204                 else
3205                         dev_warn(dev, "device doesn't support SRIOV\n");
3206         }
3207
3208         status = be_cmd_get_phy_info(adapter);
3209         if (!status && be_pause_supported(adapter))
3210                 adapter->phy.fc_autoneg = 1;
3211
3212         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3213         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3214         return 0;
3215 err:
3216         be_clear(adapter);
3217         return status;
3218 }
3219
3220 #ifdef CONFIG_NET_POLL_CONTROLLER
3221 static void be_netpoll(struct net_device *netdev)
3222 {
3223         struct be_adapter *adapter = netdev_priv(netdev);
3224         struct be_eq_obj *eqo;
3225         int i;
3226
3227         for_all_evt_queues(adapter, eqo, i) {
3228                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3229                 napi_schedule(&eqo->napi);
3230         }
3233 }
3234 #endif
3235
3236 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3237 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3238
3239 static bool be_flash_redboot(struct be_adapter *adapter,
3240                         const u8 *p, u32 img_start, int image_size,
3241                         int hdr_size)
3242 {
3243         u32 crc_offset;
3244         u8 flashed_crc[4];
3245         int status;
3246
3247         crc_offset = hdr_size + img_start + image_size - 4;
3248
3249         p += crc_offset;
3250
3251         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3252                         (image_size - 4));
3253         if (status) {
3254                 dev_err(&adapter->pdev->dev,
3255                 "could not get crc from flash, not flashing redboot\n");
3256                 return false;
3257         }
3258
3259         /* update redboot only if crc does not match */
3260         return memcmp(flashed_crc, p, 4) != 0;
3264 }
3265
3266 static bool phy_flashing_required(struct be_adapter *adapter)
3267 {
3268         return (adapter->phy.phy_type == TN_8022 &&
3269                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3270 }
3271
3272 static bool is_comp_in_ufi(struct be_adapter *adapter,
3273                            struct flash_section_info *fsec, int type)
3274 {
3275         int i = 0, img_type = 0;
3276         struct flash_section_info_g2 *fsec_g2 = NULL;
3277
3278         if (BE2_chip(adapter))
3279                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3280
3281         for (i = 0; i < MAX_FLASH_COMP; i++) {
3282                 if (fsec_g2)
3283                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3284                 else
3285                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3286
3287                 if (img_type == type)
3288                         return true;
3289         }
3290         return false;
3292 }
3293
3294 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3295                                                 int header_size,
3296                                                 const struct firmware *fw)
3297 {
3298         struct flash_section_info *fsec = NULL;
3299         const u8 *p = fw->data;
3300
3301         p += header_size;
3302         while (p < (fw->data + fw->size)) {
3303                 fsec = (struct flash_section_info *)p;
3304                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3305                         return fsec;
3306                 p += 32;
3307         }
3308         return NULL;
3309 }
3310
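/* Write one flash component in 32KB chunks. Intermediate chunks use the
 * SAVE opcode; only the final chunk issues the FLASH opcode that commits
 * the image. An ILLEGAL_IOCTL_REQ response for PHY FW is tolerated,
 * presumably because not every board carries a flashable PHY.
 */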
3311 static int be_flash(struct be_adapter *adapter, const u8 *img,
3312                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3313 {
3314         u32 total_bytes = 0, flash_op, num_bytes = 0;
3315         int status = 0;
3316         struct be_cmd_write_flashrom *req = flash_cmd->va;
3317
3318         total_bytes = img_size;
3319         while (total_bytes) {
3320                 num_bytes = min_t(u32, 32 * 1024, total_bytes);
3321
3322                 total_bytes -= num_bytes;
3323
3324                 if (!total_bytes) {
3325                         if (optype == OPTYPE_PHY_FW)
3326                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3327                         else
3328                                 flash_op = FLASHROM_OPER_FLASH;
3329                 } else {
3330                         if (optype == OPTYPE_PHY_FW)
3331                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3332                         else
3333                                 flash_op = FLASHROM_OPER_SAVE;
3334                 }
3335
3336                 memcpy(req->data_buf, img, num_bytes);
3337                 img += num_bytes;
3338                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3339                                                 flash_op, num_bytes);
3340                 if (status) {
3341                         if (status == ILLEGAL_IOCTL_REQ &&
3342                             optype == OPTYPE_PHY_FW)
3343                                 break;
3344                         dev_err(&adapter->pdev->dev,
3345                                 "cmd to write to flash rom failed.\n");
3346                         return status;
3347                 }
3348         }
3349         return 0;
3350 }
3351
3352 /* For BE2, BE3 and BE3-R */
3353 static int be_flash_BEx(struct be_adapter *adapter,
3354                          const struct firmware *fw,
3355                          struct be_dma_mem *flash_cmd,
3356                          int num_of_images)
3357
3358 {
3359         int status = 0, i, filehdr_size = 0;
3360         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3361         const u8 *p = fw->data;
3362         const struct flash_comp *pflashcomp;
3363         int num_comp, redboot;
3364         struct flash_section_info *fsec = NULL;
3365
3366         static const struct flash_comp gen3_flash_types[] = {
3367                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3368                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3369                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3370                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3371                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3372                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3373                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3374                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3375                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3376                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3377                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3378                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3379                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3380                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3381                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3382                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3383                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3384                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3385                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3386                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3387         };
3388
3389         static const struct flash_comp gen2_flash_types[] = {
3390                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3391                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3392                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3393                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3394                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3395                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3396                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3397                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3398                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3399                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3400                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3401                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3402                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3403                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3404                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3405                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3406         };
3407
3408         if (BE3_chip(adapter)) {
3409                 pflashcomp = gen3_flash_types;
3410                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3411                 num_comp = ARRAY_SIZE(gen3_flash_types);
3412         } else {
3413                 pflashcomp = gen2_flash_types;
3414                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3415                 num_comp = ARRAY_SIZE(gen2_flash_types);
3416         }
3417
3418         /* Get flash section info*/
3419         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3420         if (!fsec) {
3421                 dev_err(&adapter->pdev->dev,
3422                         "Invalid Cookie. UFI corrupted?\n");
3423                 return -1;
3424         }
3425         for (i = 0; i < num_comp; i++) {
3426                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3427                         continue;
3428
3429                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3430                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3431                         continue;
3432
3433                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3434                     !phy_flashing_required(adapter))
3435                         continue;
3436
3437                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3438                         redboot = be_flash_redboot(adapter, fw->data,
3439                                 pflashcomp[i].offset, pflashcomp[i].size,
3440                                 filehdr_size + img_hdrs_size);
3441                         if (!redboot)
3442                                 continue;
3443                 }
3444
3445                 p = fw->data;
3446                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3447                 if (p + pflashcomp[i].size > fw->data + fw->size)
3448                         return -1;
3449
3450                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3451                                         pflashcomp[i].size);
3452                 if (status) {
3453                         dev_err(&adapter->pdev->dev,
3454                                 "Flashing section type %d failed.\n",
3455                                 pflashcomp[i].img_type);
3456                         return status;
3457                 }
3458         }
3459         return 0;
3460 }
3461
3462 static int be_flash_skyhawk(struct be_adapter *adapter,
3463                 const struct firmware *fw,
3464                 struct be_dma_mem *flash_cmd, int num_of_images)
3465 {
3466         int status = 0, i, filehdr_size = 0;
3467         int img_offset, img_size, img_optype, redboot;
3468         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3469         const u8 *p = fw->data;
3470         struct flash_section_info *fsec = NULL;
3471
3472         filehdr_size = sizeof(struct flash_file_hdr_g3);
3473         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3474         if (!fsec) {
3475                 dev_err(&adapter->pdev->dev,
3476                         "Invalid Cookie. UFI corrupted?\n");
3477                 return -1;
3478         }
3479
3480         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3481                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3482                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3483
3484                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3485                 case IMAGE_FIRMWARE_iSCSI:
3486                         img_optype = OPTYPE_ISCSI_ACTIVE;
3487                         break;
3488                 case IMAGE_BOOT_CODE:
3489                         img_optype = OPTYPE_REDBOOT;
3490                         break;
3491                 case IMAGE_OPTION_ROM_ISCSI:
3492                         img_optype = OPTYPE_BIOS;
3493                         break;
3494                 case IMAGE_OPTION_ROM_PXE:
3495                         img_optype = OPTYPE_PXE_BIOS;
3496                         break;
3497                 case IMAGE_OPTION_ROM_FCoE:
3498                         img_optype = OPTYPE_FCOE_BIOS;
3499                         break;
3500                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3501                         img_optype = OPTYPE_ISCSI_BACKUP;
3502                         break;
3503                 case IMAGE_NCSI:
3504                         img_optype = OPTYPE_NCSI_FW;
3505                         break;
3506                 default:
3507                         continue;
3508                 }
3509
3510                 if (img_optype == OPTYPE_REDBOOT) {
3511                         redboot = be_flash_redboot(adapter, fw->data,
3512                                         img_offset, img_size,
3513                                         filehdr_size + img_hdrs_size);
3514                         if (!redboot)
3515                                 continue;
3516                 }
3517
3518                 p = fw->data;
3519                 p += filehdr_size + img_offset + img_hdrs_size;
3520                 if (p + img_size > fw->data + fw->size)
3521                         return -1;
3522
3523                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3524                 if (status) {
3525                         dev_err(&adapter->pdev->dev,
3526                                 "Flashing section type %d failed.\n",
3527                                 le32_to_cpu(fsec->fsec_entry[i].type));
3528                         return status;
3529                 }
3530         }
3531         return 0;
3532 }
3533
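/* Poll the SLIPORT physdev-control register until the in-progress bit
 * clears, giving up after SLIPORT_IDLE_TIMEOUT seconds.
 */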
3534 static int lancer_wait_idle(struct be_adapter *adapter)
3535 {
3536 #define SLIPORT_IDLE_TIMEOUT 30
3537         u32 reg_val;
3538         int status = 0, i;
3539
3540         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3541                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3542                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3543                         break;
3544
3545                 ssleep(1);
3546         }
3547
3548         if (i == SLIPORT_IDLE_TIMEOUT)
3549                 status = -1;
3550
3551         return status;
3552 }
3553
3554 static int lancer_fw_reset(struct be_adapter *adapter)
3555 {
3556         int status = 0;
3557
3558         status = lancer_wait_idle(adapter);
3559         if (status)
3560                 return status;
3561
3562         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3563                   PHYSDEV_CONTROL_OFFSET);
3564
3565         return status;
3566 }
3567
3568 static int lancer_fw_download(struct be_adapter *adapter,
3569                                 const struct firmware *fw)
3570 {
3571 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3572 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3573         struct be_dma_mem flash_cmd;
3574         const u8 *data_ptr = NULL;
3575         u8 *dest_image_ptr = NULL;
3576         size_t image_size = 0;
3577         u32 chunk_size = 0;
3578         u32 data_written = 0;
3579         u32 offset = 0;
3580         int status = 0;
3581         u8 add_status = 0;
3582         u8 change_status;
3583
3584         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3585                 dev_err(&adapter->pdev->dev,
3586                         "FW image not properly aligned. "
3587                         "Length must be 4-byte aligned.\n");
3588                 status = -EINVAL;
3589                 goto lancer_fw_exit;
3590         }
3591
3592         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3593                                 + LANCER_FW_DOWNLOAD_CHUNK;
3594         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3595                                           &flash_cmd.dma, GFP_KERNEL);
3596         if (!flash_cmd.va) {
3597                 status = -ENOMEM;
3598                 goto lancer_fw_exit;
3599         }
3600
3601         dest_image_ptr = flash_cmd.va +
3602                                 sizeof(struct lancer_cmd_req_write_object);
3603         image_size = fw->size;
3604         data_ptr = fw->data;
3605
3606         while (image_size) {
3607                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3608
3609                 /* Copy the image chunk content. */
3610                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3611
3612                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3613                                                  chunk_size, offset,
3614                                                  LANCER_FW_DOWNLOAD_LOCATION,
3615                                                  &data_written, &change_status,
3616                                                  &add_status);
3617                 if (status)
3618                         break;
3619
3620                 offset += data_written;
3621                 data_ptr += data_written;
3622                 image_size -= data_written;
3623         }
3624
3625         if (!status) {
3626                 /* Commit the FW written */
3627                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3628                                                  0, offset,
3629                                                  LANCER_FW_DOWNLOAD_LOCATION,
3630                                                  &data_written, &change_status,
3631                                                  &add_status);
3632         }
3633
3634         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3635                                 flash_cmd.dma);
3636         if (status) {
3637                 dev_err(&adapter->pdev->dev,
3638                         "Firmware load error. "
3639                         "Status code: 0x%x Additional Status: 0x%x\n",
3640                         status, add_status);
3641                 goto lancer_fw_exit;
3642         }
3643
3644         if (change_status == LANCER_FW_RESET_NEEDED) {
3645                 status = lancer_fw_reset(adapter);
3646                 if (status) {
3647                         dev_err(&adapter->pdev->dev,
3648                                 "Adapter busy for FW reset.\n"
3649                                 "New FW will not be active.\n");
3650                         goto lancer_fw_exit;
3651                 }
3652         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3653                 dev_err(&adapter->pdev->dev,
3654                         "System reboot required for new FW"
3655                         " to be active\n");
3656         }
3657
3658         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3659 lancer_fw_exit:
3660         return status;
3661 }
3662
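/* UFI image flavours: the type is derived from the build string and ASIC
 * revision in the flash file header and must match the chip being flashed
 * (TYPE3R is the BE3-R variant of the BE3 image).
 */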
3663 #define UFI_TYPE2               2
3664 #define UFI_TYPE3               3
3665 #define UFI_TYPE3R              10
3666 #define UFI_TYPE4               4
3667 static int be_get_ufi_type(struct be_adapter *adapter,
3668                            struct flash_file_hdr_g3 *fhdr)
3669 {
3670         if (fhdr == NULL)
3671                 goto be_get_ufi_exit;
3672
3673         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3674                 return UFI_TYPE4;
3675         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3676                 if (fhdr->asic_type_rev == 0x10)
3677                         return UFI_TYPE3R;
3678                 else
3679                         return UFI_TYPE3;
3680         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3681                 return UFI_TYPE2;
3682
3683 be_get_ufi_exit:
3684         dev_err(&adapter->pdev->dev,
3685                 "UFI and Interface are not compatible for flashing\n");
3686         return -1;
3687 }
3688
3689 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3690 {
3691         struct flash_file_hdr_g3 *fhdr3;
3692         struct image_hdr *img_hdr_ptr = NULL;
3693         struct be_dma_mem flash_cmd;
3694         const u8 *p;
3695         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3696
3697         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3698         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3699                                           &flash_cmd.dma, GFP_KERNEL);
3700         if (!flash_cmd.va) {
3701                 status = -ENOMEM;
3702                 goto be_fw_exit;
3703         }
3704
3705         p = fw->data;
3706         fhdr3 = (struct flash_file_hdr_g3 *)p;
3707
3708         ufi_type = be_get_ufi_type(adapter, fhdr3);
3709
3710         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3711         for (i = 0; i < num_imgs; i++) {
3712                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3713                                 (sizeof(struct flash_file_hdr_g3) +
3714                                  i * sizeof(struct image_hdr)));
3715                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3716                         switch (ufi_type) {
3717                         case UFI_TYPE4:
3718                                 status = be_flash_skyhawk(adapter, fw,
3719                                                         &flash_cmd, num_imgs);
3720                                 break;
3721                         case UFI_TYPE3R:
3722                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3723                                                       num_imgs);
3724                                 break;
3725                         case UFI_TYPE3:
3726                                 /* Do not flash this ufi on BE3-R cards */
3727                                 if (adapter->asic_rev < 0x10)
3728                                         status = be_flash_BEx(adapter, fw,
3729                                                               &flash_cmd,
3730                                                               num_imgs);
3731                                 else {
3732                                         status = -1;
3733                                         dev_err(&adapter->pdev->dev,
3734                                                 "Can't load BE3 UFI on BE3R\n");
3735                                 }
3736                         }
3737                 }
3738         }
3739
3740         if (ufi_type == UFI_TYPE2)
3741                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3742         else if (ufi_type == -1)
3743                 status = -1;
3744
3745         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3746                           flash_cmd.dma);
3747         if (status) {
3748                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3749                 goto be_fw_exit;
3750         }
3751
3752         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3753
3754 be_fw_exit:
3755         return status;
3756 }
3757
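/* Entry point for user-initiated firmware flashing, typically reached via
 * ethtool's flash-device operation, e.g. (hypothetical interface and file
 * names):
 *
 *   ethtool -f eth0 fw_image.ufi
 *
 * The interface must be up; the check below refuses to flash otherwise.
 */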
3758 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3759 {
3760         const struct firmware *fw;
3761         int status;
3762
3763         if (!netif_running(adapter->netdev)) {
3764                 dev_err(&adapter->pdev->dev,
3765                         "Firmware load not allowed (interface is down)\n");
3766                 return -1;
3767         }
3768
3769         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3770         if (status)
3771                 goto fw_exit;
3772
3773         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3774
3775         if (lancer_chip(adapter))
3776                 status = lancer_fw_download(adapter, fw);
3777         else
3778                 status = be_fw_download(adapter, fw);
3779
3780 fw_exit:
3781         release_firmware(fw);
3782         return status;
3783 }
3784
3785 static const struct net_device_ops be_netdev_ops = {
3786         .ndo_open               = be_open,
3787         .ndo_stop               = be_close,
3788         .ndo_start_xmit         = be_xmit,
3789         .ndo_set_rx_mode        = be_set_rx_mode,
3790         .ndo_set_mac_address    = be_mac_addr_set,
3791         .ndo_change_mtu         = be_change_mtu,
3792         .ndo_get_stats64        = be_get_stats64,
3793         .ndo_validate_addr      = eth_validate_addr,
3794         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3795         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3796         .ndo_set_vf_mac         = be_set_vf_mac,
3797         .ndo_set_vf_vlan        = be_set_vf_vlan,
3798         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3799         .ndo_get_vf_config      = be_get_vf_config,
3800 #ifdef CONFIG_NET_POLL_CONTROLLER
3801         .ndo_poll_controller    = be_netpoll,
3802 #endif
3803 };
3804
3805 static void be_netdev_init(struct net_device *netdev)
3806 {
3807         struct be_adapter *adapter = netdev_priv(netdev);
3808         struct be_eq_obj *eqo;
3809         int i;
3810
3811         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3812                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3813                 NETIF_F_HW_VLAN_CTAG_TX;
3814         if (be_multi_rxq(adapter))
3815                 netdev->hw_features |= NETIF_F_RXHASH;
3816
3817         netdev->features |= netdev->hw_features |
3818                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3819
3820         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3821                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3822
3823         netdev->priv_flags |= IFF_UNICAST_FLT;
3824
3825         netdev->flags |= IFF_MULTICAST;
3826
3827         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3828
3829         netdev->netdev_ops = &be_netdev_ops;
3830
3831         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3832
3833         for_all_evt_queues(adapter, eqo, i)
3834                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3835 }
3836
3837 static void be_unmap_pci_bars(struct be_adapter *adapter)
3838 {
3839         if (adapter->csr)
3840                 pci_iounmap(adapter->pdev, adapter->csr);
3841         if (adapter->db)
3842                 pci_iounmap(adapter->pdev, adapter->db);
3843 }
3844
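/* Doorbell BAR number: Lancer and VFs expose doorbells on BAR 0, while a
 * PF on the other chips uses BAR 4.
 */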
3845 static int db_bar(struct be_adapter *adapter)
3846 {
3847         if (lancer_chip(adapter) || !be_physfn(adapter))
3848                 return 0;
3849         else
3850                 return 4;
3851 }
3852
3853 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3854 {
3855         if (skyhawk_chip(adapter)) {
3856                 adapter->roce_db.size = 4096;
3857                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3858                                                               db_bar(adapter));
3859                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3860                                                                db_bar(adapter));
3861         }
3862         return 0;
3863 }
3864
3865 static int be_map_pci_bars(struct be_adapter *adapter)
3866 {
3867         u8 __iomem *addr;
3868         u32 sli_intf;
3869
3870         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3871         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3872                                 SLI_INTF_IF_TYPE_SHIFT;
3873
3874         if (BEx_chip(adapter) && be_physfn(adapter)) {
3875                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3876                 if (adapter->csr == NULL)
3877                         return -ENOMEM;
3878         }
3879
3880         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3881         if (addr == NULL)
3882                 goto pci_map_err;
3883         adapter->db = addr;
3884
3885         be_roce_map_pci_bars(adapter);
3886         return 0;
3887
3888 pci_map_err:
3889         be_unmap_pci_bars(adapter);
3890         return -ENOMEM;
3891 }
3892
3893 static void be_ctrl_cleanup(struct be_adapter *adapter)
3894 {
3895         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3896
3897         be_unmap_pci_bars(adapter);
3898
3899         if (mem->va)
3900                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3901                                   mem->dma);
3902
3903         mem = &adapter->rx_filter;
3904         if (mem->va)
3905                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3906                                   mem->dma);
3907 }
3908
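/* Map BARs and allocate the mailbox and rx-filter DMA buffers. The mailbox
 * must be 16-byte aligned, hence the over-allocation by 16 bytes and the
 * PTR_ALIGN of both the virtual and DMA addresses.
 */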
3909 static int be_ctrl_init(struct be_adapter *adapter)
3910 {
3911         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3912         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3913         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3914         u32 sli_intf;
3915         int status;
3916
3917         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3918         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3919                                  SLI_INTF_FAMILY_SHIFT;
3920         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3921
3922         status = be_map_pci_bars(adapter);
3923         if (status)
3924                 goto done;
3925
3926         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3927         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3928                                                 mbox_mem_alloc->size,
3929                                                 &mbox_mem_alloc->dma,
3930                                                 GFP_KERNEL);
3931         if (!mbox_mem_alloc->va) {
3932                 status = -ENOMEM;
3933                 goto unmap_pci_bars;
3934         }
3935         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3936         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3937         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3938         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3939
3940         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3941         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3942                                            &rx_filter->dma,
3943                                            GFP_KERNEL | __GFP_ZERO);
3944         if (rx_filter->va == NULL) {
3945                 status = -ENOMEM;
3946                 goto free_mbox;
3947         }
3948
3949         mutex_init(&adapter->mbox_lock);
3950         spin_lock_init(&adapter->mcc_lock);
3951         spin_lock_init(&adapter->mcc_cq_lock);
3952
3953         init_completion(&adapter->flash_compl);
3954         pci_save_state(adapter->pdev);
3955         return 0;
3956
3957 free_mbox:
3958         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3959                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3960
3961 unmap_pci_bars:
3962         be_unmap_pci_bars(adapter);
3963
3964 done:
3965         return status;
3966 }
3967
3968 static void be_stats_cleanup(struct be_adapter *adapter)
3969 {
3970         struct be_dma_mem *cmd = &adapter->stats_cmd;
3971
3972         if (cmd->va)
3973                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3974                                   cmd->va, cmd->dma);
3975 }
3976
3977 static int be_stats_init(struct be_adapter *adapter)
3978 {
3979         struct be_dma_mem *cmd = &adapter->stats_cmd;
3980
3981         if (lancer_chip(adapter))
3982                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3983         else if (BE2_chip(adapter))
3984                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3985         else
3986                 /* BE3 and Skyhawk */
3987                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3988
3989         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3990                                      GFP_KERNEL | __GFP_ZERO);
3991         if (cmd->va == NULL)
3992                 return -1;
3993         return 0;
3994 }
3995
3996 static void be_remove(struct pci_dev *pdev)
3997 {
3998         struct be_adapter *adapter = pci_get_drvdata(pdev);
3999
4000         if (!adapter)
4001                 return;
4002
4003         be_roce_dev_remove(adapter);
4004         be_intr_set(adapter, false);
4005
4006         cancel_delayed_work_sync(&adapter->func_recovery_work);
4007
4008         unregister_netdev(adapter->netdev);
4009
4010         be_clear(adapter);
4011
4012         /* tell fw we're done with firing cmds */
4013         be_cmd_fw_clean(adapter);
4014
4015         be_stats_cleanup(adapter);
4016
4017         be_ctrl_cleanup(adapter);
4018
4019         pci_disable_pcie_error_reporting(pdev);
4020
4021         pci_set_drvdata(pdev, NULL);
4022         pci_release_regions(pdev);
4023         pci_disable_device(pdev);
4024
4025         free_netdev(adapter->netdev);
4026 }
4027
4028 bool be_is_wol_supported(struct be_adapter *adapter)
4029 {
4030         return (adapter->wol_cap & BE_WOL_CAP) &&
4031                !be_is_wol_excluded(adapter);
4032 }
4033
4034 u32 be_get_fw_log_level(struct be_adapter *adapter)
4035 {
4036         struct be_dma_mem extfat_cmd;
4037         struct be_fat_conf_params *cfgs;
4038         int status;
4039         u32 level = 0;
4040         int j;
4041
4042         if (lancer_chip(adapter))
4043                 return 0;
4044
4045         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4046         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4047         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4048                                              &extfat_cmd.dma);
4049
4050         if (!extfat_cmd.va) {
4051                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4052                         __func__);
4053                 goto err;
4054         }
4055
4056         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4057         if (!status) {
4058                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4059                                                 sizeof(struct be_cmd_resp_hdr));
4060                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4061                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4062                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4063                 }
4064         }
4065         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4066                             extfat_cmd.dma);
4067 err:
4068         return level;
4069 }
4070
4071 static int be_get_initial_config(struct be_adapter *adapter)
4072 {
4073         int status;
4074         u32 level;
4075
4076         status = be_cmd_get_cntl_attributes(adapter);
4077         if (status)
4078                 return status;
4079
4080         status = be_cmd_get_acpi_wol_cap(adapter);
4081         if (status) {
4082                 /* in case of a failure to get WOL capabilities
4083                  * check the exclusion list to determine WOL capability */
4084                 if (!be_is_wol_excluded(adapter))
4085                         adapter->wol_cap |= BE_WOL_CAP;
4086         }
4087
4088         if (be_is_wol_supported(adapter))
4089                 adapter->wol = true;
4090
4091         /* Must be a power of 2 or else MODULO will BUG_ON */
4092         adapter->be_get_temp_freq = 64;
4093
4094         level = be_get_fw_log_level(adapter);
4095         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4096
4097         return 0;
4098 }
4099
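/* Lancer error recovery: once FW reports ready again, rebuild the function
 * with be_clear()/be_setup() and reopen the netdev if it was running.
 */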
4100 static int lancer_recover_func(struct be_adapter *adapter)
4101 {
4102         int status;
4103
4104         status = lancer_test_and_set_rdy_state(adapter);
4105         if (status)
4106                 goto err;
4107
4108         if (netif_running(adapter->netdev))
4109                 be_close(adapter->netdev);
4110
4111         be_clear(adapter);
4112
4113         adapter->hw_error = false;
4114         adapter->fw_timeout = false;
4115
4116         status = be_setup(adapter);
4117         if (status)
4118                 goto err;
4119
4120         if (netif_running(adapter->netdev)) {
4121                 status = be_open(adapter->netdev);
4122                 if (status)
4123                         goto err;
4124         }
4125
4126         dev_err(&adapter->pdev->dev,
4127                 "Adapter SLIPORT recovery succeeded\n");
4128         return 0;
4129 err:
4130         if (adapter->eeh_error)
4131                 dev_err(&adapter->pdev->dev,
4132                         "Adapter SLIPORT recovery failed\n");
4133
4134         return status;
4135 }
4136
4137 static void be_func_recovery_task(struct work_struct *work)
4138 {
4139         struct be_adapter *adapter =
4140                 container_of(work, struct be_adapter, func_recovery_work.work);
4141         int status;
4142
4143         be_detect_error(adapter);
4144
4145         if (adapter->hw_error && lancer_chip(adapter)) {
4147                 if (adapter->eeh_error)
4148                         goto out;
4149
4150                 rtnl_lock();
4151                 netif_device_detach(adapter->netdev);
4152                 rtnl_unlock();
4153
4154                 status = lancer_recover_func(adapter);
4155
4156                 if (!status)
4157                         netif_device_attach(adapter->netdev);
4158         }
4159
4160 out:
4161         schedule_delayed_work(&adapter->func_recovery_work,
4162                               msecs_to_jiffies(1000));
4163 }
4164
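/* Periodic (1s) housekeeping: reap MCC completions while the interface is
 * down, refresh stats and die temperature, replenish any starved RX rings
 * and adapt EQ interrupt delays.
 */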
4165 static void be_worker(struct work_struct *work)
4166 {
4167         struct be_adapter *adapter =
4168                 container_of(work, struct be_adapter, work.work);
4169         struct be_rx_obj *rxo;
4170         struct be_eq_obj *eqo;
4171         int i;
4172
4173         /* when interrupts are not yet enabled, just reap any pending
4174          * mcc completions */
4175         if (!netif_running(adapter->netdev)) {
4176                 local_bh_disable();
4177                 be_process_mcc(adapter);
4178                 local_bh_enable();
4179                 goto reschedule;
4180         }
4181
4182         if (!adapter->stats_cmd_sent) {
4183                 if (lancer_chip(adapter))
4184                         lancer_cmd_get_pport_stats(adapter,
4185                                                 &adapter->stats_cmd);
4186                 else
4187                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4188         }
4189
4190         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4191                 be_cmd_get_die_temperature(adapter);
4192
4193         for_all_rx_queues(adapter, rxo, i) {
4194                 if (rxo->rx_post_starved) {
4195                         rxo->rx_post_starved = false;
4196                         be_post_rx_frags(rxo, GFP_KERNEL);
4197                 }
4198         }
4199
4200         for_all_evt_queues(adapter, eqo, i)
4201                 be_eqd_update(adapter, eqo);
4202
4203 reschedule:
4204         adapter->work_counter++;
4205         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4206 }
4207
4208 static bool be_reset_required(struct be_adapter *adapter)
4209 {
4210         return be_find_vfs(adapter, ENABLED) <= 0;
4211 }
4212
4213 static char *mc_name(struct be_adapter *adapter)
4214 {
4215         if (adapter->function_mode & FLEX10_MODE)
4216                 return "FLEX10";
4217         else if (adapter->function_mode & VNIC_MODE)
4218                 return "vNIC";
4219         else if (adapter->function_mode & UMC_ENABLED)
4220                 return "UMC";
4221         else
4222                 return "";
4223 }
4224
4225 static inline char *func_name(struct be_adapter *adapter)
4226 {
4227         return be_physfn(adapter) ? "PF" : "VF";
4228 }
4229
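/* PCI probe: enable the device, map resources, sync with FW readiness,
 * optionally reset the function (skipped when VFs are still enabled, see
 * be_reset_required()), then run be_setup() and register the netdev.
 */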
4230 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4231 {
4232         int status = 0;
4233         struct be_adapter *adapter;
4234         struct net_device *netdev;
4235         char port_name;
4236
4237         status = pci_enable_device(pdev);
4238         if (status)
4239                 goto do_none;
4240
4241         status = pci_request_regions(pdev, DRV_NAME);
4242         if (status)
4243                 goto disable_dev;
4244         pci_set_master(pdev);
4245
4246         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4247         if (netdev == NULL) {
4248                 status = -ENOMEM;
4249                 goto rel_reg;
4250         }
4251         adapter = netdev_priv(netdev);
4252         adapter->pdev = pdev;
4253         pci_set_drvdata(pdev, adapter);
4254         adapter->netdev = netdev;
4255         SET_NETDEV_DEV(netdev, &pdev->dev);
4256
4257         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4258         if (!status) {
4259                 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4260                 if (status < 0) {
4261                         dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4262                         goto free_netdev;
4263                 }
4264                 netdev->features |= NETIF_F_HIGHDMA;
4265         } else {
4266                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4267                 if (status) {
4268                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4269                         goto free_netdev;
4270                 }
4271         }
4272
4273         status = pci_enable_pcie_error_reporting(pdev);
4274         if (status)
4275                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4276
4277         status = be_ctrl_init(adapter);
4278         if (status)
4279                 goto free_netdev;
4280
4281         /* sync up with fw's ready state */
4282         if (be_physfn(adapter)) {
4283                 status = be_fw_wait_ready(adapter);
4284                 if (status)
4285                         goto ctrl_clean;
4286         }
4287
4288         if (be_reset_required(adapter)) {
4289                 status = be_cmd_reset_function(adapter);
4290                 if (status)
4291                         goto ctrl_clean;
4292
4293                 /* Wait for interrupts to quiesce after an FLR */
4294                 msleep(100);
4295         }
4296
4297         /* Allow interrupts for other ULPs running on NIC function */
4298         be_intr_set(adapter, true);
4299
4300         /* tell fw we're ready to fire cmds */
4301         status = be_cmd_fw_init(adapter);
4302         if (status)
4303                 goto ctrl_clean;
4304
4305         status = be_stats_init(adapter);
4306         if (status)
4307                 goto ctrl_clean;
4308
4309         status = be_get_initial_config(adapter);
4310         if (status)
4311                 goto stats_clean;
4312
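             /* be_worker (above) runs once a second; func_recovery_work
              * watches for firmware/UE errors and re-initializes the
              * function when recovery is possible */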
4313         INIT_DELAYED_WORK(&adapter->work, be_worker);
4314         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4315         adapter->rx_fc = adapter->tx_fc = true;
4316
4317         status = be_setup(adapter);
4318         if (status)
4319                 goto stats_clean;
4320
4321         be_netdev_init(netdev);
4322         status = register_netdev(netdev);
4323         if (status != 0)
4324                 goto unsetup;
4325
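             /* Advertise this function to the RoCE driver, if present */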
4326         be_roce_dev_add(adapter);
4327
4328         schedule_delayed_work(&adapter->func_recovery_work,
4329                               msecs_to_jiffies(1000));
4330
4331         be_cmd_query_port_name(adapter, &port_name);
4332
4333         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4334                  func_name(adapter), mc_name(adapter), port_name);
4335
4336         return 0;
4337
4338 unsetup:
4339         be_clear(adapter);
4340 stats_clean:
4341         be_stats_cleanup(adapter);
4342 ctrl_clean:
4343         be_ctrl_cleanup(adapter);
4344 free_netdev:
4345         free_netdev(netdev);
4346         pci_set_drvdata(pdev, NULL);
4347 rel_reg:
4348         pci_release_regions(pdev);
4349 disable_dev:
4350         pci_disable_device(pdev);
4351 do_none:
4352         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4353         return status;
4354 }
4355
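 /* Legacy PM callbacks: suspend tears the function down (arming WoL first
  * if enabled) and resume rebuilds it from scratch via be_setup(). */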
4356 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4357 {
4358         struct be_adapter *adapter = pci_get_drvdata(pdev);
4359         struct net_device *netdev = adapter->netdev;
4360
4361         if (adapter->wol)
4362                 be_setup_wol(adapter, true);
4363
4364         cancel_delayed_work_sync(&adapter->func_recovery_work);
4365
4366         netif_device_detach(netdev);
4367         if (netif_running(netdev)) {
4368                 rtnl_lock();
4369                 be_close(netdev);
4370                 rtnl_unlock();
4371         }
4372         be_clear(adapter);
4373
4374         pci_save_state(pdev);
4375         pci_disable_device(pdev);
4376         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4377         return 0;
4378 }
4379
4380 static int be_resume(struct pci_dev *pdev)
4381 {
4382         int status = 0;
4383         struct be_adapter *adapter = pci_get_drvdata(pdev);
4384         struct net_device *netdev = adapter->netdev;
4385
4386         netif_device_detach(netdev);
4387
4388         status = pci_enable_device(pdev);
4389         if (status)
4390                 return status;
4391
4392         pci_set_power_state(pdev, PCI_D0);
4393         pci_restore_state(pdev);
4394
4395         /* tell fw we're ready to fire cmds */
4396         status = be_cmd_fw_init(adapter);
4397         if (status)
4398                 return status;
4399
4400         status = be_setup(adapter);
             if (status)
                     return status;
4401         if (netif_running(netdev)) {
4402                 rtnl_lock();
4403                 be_open(netdev);
4404                 rtnl_unlock();
4405         }
4406
4407         schedule_delayed_work(&adapter->func_recovery_work,
4408                               msecs_to_jiffies(1000));
4409         netif_device_attach(netdev);
4410
4411         if (adapter->wol)
4412                 be_setup_wol(adapter, false);
4413
4414         return 0;
4415 }
4416
4417 /*
4418  * An FLR will stop BE from DMAing any data.
4419  */
4420 static void be_shutdown(struct pci_dev *pdev)
4421 {
4422         struct be_adapter *adapter = pci_get_drvdata(pdev);
4423
4424         if (!adapter)
4425                 return;
4426
4427         cancel_delayed_work_sync(&adapter->work);
4428         cancel_delayed_work_sync(&adapter->func_recovery_work);
4429
4430         netif_device_detach(adapter->netdev);
4431
4432         be_cmd_reset_function(adapter);
4433
4434         pci_disable_device(pdev);
4435 }
4436
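 /* EEH (PCI channel error) recovery: error_detected quiesces and tears down
  * the function, slot_reset re-enables the device and waits for firmware
  * readiness, and resume re-initializes the function and restarts traffic. */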
4437 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4438                                 pci_channel_state_t state)
4439 {
4440         struct be_adapter *adapter = pci_get_drvdata(pdev);
4441         struct net_device *netdev = adapter->netdev;
4442
4443         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4444
4445         adapter->eeh_error = true;
4446
4447         cancel_delayed_work_sync(&adapter->func_recovery_work);
4448
4449         rtnl_lock();
4450         netif_device_detach(netdev);
4451         rtnl_unlock();
4452
4453         if (netif_running(netdev)) {
4454                 rtnl_lock();
4455                 be_close(netdev);
4456                 rtnl_unlock();
4457         }
4458         be_clear(adapter);
4459
4460         if (state == pci_channel_io_perm_failure)
4461                 return PCI_ERS_RESULT_DISCONNECT;
4462
4463         pci_disable_device(pdev);
4464
4465         /* The error could cause the FW to trigger a flash debug dump.
4466          * Resetting the card while flash dump is in progress
4467          * can cause it not to recover; wait for it to finish.
4468          * Wait only for first function as it is needed only once per
4469          * adapter.
4470          */
4471         if (pdev->devfn == 0)
4472                 ssleep(30);
4473
4474         return PCI_ERS_RESULT_NEED_RESET;
4475 }
4476
4477 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4478 {
4479         struct be_adapter *adapter = pci_get_drvdata(pdev);
4480         int status;
4481
4482         dev_info(&adapter->pdev->dev, "EEH reset\n");
4483         be_clear_all_error(adapter);
4484
4485         status = pci_enable_device(pdev);
4486         if (status)
4487                 return PCI_ERS_RESULT_DISCONNECT;
4488
4489         pci_set_master(pdev);
4490         pci_set_power_state(pdev, PCI_D0);
4491         pci_restore_state(pdev);
4492
4493         /* Check if card is ok and fw is ready */
4494         dev_info(&adapter->pdev->dev,
4495                  "Waiting for FW to be ready after EEH reset\n");
4496         status = be_fw_wait_ready(adapter);
4497         if (status)
4498                 return PCI_ERS_RESULT_DISCONNECT;
4499
4500         pci_cleanup_aer_uncorrect_error_status(pdev);
4501         return PCI_ERS_RESULT_RECOVERED;
4502 }
4503
4504 static void be_eeh_resume(struct pci_dev *pdev)
4505 {
4506         int status = 0;
4507         struct be_adapter *adapter = pci_get_drvdata(pdev);
4508         struct net_device *netdev = adapter->netdev;
4509
4510         dev_info(&adapter->pdev->dev, "EEH resume\n");
4511
4512         pci_save_state(pdev);
4513
4514         status = be_cmd_reset_function(adapter);
4515         if (status)
4516                 goto err;
4517
4518         /* tell fw we're ready to fire cmds */
4519         status = be_cmd_fw_init(adapter);
4520         if (status)
4521                 goto err;
4522
4523         status = be_setup(adapter);
4524         if (status)
4525                 goto err;
4526
4527         if (netif_running(netdev)) {
4528                 status = be_open(netdev);
4529                 if (status)
4530                         goto err;
4531         }
4532
4533         schedule_delayed_work(&adapter->func_recovery_work,
4534                               msecs_to_jiffies(1000));
4535         netif_device_attach(netdev);
4536         return;
4537 err:
4538         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4539 }
4540
4541 static const struct pci_error_handlers be_eeh_handlers = {
4542         .error_detected = be_eeh_err_detected,
4543         .slot_reset = be_eeh_reset,
4544         .resume = be_eeh_resume,
4545 };
4546
4547 static struct pci_driver be_driver = {
4548         .name = DRV_NAME,
4549         .id_table = be_dev_ids,
4550         .probe = be_probe,
4551         .remove = be_remove,
4552         .suspend = be_suspend,
4553         .resume = be_resume,
4554         .shutdown = be_shutdown,
4555         .err_handler = &be_eeh_handlers
4556 };
4557
4558 static int __init be_init_module(void)
4559 {
4560         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4561             rx_frag_size != 2048) {
4562                 pr_warn(DRV_NAME
4563                         " : Module param rx_frag_size must be 2048/4096/8192."
4564                         " Using 2048\n");
4565                 rx_frag_size = 2048;
4566         }
4567
4568         return pci_register_driver(&be_driver);
4569 }
4570 module_init(be_init_module);
4571
4572 static void __exit be_exit_module(void)
4573 {
4574         pci_unregister_driver(&be_driver);
4575 }
4576 module_exit(be_exit_module);
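
 /* Example usage (illustrative values): load with four SR-IOV VFs and
  * 4 KB RX fragments:
  *
  *   modprobe be2net num_vfs=4 rx_frag_size=4096
  */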