21109b59fcfcbd61ba3da51b3d620cefca20937b
[cascardo/linux.git] / drivers / net / ethernet / emulex / benet / be_main.c
1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24
25 MODULE_VERSION(DRV_VER);
26 MODULE_DEVICE_TABLE(pci, be_dev_ids);
27 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28 MODULE_AUTHOR("Emulex Corporation");
29 MODULE_LICENSE("GPL");
30
31 static unsigned int num_vfs;
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static ushort rx_frag_size = 2048;
36 module_param(rx_frag_size, ushort, S_IRUGO);
37 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
46         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
47         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
48         { 0 }
49 };
50 MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Names of the hardware blocks reported by the UE (Unrecoverable Error)
 * status low register.  Presumably entry i names the block for bit i of
 * the register -- TODO confirm against the code that prints these.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Names of the hardware blocks reported by the UE status high register;
 * trailing "Unknown" entries cover undocumented bits so any bit index has
 * a printable name.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
121
122 /* Is BE in a multi-channel mode */
123 static inline bool be_is_mc(struct be_adapter *adapter) {
124         return (adapter->function_mode & FLEX10_MODE ||
125                 adapter->function_mode & VNIC_MODE ||
126                 adapter->function_mode & UMC_ENABLED);
127 }
128
129 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130 {
131         struct be_dma_mem *mem = &q->dma_mem;
132         if (mem->va) {
133                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134                                   mem->dma);
135                 mem->va = NULL;
136         }
137 }
138
139 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140                 u16 len, u16 entry_size)
141 {
142         struct be_dma_mem *mem = &q->dma_mem;
143
144         memset(q, 0, sizeof(*q));
145         q->len = len;
146         q->entry_size = entry_size;
147         mem->size = len * entry_size;
148         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149                                      GFP_KERNEL);
150         if (!mem->va)
151                 return -ENOMEM;
152         memset(mem->va, 0, mem->size);
153         return 0;
154 }
155
/* Enable or disable host interrupts by toggling the HOSTINTR bit in the
 * membar control register in PCI config space.  Does nothing if the bit
 * is already in the requested state, or after an EEH error (device
 * inaccessible).
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
177
178 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
179 {
180         u32 val = 0;
181         val |= qid & DB_RQ_RING_ID_MASK;
182         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
183
184         wmb();
185         iowrite32(val, adapter->db + DB_RQ_OFFSET);
186 }
187
188 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
189 {
190         u32 val = 0;
191         val |= qid & DB_TXULP_RING_ID_MASK;
192         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
193
194         wmb();
195         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
196 }
197
/* Ring the event-queue doorbell: acknowledge @num_popped events on EQ
 * @qid, optionally re-arming the EQ and/or clearing the interrupt.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* skip the MMIO write when the device is inaccessible (EEH) */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
217
/* Ring the completion-queue doorbell: acknowledge @num_popped completions
 * on CQ @qid, optionally re-arming the CQ.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* skip the MMIO write when the device is inaccessible (EEH) */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
233
/* ndo_set_mac_address handler.
 * Programs the requested MAC on the interface and, on success, copies it
 * into netdev->dev_addr.  Returns 0 on success, -EADDRNOTAVAIL for a
 * malformed address, or the firmware-command status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	/* nothing to do when the requested MAC is already in use */
	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	/* add the new MAC first, then delete the old one, so the interface
	 * is never left without a programmed address
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
287
288 /* BE2 supports only v0 cmd */
289 static void *hw_stats_from_cmd(struct be_adapter *adapter)
290 {
291         if (BE2_chip(adapter)) {
292                 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
293
294                 return &cmd->hw_stats;
295         } else  {
296                 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
297
298                 return &cmd->hw_stats;
299         }
300 }
301
302 /* BE2 supports only v0 cmd */
303 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
304 {
305         if (BE2_chip(adapter)) {
306                 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
307
308                 return &hw_stats->erx;
309         } else {
310                 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
311
312                 return &hw_stats->erx;
313         }
314 }
315
316 static void populate_be_v0_stats(struct be_adapter *adapter)
317 {
318         struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
319         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
320         struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
321         struct be_port_rxf_stats_v0 *port_stats =
322                                         &rxf_stats->port[adapter->port_num];
323         struct be_drv_stats *drvs = &adapter->drv_stats;
324
325         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
326         drvs->rx_pause_frames = port_stats->rx_pause_frames;
327         drvs->rx_crc_errors = port_stats->rx_crc_errors;
328         drvs->rx_control_frames = port_stats->rx_control_frames;
329         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
330         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
331         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
332         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
333         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
334         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
335         drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
336         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
337         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
338         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
339         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
340         drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
341         drvs->rx_dropped_header_too_small =
342                 port_stats->rx_dropped_header_too_small;
343         drvs->rx_address_mismatch_drops =
344                                         port_stats->rx_address_mismatch_drops +
345                                         port_stats->rx_vlan_mismatch_drops;
346         drvs->rx_alignment_symbol_errors =
347                 port_stats->rx_alignment_symbol_errors;
348
349         drvs->tx_pauseframes = port_stats->tx_pauseframes;
350         drvs->tx_controlframes = port_stats->tx_controlframes;
351
352         if (adapter->port_num)
353                 drvs->jabber_events = rxf_stats->port1_jabber_events;
354         else
355                 drvs->jabber_events = rxf_stats->port0_jabber_events;
356         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
357         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
358         drvs->forwarded_packets = rxf_stats->forwarded_packets;
359         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
360         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
361         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
362         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
363 }
364
365 static void populate_be_v1_stats(struct be_adapter *adapter)
366 {
367         struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
368         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
369         struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
370         struct be_port_rxf_stats_v1 *port_stats =
371                                         &rxf_stats->port[adapter->port_num];
372         struct be_drv_stats *drvs = &adapter->drv_stats;
373
374         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
375         drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
376         drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
377         drvs->rx_pause_frames = port_stats->rx_pause_frames;
378         drvs->rx_crc_errors = port_stats->rx_crc_errors;
379         drvs->rx_control_frames = port_stats->rx_control_frames;
380         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
381         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
382         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
383         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
384         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
385         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
386         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
387         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
388         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
389         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
390         drvs->rx_dropped_header_too_small =
391                 port_stats->rx_dropped_header_too_small;
392         drvs->rx_input_fifo_overflow_drop =
393                 port_stats->rx_input_fifo_overflow_drop;
394         drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
395         drvs->rx_alignment_symbol_errors =
396                 port_stats->rx_alignment_symbol_errors;
397         drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
398         drvs->tx_pauseframes = port_stats->tx_pauseframes;
399         drvs->tx_controlframes = port_stats->tx_controlframes;
400         drvs->jabber_events = port_stats->jabber_events;
401         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
402         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
403         drvs->forwarded_packets = rxf_stats->forwarded_packets;
404         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
405         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
406         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
407         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
408 }
409
/* Copy counters from a Lancer GET_PPORT_STATS response into drv_stats.
 * Several pport counters are 64-bit; only their low 32 bits (the _lo
 * fields) are carried into the 32-bit driver stats.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer exposes one combined fifo-overflow counter; it feeds both
	 * drv_stats overflow fields (here and rxpp_fifo_overflow_drop below).
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
448
449 static void accumulate_16bit_val(u32 *acc, u16 val)
450 {
451 #define lo(x)                   (x & 0xFFFF)
452 #define hi(x)                   (x & 0xFFFF0000)
453         bool wrapped = val < lo(*acc);
454         u32 newacc = hi(*acc) + val;
455
456         if (wrapped)
457                 newacc += 65536;
458         ACCESS_ONCE(*acc) = newacc;
459 }
460
/* Parse the latest firmware stats response into adapter->drv_stats and
 * the per-RX-queue no-fragments drop counters, dispatching on chip type.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			/* below erx HW counter can actually wrap around after
			 * 65535. Driver accumulates a 32-bit value
			 */
			accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
					     (u16)erx->rx_drops_no_fragments \
					     [rxo->q.id]);
		}
	}
}
487
/* ndo_get_stats64 handler: aggregate the per-queue SW counters and the
 * cached HW drv_stats into @stats.  Per-queue 64-bit counters are read
 * under the u64_stats seqcount, retrying if a writer updated them
 * concurrently (matters on 32-bit hosts where the reads can tear).
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
553
554 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
555 {
556         struct net_device *netdev = adapter->netdev;
557
558         if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
559                 netif_carrier_off(netdev);
560                 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
561         }
562
563         if ((link_status & LINK_STATUS_MASK) == LINK_UP)
564                 netif_carrier_on(netdev);
565         else
566                 netif_carrier_off(netdev);
567 }
568
/* Account one transmitted skb in the per-queue TX stats.  This is the
 * writer side of the u64_stats seqcount that be_get_stats64() reads.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	/* a GSO skb is counted as the number of segments it will become */
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
583
/* Determine number of WRB entries needed to xmit data in an skb */
/* Counts one WRB for the linear head (if it carries data), one per page
 * fragment, one for the header WRB, and possibly one dummy WRB (reported
 * via *dummy) to round the count up to an even number on non-Lancer chips.
 */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	/* true (1) when the linear area holds payload beyond the frags */
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
604
605 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
606 {
607         wrb->frag_pa_hi = upper_32_bits(addr);
608         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
609         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
610         wrb->rsvd0 = 0;
611 }
612
/* Return the VLAN tag to transmit with.  If the priority supplied by the
 * OS is not in the adapter's available-priority bitmap, it is replaced
 * with the adapter's recommended priority; the VID bits are kept.
 */
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}
628
/* Populate the per-packet header WRB: CRC/LSO/checksum/VLAN flags, the
 * number of WRBs that follow (@wrb_cnt) and the total payload length
 * (@len).  @skip_hw_vlan suppresses HW VLAN insertion for this packet.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 is not set on Lancer -- presumably Lancer HW does not
		 * need the IPv6 LSO hint; confirm against the Lancer spec.
		 */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
663
/* Undo the DMA mapping described by one TX WRB (converted from LE in
 * place).  @unmap_single is true when the WRB maps skb header data via
 * dma_map_single(); otherwise it maps a page fragment.  Dummy WRBs
 * (frag_len == 0) are skipped.
 */
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	/* reassemble the 64-bit bus address from its two 32-bit halves */
	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
680
/* Fill the TX ring with the header WRB plus one data WRB per mapped
 * fragment of @skb (and an optional dummy WRB for even-count padding).
 * Returns the number of payload bytes mapped, or 0 if a DMA mapping
 * failed, in which case all mappings made so far are unwound and the
 * ring head is restored.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the header WRB now; it is filled in last, once the total
	 * copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data WRB, for unwinding on error */

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* zero-length WRB padding the request to an even WRB count */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* rewind to the first data WRB and unmap everything mapped so far;
	 * only the first WRB can be a single mapping, the rest are pages
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
747
748 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
749                                              struct sk_buff *skb,
750                                              bool *skip_hw_vlan)
751 {
752         u16 vlan_tag = 0;
753
754         skb = skb_share_check(skb, GFP_ATOMIC);
755         if (unlikely(!skb))
756                 return skb;
757
758         if (vlan_tx_tag_present(skb)) {
759                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
760                 skb = __vlan_put_tag(skb, vlan_tag);
761                 if (skb)
762                         skb->vlan_tci = 0;
763         }
764
765         if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
766                 if (!vlan_tag)
767                         vlan_tag = adapter->pvid;
768                 if (skip_hw_vlan)
769                         *skip_hw_vlan = true;
770         }
771
772         if (vlan_tag) {
773                 skb = __vlan_put_tag(skb, vlan_tag);
774                 if (unlikely(!skb))
775                         return skb;
776
777                 skb->vlan_tci = 0;
778         }
779
780         /* Insert the outer VLAN, if any */
781         if (adapter->qnq_vid) {
782                 vlan_tag = adapter->qnq_vid;
783                 skb = __vlan_put_tag(skb, vlan_tag);
784                 if (unlikely(!skb))
785                         return skb;
786                 if (skip_hw_vlan)
787                         *skip_hw_vlan = true;
788         }
789
790         return skb;
791 }
792
793 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
794 {
795         struct ethhdr *eh = (struct ethhdr *)skb->data;
796         u16 offset = ETH_HLEN;
797
798         if (eh->h_proto == htons(ETH_P_IPV6)) {
799                 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
800
801                 offset += sizeof(struct ipv6hdr);
802                 if (ip6h->nexthdr != NEXTHDR_TCP &&
803                     ip6h->nexthdr != NEXTHDR_UDP) {
804                         struct ipv6_opt_hdr *ehdr =
805                                 (struct ipv6_opt_hdr *) (skb->data + offset);
806
807                         /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
808                         if (ehdr->hdrlen == 0xff)
809                                 return true;
810                 }
811         }
812         return false;
813 }
814
/* Returns non-zero if a VLAN tag may apply to this Tx pkt: the stack
 * supplied a tag, or a port VID (pvid) or QnQ VID is configured.
 */
static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}
819
820 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
821 {
822         return BE3_chip(adapter) &&
823                 be_ipv6_exthdr_check(skb);
824 }
825
/* ndo_start_xmit handler: apply the HW VLAN/padding workarounds, map the
 * skb into Tx WRBs and ring the Tx doorbell. Always returns NETDEV_TX_OK
 * (dropped pkts are freed, never requeued).
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;
	bool skip_hw_vlan = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * Trim the skb back to the length the IP header claims.
	 */
	if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* WRB mapping failed: undo the head advance and drop pkt */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
914
915 static int be_change_mtu(struct net_device *netdev, int new_mtu)
916 {
917         struct be_adapter *adapter = netdev_priv(netdev);
918         if (new_mtu < BE_MIN_MTU ||
919                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
920                                         (ETH_HLEN + ETH_FCS_LEN))) {
921                 dev_info(&adapter->pdev->dev,
922                         "MTU must be between %d and %d bytes\n",
923                         BE_MIN_MTU,
924                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
925                 return -EINVAL;
926         }
927         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
928                         netdev->mtu, new_mtu);
929         netdev->mtu = new_mtu;
930         return 0;
931 }
932
933 /*
934  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
935  * If the user configures more, place BE in vlan promiscuous mode.
936  */
937 static int be_vid_config(struct be_adapter *adapter)
938 {
939         u16 vids[BE_NUM_VLANS_SUPPORTED];
940         u16 num = 0, i;
941         int status = 0;
942
943         /* No need to further configure vids if in promiscuous mode */
944         if (adapter->promiscuous)
945                 return 0;
946
947         if (adapter->vlans_added > adapter->max_vlans)
948                 goto set_vlan_promisc;
949
950         /* Construct VLAN Table to give to HW */
951         for (i = 0; i < VLAN_N_VID; i++)
952                 if (adapter->vlan_tag[i])
953                         vids[num++] = cpu_to_le16(i);
954
955         status = be_cmd_vlan_config(adapter, adapter->if_handle,
956                                     vids, num, 1, 0);
957
958         /* Set to VLAN promisc mode as setting VLAN filter failed */
959         if (status) {
960                 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
961                 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
962                 goto set_vlan_promisc;
963         }
964
965         return status;
966
967 set_vlan_promisc:
968         status = be_cmd_vlan_config(adapter, adapter->if_handle,
969                                     NULL, 0, 1, 1);
970         return status;
971 }
972
973 static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
974 {
975         struct be_adapter *adapter = netdev_priv(netdev);
976         int status = 0;
977
978         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
979                 status = -EINVAL;
980                 goto ret;
981         }
982
983         /* Packets with VID 0 are always received by Lancer by default */
984         if (lancer_chip(adapter) && vid == 0)
985                 goto ret;
986
987         adapter->vlan_tag[vid] = 1;
988         if (adapter->vlans_added <= (adapter->max_vlans + 1))
989                 status = be_vid_config(adapter);
990
991         if (!status)
992                 adapter->vlans_added++;
993         else
994                 adapter->vlan_tag[vid] = 0;
995 ret:
996         return status;
997 }
998
999 static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
1000 {
1001         struct be_adapter *adapter = netdev_priv(netdev);
1002         int status = 0;
1003
1004         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1005                 status = -EINVAL;
1006                 goto ret;
1007         }
1008
1009         /* Packets with VID 0 are always received by Lancer by default */
1010         if (lancer_chip(adapter) && vid == 0)
1011                 goto ret;
1012
1013         adapter->vlan_tag[vid] = 0;
1014         if (adapter->vlans_added <= adapter->max_vlans)
1015                 status = be_vid_config(adapter);
1016
1017         if (!status)
1018                 adapter->vlans_added--;
1019         else
1020                 adapter->vlan_tag[vid] = 1;
1021 ret:
1022         return status;
1023 }
1024
/* ndo_set_rx_mode handler: sync the HW promiscuous/multicast/unicast
 * filter state with what the stack has configured on netdev.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-program the VLAN filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* flush all previously programmed secondary unicast MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* more UC addrs than HW PMAC slots: fall back to promisc */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1086
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* On Lancer, drop the currently active MAC (if any) before
		 * programming the new one via the MAC-list interface.
		 */
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
	} else {
		/* NOTE(review): the pmac_del status is overwritten by the
		 * pmac_add status below; a del failure is silently ignored.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1126
1127 static int be_get_vf_config(struct net_device *netdev, int vf,
1128                         struct ifla_vf_info *vi)
1129 {
1130         struct be_adapter *adapter = netdev_priv(netdev);
1131         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1132
1133         if (!sriov_enabled(adapter))
1134                 return -EPERM;
1135
1136         if (vf >= adapter->num_vfs)
1137                 return -EINVAL;
1138
1139         vi->vf = vf;
1140         vi->tx_rate = vf_cfg->tx_rate;
1141         vi->vlan = vf_cfg->vlan_tag;
1142         vi->qos = 0;
1143         memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1144
1145         return 0;
1146 }
1147
1148 static int be_set_vf_vlan(struct net_device *netdev,
1149                         int vf, u16 vlan, u8 qos)
1150 {
1151         struct be_adapter *adapter = netdev_priv(netdev);
1152         int status = 0;
1153
1154         if (!sriov_enabled(adapter))
1155                 return -EPERM;
1156
1157         if (vf >= adapter->num_vfs || vlan > 4095)
1158                 return -EINVAL;
1159
1160         if (vlan) {
1161                 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1162                         /* If this is new value, program it. Else skip. */
1163                         adapter->vf_cfg[vf].vlan_tag = vlan;
1164
1165                         status = be_cmd_set_hsw_config(adapter, vlan,
1166                                 vf + 1, adapter->vf_cfg[vf].if_handle);
1167                 }
1168         } else {
1169                 /* Reset Transparent Vlan Tagging. */
1170                 adapter->vf_cfg[vf].vlan_tag = 0;
1171                 vlan = adapter->vf_cfg[vf].def_vid;
1172                 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1173                         adapter->vf_cfg[vf].if_handle);
1174         }
1175
1176
1177         if (status)
1178                 dev_info(&adapter->pdev->dev,
1179                                 "VLAN %d config on VF %d failed\n", vlan, vf);
1180         return status;
1181 }
1182
1183 static int be_set_vf_tx_rate(struct net_device *netdev,
1184                         int vf, int rate)
1185 {
1186         struct be_adapter *adapter = netdev_priv(netdev);
1187         int status = 0;
1188
1189         if (!sriov_enabled(adapter))
1190                 return -EPERM;
1191
1192         if (vf >= adapter->num_vfs)
1193                 return -EINVAL;
1194
1195         if (rate < 100 || rate > 10000) {
1196                 dev_err(&adapter->pdev->dev,
1197                         "tx rate must be between 100 and 10000 Mbps\n");
1198                 return -EINVAL;
1199         }
1200
1201         if (lancer_chip(adapter))
1202                 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1203         else
1204                 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1205
1206         if (status)
1207                 dev_err(&adapter->pdev->dev,
1208                                 "tx rate %d on VF %d failed\n", rate, vf);
1209         else
1210                 adapter->vf_cfg[vf].tx_rate = rate;
1211         return status;
1212 }
1213
1214 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1215 {
1216         struct pci_dev *dev, *pdev = adapter->pdev;
1217         int vfs = 0, assigned_vfs = 0, pos;
1218         u16 offset, stride;
1219
1220         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1221         if (!pos)
1222                 return 0;
1223         pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1224         pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1225
1226         dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1227         while (dev) {
1228                 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
1229                         vfs++;
1230                         if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1231                                 assigned_vfs++;
1232                 }
1233                 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1234         }
1235         return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1236 }
1237
1238 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1239 {
1240         struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1241         ulong now = jiffies;
1242         ulong delta = now - stats->rx_jiffies;
1243         u64 pkts;
1244         unsigned int start, eqd;
1245
1246         if (!eqo->enable_aic) {
1247                 eqd = eqo->eqd;
1248                 goto modify_eqd;
1249         }
1250
1251         if (eqo->idx >= adapter->num_rx_qs)
1252                 return;
1253
1254         stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1255
1256         /* Wrapped around */
1257         if (time_before(now, stats->rx_jiffies)) {
1258                 stats->rx_jiffies = now;
1259                 return;
1260         }
1261
1262         /* Update once a second */
1263         if (delta < HZ)
1264                 return;
1265
1266         do {
1267                 start = u64_stats_fetch_begin_bh(&stats->sync);
1268                 pkts = stats->rx_pkts;
1269         } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1270
1271         stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1272         stats->rx_pkts_prev = pkts;
1273         stats->rx_jiffies = now;
1274         eqd = (stats->rx_pps / 110000) << 3;
1275         eqd = min(eqd, eqo->max_eqd);
1276         eqd = max(eqd, eqo->min_eqd);
1277         if (eqd < 10)
1278                 eqd = 0;
1279
1280 modify_eqd:
1281         if (eqd != eqo->cur_eqd) {
1282                 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1283                 eqo->cur_eqd = eqd;
1284         }
1285 }
1286
/* Fold the parsed Rx completion into this queue's soft statistics */
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	/* u64_stats sync lets readers see consistent 64-bit counters */
	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1302
1303 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1304 {
1305         /* L4 checksum is not reliable for non TCP/UDP packets.
1306          * Also ignore ipcksm for ipv6 pkts */
1307         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1308                                 (rxcp->ip_csum || rxcp->ipv6);
1309 }
1310
/* Consume the Rx page-info entry at @frag_idx: DMA-unmap the backing
 * page if this was its last user and account the posted buffer as used.
 * The caller takes over the page reference held by the entry.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* A big page is shared by multiple frags; unmap only when the
	 * last user of the page is consumed.
	 */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1331
1332 /* Throwaway the data in the Rx completion */
1333 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1334                                 struct be_rx_compl_info *rxcp)
1335 {
1336         struct be_queue_info *rxq = &rxo->q;
1337         struct be_rx_page_info *page_info;
1338         u16 i, num_rcvd = rxcp->num_rcvd;
1339
1340         for (i = 0; i < num_rcvd; i++) {
1341                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1342                 put_page(page_info->page);
1343                 memset(page_info, 0, sizeof(*page_info));
1344                 index_inc(&rxcp->rxq_idx, rxq->len);
1345         }
1346 }
1347
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first chunk (or its header) is copied into the
 * skb linear area and the remaining fragments are attached as page frags.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the ether header into the linear area; attach
		 * the rest of the first fragment as page frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-frag completion: nothing more to chain */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag; drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1424
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* no skb available: drop the frame and reclaim its buffers */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* trust the HW checksum only when csum_passed() says it applies */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1458
/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16; -1 wraps so that the first iteration's j++ opens slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* same page as previous frag; drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1514
1515 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1516                                  struct be_rx_compl_info *rxcp)
1517 {
1518         rxcp->pkt_size =
1519                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1520         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1521         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1522         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1523         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1524         rxcp->ip_csum =
1525                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1526         rxcp->l4_csum =
1527                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1528         rxcp->ipv6 =
1529                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1530         rxcp->rxq_idx =
1531                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1532         rxcp->num_rcvd =
1533                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1534         rxcp->pkt_type =
1535                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1536         rxcp->rss_hash =
1537                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1538         if (rxcp->vlanf) {
1539                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1540                                           compl);
1541                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1542                                                compl);
1543         }
1544         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1545 }
1546
/* Extract the fields of a v0 (legacy, non-BE3-native) RX completion into
 * the chip-agnostic be_rx_compl_info representation.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* vtm/vlan_tag are meaningful only when the vlan-present bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}
1578
/* Return the next valid RX completion from rxo->cq, parsed into
 * rxo->rxcp, or NULL if none is pending.  The returned pointer refers to
 * per-rxo storage and is overwritten by the next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Don't read the rest of the compl before the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* On BE the tag needs a byte-swap; Lancer delivers it
		 * already in host order */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Don't report a PVID tag to the stack unless that vlan was
		 * explicitly configured */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1618
1619 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1620 {
1621         u32 order = get_order(size);
1622
1623         if (order > 0)
1624                 gfp |= __GFP_COMP;
1625         return  alloc_pages(gfp, order);
1626 }
1627
1628 /*
1629  * Allocate a page, split it to fragments of size rx_frag_size and post as
1630  * receive buffers to BE
1631  */
1632 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1633 {
1634         struct be_adapter *adapter = rxo->adapter;
1635         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1636         struct be_queue_info *rxq = &rxo->q;
1637         struct page *pagep = NULL;
1638         struct be_eth_rx_d *rxd;
1639         u64 page_dmaaddr = 0, frag_dmaaddr;
1640         u32 posted, page_offset = 0;
1641
1642         page_info = &rxo->page_info_tbl[rxq->head];
1643         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1644                 if (!pagep) {
1645                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1646                         if (unlikely(!pagep)) {
1647                                 rx_stats(rxo)->rx_post_fail++;
1648                                 break;
1649                         }
1650                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1651                                                     0, adapter->big_page_size,
1652                                                     DMA_FROM_DEVICE);
1653                         page_info->page_offset = 0;
1654                 } else {
1655                         get_page(pagep);
1656                         page_info->page_offset = page_offset + rx_frag_size;
1657                 }
1658                 page_offset = page_info->page_offset;
1659                 page_info->page = pagep;
1660                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1661                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1662
1663                 rxd = queue_head_node(rxq);
1664                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1665                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1666
1667                 /* Any space left in the current big page for another frag? */
1668                 if ((page_offset + rx_frag_size + rx_frag_size) >
1669                                         adapter->big_page_size) {
1670                         pagep = NULL;
1671                         page_info->last_page_user = true;
1672                 }
1673
1674                 prev_page_info = page_info;
1675                 queue_head_inc(rxq);
1676                 page_info = &rxo->page_info_tbl[rxq->head];
1677         }
1678         if (pagep)
1679                 prev_page_info->last_page_user = true;
1680
1681         if (posted) {
1682                 atomic_add(posted, &rxq->used);
1683                 be_rxq_notify(adapter, rxq->id, posted);
1684         } else if (atomic_read(&rxq->used) == 0) {
1685                 /* Let be_worker replenish when memory is available */
1686                 rxo->rx_post_starved = true;
1687         }
1688 }
1689
/* Return the next valid TX completion from tx_cq, or NULL if none is
 * pending.  The valid bit is cleared so the entry is not seen again when
 * the ring wraps.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Don't read the rest of the compl before the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1705
/* Unmap and free the skb whose wrbs end at last_index in the TX queue.
 * Returns the number of wrbs consumed (including the header wrb) so the
 * caller can credit them back against txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data wrb maps the linear skb header (if
		 * any); the remaining wrbs map page frags */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1737
/* Return the number of events in the event queue.
 * Each consumed entry is zeroed so it is not counted again on ring wrap.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Don't act on the entry before a non-zero evt is observed */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1757
1758 /* Leaves the EQ is disarmed state */
1759 static void be_eq_clean(struct be_eq_obj *eqo)
1760 {
1761         int num = events_get(eqo);
1762
1763         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1764 }
1765
/* Drain the RX completion queue and release all posted-but-unused RX
 * buffers; leaves the CQ unarmed and the RX queue empty.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or when the HW is in error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, true, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1816
/* Drain all TX completion queues, then reclaim any TX wrbs whose
 * completions never arrived.  Called on the teardown path.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the wrb span of this skb so it can be
			 * reclaimed without a completion entry */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1875
1876 static void be_evt_queues_destroy(struct be_adapter *adapter)
1877 {
1878         struct be_eq_obj *eqo;
1879         int i;
1880
1881         for_all_evt_queues(adapter, eqo, i) {
1882                 if (eqo->q.created) {
1883                         be_eq_clean(eqo);
1884                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1885                 }
1886                 be_queue_free(adapter, &eqo->q);
1887         }
1888 }
1889
/* Allocate and create one EQ per irq.  On failure the queues created so
 * far are not torn down here; the caller is presumably expected to invoke
 * be_evt_queues_destroy() — TODO confirm against callers.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1917
1918 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1919 {
1920         struct be_queue_info *q;
1921
1922         q = &adapter->mcc_obj.q;
1923         if (q->created)
1924                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1925         be_queue_free(adapter, q);
1926
1927         q = &adapter->mcc_obj.cq;
1928         if (q->created)
1929                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1930         be_queue_free(adapter, q);
1931 }
1932
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC CQ and WRB queue, unwinding partial setup via the
 * goto chain on any failure.  Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of setup */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1965
1966 static void be_tx_queues_destroy(struct be_adapter *adapter)
1967 {
1968         struct be_queue_info *q;
1969         struct be_tx_obj *txo;
1970         u8 i;
1971
1972         for_all_tx_queues(adapter, txo, i) {
1973                 q = &txo->q;
1974                 if (q->created)
1975                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1976                 be_queue_free(adapter, q);
1977
1978                 q = &txo->cq;
1979                 if (q->created)
1980                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1981                 be_queue_free(adapter, q);
1982         }
1983 }
1984
1985 static int be_num_txqs_want(struct be_adapter *adapter)
1986 {
1987         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1988             be_is_mc(adapter) ||
1989             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1990             BE2_chip(adapter))
1991                 return 1;
1992         else
1993                 return adapter->max_tx_queues;
1994 }
1995
/* Decide the number of TX queues, tell the stack, and create one CQ per
 * TXQ (CQs are distributed round-robin over the EQs).
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
2028
2029 static int be_tx_qs_create(struct be_adapter *adapter)
2030 {
2031         struct be_tx_obj *txo;
2032         int i, status;
2033
2034         for_all_tx_queues(adapter, txo, i) {
2035                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2036                                         sizeof(struct be_eth_wrb));
2037                 if (status)
2038                         return status;
2039
2040                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2041                 if (status)
2042                         return status;
2043         }
2044
2045         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2046                  adapter->num_tx_qs);
2047         return 0;
2048 }
2049
2050 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2051 {
2052         struct be_queue_info *q;
2053         struct be_rx_obj *rxo;
2054         int i;
2055
2056         for_all_rx_queues(adapter, rxo, i) {
2057                 q = &rxo->cq;
2058                 if (q->created)
2059                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2060                 be_queue_free(adapter, q);
2061         }
2062 }
2063
/* Decide the number of RX queues, tell the stack, and create one CQ per
 * RXQ (CQs are distributed round-robin over the EQs).
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	/* Size of the big page from which rx_frag_size fragments are carved */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2102
/* INTx interrupt handler: count pending events, hand processing off to
 * NAPI, and decide whether this interrupt was genuine or spurious.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2134
/* MSI-x interrupt handler: leave the EQ unarmed (0 events acked) and let
 * NAPI consume events and re-arm when polling completes.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2143
2144 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2145 {
2146         return (rxcp->tcpf && !rxcp->err) ? true : false;
2147 }
2148
/* NAPI RX processing for one RX object: consume up to budget
 * completions, dispatch each packet via GRO or the regular path, and
 * replenish RX buffers when they run low.  Returns the number of
 * completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Refill the RX queue when it falls below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2198
/* NAPI TX processing for one TX object: reclaim up to budget
 * completions, credit the freed wrbs back, and wake the netdev subqueue
 * if it was stopped for lack of wrbs.  Returns true when all pending
 * completions were consumed (work_done < budget).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs.  */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
2231
/* NAPI poll handler for one EQ: service all TXQs and RXQs mapped to this
 * EQ, process MCC completions on the MCC EQ, and re-arm the EQ only when
 * all work fit in the budget.  Returns the work done (capped at budget).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* TX budget exhausted: force another poll round */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* All caught up: ack events and re-arm the EQ */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2270
2271 void be_detect_error(struct be_adapter *adapter)
2272 {
2273         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2274         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2275         u32 i;
2276
2277         if (be_hw_error(adapter))
2278                 return;
2279
2280         if (lancer_chip(adapter)) {
2281                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2282                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2283                         sliport_err1 = ioread32(adapter->db +
2284                                         SLIPORT_ERROR1_OFFSET);
2285                         sliport_err2 = ioread32(adapter->db +
2286                                         SLIPORT_ERROR2_OFFSET);
2287                 }
2288         } else {
2289                 pci_read_config_dword(adapter->pdev,
2290                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2291                 pci_read_config_dword(adapter->pdev,
2292                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2293                 pci_read_config_dword(adapter->pdev,
2294                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2295                 pci_read_config_dword(adapter->pdev,
2296                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2297
2298                 ue_lo = (ue_lo & ~ue_lo_mask);
2299                 ue_hi = (ue_hi & ~ue_hi_mask);
2300         }
2301
2302         /* On certain platforms BE hardware can indicate spurious UEs.
2303          * Allow the h/w to stop working completely in case of a real UE.
2304          * Hence not setting the hw_error for UE detection.
2305          */
2306         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2307                 adapter->hw_error = true;
2308                 dev_err(&adapter->pdev->dev,
2309                         "Error detected in the card\n");
2310         }
2311
2312         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2313                 dev_err(&adapter->pdev->dev,
2314                         "ERR: sliport status 0x%x\n", sliport_status);
2315                 dev_err(&adapter->pdev->dev,
2316                         "ERR: sliport error1 0x%x\n", sliport_err1);
2317                 dev_err(&adapter->pdev->dev,
2318                         "ERR: sliport error2 0x%x\n", sliport_err2);
2319         }
2320
2321         if (ue_lo) {
2322                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2323                         if (ue_lo & 1)
2324                                 dev_err(&adapter->pdev->dev,
2325                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2326                 }
2327         }
2328
2329         if (ue_hi) {
2330                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2331                         if (ue_hi & 1)
2332                                 dev_err(&adapter->pdev->dev,
2333                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2334                 }
2335         }
2336
2337 }
2338
2339 static void be_msix_disable(struct be_adapter *adapter)
2340 {
2341         if (msix_enabled(adapter)) {
2342                 pci_disable_msix(adapter->pdev);
2343                 adapter->num_msix_vec = 0;
2344         }
2345 }
2346
2347 static uint be_num_rss_want(struct be_adapter *adapter)
2348 {
2349         u32 num = 0;
2350
2351         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2352             (lancer_chip(adapter) ||
2353              (!sriov_want(adapter) && be_physfn(adapter)))) {
2354                 num = adapter->max_rss_queues;
2355                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2356         }
2357         return num;
2358 }
2359
/* Enable MSI-x with as many vectors as RSS (and RoCE, when supported)
 * want, retrying with the smaller count the PCI core offers.  On
 * success, partitions the vectors between NIC and RoCE.  Falls back
 * silently (adapter->num_msix_vec stays 0) if MSI-x can't be enabled.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* pci_enable_msix() returns > 0 with the number of vectors that
	 * could be allocated when the full request can't be met */
	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* Split the granted vectors between NIC and RoCE; RoCE gets its
	 * share only when there are vectors beyond its own count */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2407
2408 static inline int be_msix_vec_get(struct be_adapter *adapter,
2409                                 struct be_eq_obj *eqo)
2410 {
2411         return adapter->msix_entries[eqo->idx].vector;
2412 }
2413
/* Request one IRQ per event queue using the MSI-x vectors obtained
 * earlier. On failure, frees only the IRQs registered so far and then
 * disables MSI-x entirely.
 * Returns 0 on success or the request_irq() error code.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind in reverse: queues [0, i) were successfully registered */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2437
2438 static int be_irq_register(struct be_adapter *adapter)
2439 {
2440         struct net_device *netdev = adapter->netdev;
2441         int status;
2442
2443         if (msix_enabled(adapter)) {
2444                 status = be_msix_register(adapter);
2445                 if (status == 0)
2446                         goto done;
2447                 /* INTx is not supported for VF */
2448                 if (!be_physfn(adapter))
2449                         return status;
2450         }
2451
2452         /* INTx: only the first EQ is used */
2453         netdev->irq = adapter->pdev->irq;
2454         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2455                              &adapter->eq_obj[0]);
2456         if (status) {
2457                 dev_err(&adapter->pdev->dev,
2458                         "INTx request IRQ failed - err %d\n", status);
2459                 return status;
2460         }
2461 done:
2462         adapter->isr_registered = true;
2463         return 0;
2464 }
2465
2466 static void be_irq_unregister(struct be_adapter *adapter)
2467 {
2468         struct net_device *netdev = adapter->netdev;
2469         struct be_eq_obj *eqo;
2470         int i;
2471
2472         if (!adapter->isr_registered)
2473                 return;
2474
2475         /* INTx */
2476         if (!msix_enabled(adapter)) {
2477                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2478                 goto done;
2479         }
2480
2481         /* MSIx */
2482         for_all_evt_queues(adapter, eqo, i)
2483                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2484
2485 done:
2486         adapter->isr_registered = false;
2487 }
2488
/* Destroy all RX queues and free their rings. For queues created in
 * HW, the RXQ is destroyed first, then after a 1ms grace period for
 * in-flight DMA and the flush completion, the completion queue is
 * drained before the ring memory is freed.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2509
/* netdev ndo_stop handler. Quiesces the device in a strict order:
 * RoCE first, then interrupts/NAPI, async MCC events, TX drain,
 * RX teardown, per-EQ cleanup, and finally IRQ release.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	/* Lancer does not use this global interrupt toggle */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i)
		napi_disable(&eqo->napi);

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Let any in-flight handler for this EQ finish first */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2545
2546 static int be_rx_qs_create(struct be_adapter *adapter)
2547 {
2548         struct be_rx_obj *rxo;
2549         int rc, i, j;
2550         u8 rsstable[128];
2551
2552         for_all_rx_queues(adapter, rxo, i) {
2553                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2554                                     sizeof(struct be_eth_rx_d));
2555                 if (rc)
2556                         return rc;
2557         }
2558
2559         /* The FW would like the default RXQ to be created first */
2560         rxo = default_rxo(adapter);
2561         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2562                                adapter->if_handle, false, &rxo->rss_id);
2563         if (rc)
2564                 return rc;
2565
2566         for_all_rss_queues(adapter, rxo, i) {
2567                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2568                                        rx_frag_size, adapter->if_handle,
2569                                        true, &rxo->rss_id);
2570                 if (rc)
2571                         return rc;
2572         }
2573
2574         if (be_multi_rxq(adapter)) {
2575                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2576                         for_all_rss_queues(adapter, rxo, i) {
2577                                 if ((j + i) >= 128)
2578                                         break;
2579                                 rsstable[j + i] = rxo->rss_id;
2580                         }
2581                 }
2582                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2583                 if (rc)
2584                         return rc;
2585         }
2586
2587         /* First time posting */
2588         for_all_rx_queues(adapter, rxo, i)
2589                 be_post_rx_frags(rxo, GFP_KERNEL);
2590         return 0;
2591 }
2592
/* netdev ndo_open handler: create RX queues, register IRQs, arm all
 * completion and event queues, enable NAPI, and report link state.
 * Returns 0 on success; on any failure tears everything down via
 * be_close() and returns -EIO (the specific error is not propagated).
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* Lancer does not use this global interrupt toggle */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* Arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* A link-status query failure is non-fatal here */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2634
/* Enable or disable Wake-on-LAN (magic packet). A DMA buffer is
 * allocated for the FW command in both directions; when enabling, PM
 * config space is programmed and the PCI wake states are set as well.
 * Returns 0 on success, -1 on allocation failure, or a cmd/PCI error.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* An all-zero MAC is passed to FW on the disable path */
	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2673
2674 /*
2675  * Generate a seed MAC address from the PF MAC Address using jhash.
2676  * MAC Address for VFs are assigned incrementally starting from the seed.
2677  * These addresses are programmed in the ASIC by the PF and the VF driver
2678  * queries for the MAC address during its probe.
2679  */
2680 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2681 {
2682         u32 vf;
2683         int status = 0;
2684         u8 mac[ETH_ALEN];
2685         struct be_vf_cfg *vf_cfg;
2686
2687         be_vf_eth_addr_generate(adapter, mac);
2688
2689         for_all_vfs(adapter, vf_cfg, vf) {
2690                 if (lancer_chip(adapter)) {
2691                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2692                 } else {
2693                         status = be_cmd_pmac_add(adapter, mac,
2694                                                  vf_cfg->if_handle,
2695                                                  &vf_cfg->pmac_id, vf + 1);
2696                 }
2697
2698                 if (status)
2699                         dev_err(&adapter->pdev->dev,
2700                         "Mac address assignment failed for VF %d\n", vf);
2701                 else
2702                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2703
2704                 mac[5] += 1;
2705         }
2706         return status;
2707 }
2708
2709 static int be_vfs_mac_query(struct be_adapter *adapter)
2710 {
2711         int status, vf;
2712         u8 mac[ETH_ALEN];
2713         struct be_vf_cfg *vf_cfg;
2714         bool active;
2715
2716         for_all_vfs(adapter, vf_cfg, vf) {
2717                 be_cmd_get_mac_from_list(adapter, mac, &active,
2718                                          &vf_cfg->pmac_id, 0);
2719
2720                 status = be_cmd_mac_addr_query(adapter, mac, false,
2721                                                vf_cfg->if_handle, 0);
2722                 if (status)
2723                         return status;
2724                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2725         }
2726         return 0;
2727 }
2728
/* Tear down all VF state. If any VF is still assigned to a VM, SR-IOV
 * is left enabled (disabling it would yank the device from the guest)
 * and only the host-side bookkeeping is freed.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Remove the VF's MAC, then destroy its FW interface */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2754
/* Undo be_setup(): stop the periodic worker, clear VFs, delete the
 * extra unicast MACs, destroy the FW interface and all queues, and
 * release MSI-x. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;	/* pmac_id[0] is the primary MAC; extras start at 1 */

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Delete the additional (secondary) unicast MAC entries */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle,  0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2784
2785 static int be_vfs_if_create(struct be_adapter *adapter)
2786 {
2787         struct be_vf_cfg *vf_cfg;
2788         u32 cap_flags, en_flags, vf;
2789         int status;
2790
2791         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2792                     BE_IF_FLAGS_MULTICAST;
2793
2794         for_all_vfs(adapter, vf_cfg, vf) {
2795                 if (!BE3_chip(adapter))
2796                         be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
2797
2798                 /* If a FW profile exists, then cap_flags are updated */
2799                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2800                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2801                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2802                                           &vf_cfg->if_handle, vf + 1);
2803                 if (status)
2804                         goto err;
2805         }
2806 err:
2807         return status;
2808 }
2809
/* Allocate the per-VF config array (zeroed) and mark every handle/id
 * as invalid so teardown paths can tell what was never created.
 * Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* -1 marks "not yet created/added" */
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
2826
/* Bring up SR-IOV: enable (or adopt already-enabled) VFs, create or
 * look up their FW interfaces and MAC addresses, and program per-VF
 * QoS, link speed and default VLAN.
 * Returns 0 on success -- including the soft-fail case where
 * pci_enable_sriov() fails and the driver continues without VFs -- or
 * an error after tearing VF state back down.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;

	old_vfs = be_find_vfs(adapter, ENABLED);
	if (old_vfs) {
		/* VFs already enabled (e.g. left over from a previous driver
		 * load): adopt them and ignore the num_vfs module parameter.
		 */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > adapter->dev_num_vfs)
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 adapter->dev_num_vfs, num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		if (status) {
			/* Soft failure: continue without any VFs */
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			return 0;
		}
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* Interfaces already exist in FW; just look up their ids */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		/* Cache the link speed as the VF's TX rate (best effort) */
		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
2906
2907 static void be_setup_init(struct be_adapter *adapter)
2908 {
2909         adapter->vlan_prio_bmap = 0xff;
2910         adapter->phy.link_speed = -1;
2911         adapter->if_handle = -1;
2912         adapter->be3_native = false;
2913         adapter->promiscuous = false;
2914         if (be_physfn(adapter))
2915                 adapter->cmd_privileges = MAX_PRIVILEGES;
2916         else
2917                 adapter->cmd_privileges = MIN_PRIVILEGES;
2918 }
2919
/* Determine the MAC address for this function and whether FW already
 * has it active (*active_mac). If the netdev already carries a valid
 * permanent MAC it is reused; otherwise the address is queried from FW
 * in a chip- and function-specific way.
 * Returns 0 on success or a FW command error.
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		/* non-Lancer VF: MAC presumed already programmed by the PF
		 * (see be_vf_eth_addr_config) -- so mark it active
		 */
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF*/
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
2954
/* Populate the adapter's resource limits: MAC/VLAN/multicast counts,
 * queue counts, interface capability flags, and the device's SR-IOV VF
 * total. On non-BEx chips the limits come from a FW profile (with
 * sanity clamping); otherwise chip/compile-time defaults are used.
 */
static void be_get_resources(struct be_adapter *adapter)
{
	u16 dev_num_vfs;
	int pos, status;
	bool profile_present = false;

	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter);
		if (!status)
			profile_present = true;
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* presumably leaves one RX ring for the default (non-RSS)
		 * queue when all RX rings would otherwise be RSS rings
		 */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		/* cannot use more RSS rings than event queues */
		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* NOTE(review): FLEX10 mode appears to split the VLAN table
		 * across partitions (hence the /8) -- confirm with FW spec
		 */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = MAX_TX_QS;
		adapter->max_rss_queues = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}

	/* Read the device's total VF count from the SR-IOV capability */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		if (BE3_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
}
3028
3029 /* Routine to query per function resource limits */
3030 static int be_get_config(struct be_adapter *adapter)
3031 {
3032         int status;
3033
3034         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3035                                      &adapter->function_mode,
3036                                      &adapter->function_caps);
3037         if (status)
3038                 goto err;
3039
3040         be_get_resources(adapter);
3041
3042         /* primary mac needs 1 pmac entry */
3043         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3044                                    sizeof(u32), GFP_KERNEL);
3045         if (!adapter->pmac_id) {
3046                 status = -ENOMEM;
3047                 goto err;
3048         }
3049
3050 err:
3051         return status;
3052 }
3053
/* One-time (and post-reset) device setup: query config and resources,
 * enable MSI-x, create all queues, create the FW interface, program the
 * MAC, configure VLANs / RX-mode / flow-control, optionally bring up
 * SR-IOV, and start the periodic worker.
 * Returns 0 on success; on failure everything is undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	/* Enable only the flags the interface actually supports */
	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	/* Program the MAC only if FW does not already have it active */
	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Re-program flow control only if it differs from FW's view */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3164
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll-mode entry: kick every event queue so its NAPI handler runs. */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3180
#define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
/* On-flash cookie marking the start of a flash section directory in a
 * UFI image; matched against fsec->cookie in get_fsec_info().
 */
char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
3183
/* Decide whether the redboot section needs flashing: fetch the CRC of
 * the currently-flashed image from FW and compare it with the last 4
 * bytes of the new image. Returns true only when they differ (i.e.
 * flashing is required); false on match or on FW query failure.
 */
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	/* CRC occupies the last 4 bytes of the image within the file */
	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
3210
3211 static bool phy_flashing_required(struct be_adapter *adapter)
3212 {
3213         return (adapter->phy.phy_type == TN_8022 &&
3214                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3215 }
3216
3217 static bool is_comp_in_ufi(struct be_adapter *adapter,
3218                            struct flash_section_info *fsec, int type)
3219 {
3220         int i = 0, img_type = 0;
3221         struct flash_section_info_g2 *fsec_g2 = NULL;
3222
3223         if (BE2_chip(adapter))
3224                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3225
3226         for (i = 0; i < MAX_FLASH_COMP; i++) {
3227                 if (fsec_g2)
3228                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3229                 else
3230                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3231
3232                 if (img_type == type)
3233                         return true;
3234         }
3235         return false;
3236
3237 }
3238
3239 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3240                                          int header_size,
3241                                          const struct firmware *fw)
3242 {
3243         struct flash_section_info *fsec = NULL;
3244         const u8 *p = fw->data;
3245
3246         p += header_size;
3247         while (p < (fw->data + fw->size)) {
3248                 fsec = (struct flash_section_info *)p;
3249                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3250                         return fsec;
3251                 p += 32;
3252         }
3253         return NULL;
3254 }
3255
3256 static int be_flash(struct be_adapter *adapter, const u8 *img,
3257                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3258 {
3259         u32 total_bytes = 0, flash_op, num_bytes = 0;
3260         int status = 0;
3261         struct be_cmd_write_flashrom *req = flash_cmd->va;
3262
3263         total_bytes = img_size;
3264         while (total_bytes) {
3265                 num_bytes = min_t(u32, 32*1024, total_bytes);
3266
3267                 total_bytes -= num_bytes;
3268
3269                 if (!total_bytes) {
3270                         if (optype == OPTYPE_PHY_FW)
3271                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3272                         else
3273                                 flash_op = FLASHROM_OPER_FLASH;
3274                 } else {
3275                         if (optype == OPTYPE_PHY_FW)
3276                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3277                         else
3278                                 flash_op = FLASHROM_OPER_SAVE;
3279                 }
3280
3281                 memcpy(req->data_buf, img, num_bytes);
3282                 img += num_bytes;
3283                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3284                                                 flash_op, num_bytes);
3285                 if (status) {
3286                         if (status == ILLEGAL_IOCTL_REQ &&
3287                             optype == OPTYPE_PHY_FW)
3288                                 break;
3289                         dev_err(&adapter->pdev->dev,
3290                                 "cmd to write to flash rom failed.\n");
3291                         return status;
3292                 }
3293         }
3294         return 0;
3295 }
3296
3297 /* For BE2 and BE3 */
3298 static int be_flash_BEx(struct be_adapter *adapter,
3299                          const struct firmware *fw,
3300                          struct be_dma_mem *flash_cmd,
3301                          int num_of_images)
3302
3303 {
3304         int status = 0, i, filehdr_size = 0;
3305         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3306         const u8 *p = fw->data;
3307         const struct flash_comp *pflashcomp;
3308         int num_comp, redboot;
3309         struct flash_section_info *fsec = NULL;
3310
3311         struct flash_comp gen3_flash_types[] = {
3312                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3313                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3314                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3315                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3316                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3317                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3318                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3319                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3320                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3321                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3322                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3323                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3324                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3325                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3326                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3327                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3328                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3329                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3330                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3331                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3332         };
3333
3334         struct flash_comp gen2_flash_types[] = {
3335                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3336                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3337                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3338                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3339                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3340                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3341                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3342                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3343                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3344                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3345                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3346                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3347                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3348                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3349                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3350                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3351         };
3352
3353         if (BE3_chip(adapter)) {
3354                 pflashcomp = gen3_flash_types;
3355                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3356                 num_comp = ARRAY_SIZE(gen3_flash_types);
3357         } else {
3358                 pflashcomp = gen2_flash_types;
3359                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3360                 num_comp = ARRAY_SIZE(gen2_flash_types);
3361         }
3362
3363         /* Get flash section info*/
3364         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3365         if (!fsec) {
3366                 dev_err(&adapter->pdev->dev,
3367                         "Invalid Cookie. UFI corrupted ?\n");
3368                 return -1;
3369         }
3370         for (i = 0; i < num_comp; i++) {
3371                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3372                         continue;
3373
3374                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3375                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3376                         continue;
3377
3378                 if (pflashcomp[i].optype == OPTYPE_PHY_FW  &&
3379                     !phy_flashing_required(adapter))
3380                                 continue;
3381
3382                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3383                         redboot = be_flash_redboot(adapter, fw->data,
3384                                 pflashcomp[i].offset, pflashcomp[i].size,
3385                                 filehdr_size + img_hdrs_size);
3386                         if (!redboot)
3387                                 continue;
3388                 }
3389
3390                 p = fw->data;
3391                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3392                 if (p + pflashcomp[i].size > fw->data + fw->size)
3393                         return -1;
3394
3395                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3396                                         pflashcomp[i].size);
3397                 if (status) {
3398                         dev_err(&adapter->pdev->dev,
3399                                 "Flashing section type %d failed.\n",
3400                                 pflashcomp[i].img_type);
3401                         return status;
3402                 }
3403         }
3404         return 0;
3405 }
3406
3407 static int be_flash_skyhawk(struct be_adapter *adapter,
3408                 const struct firmware *fw,
3409                 struct be_dma_mem *flash_cmd, int num_of_images)
3410 {
3411         int status = 0, i, filehdr_size = 0;
3412         int img_offset, img_size, img_optype, redboot;
3413         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3414         const u8 *p = fw->data;
3415         struct flash_section_info *fsec = NULL;
3416
3417         filehdr_size = sizeof(struct flash_file_hdr_g3);
3418         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3419         if (!fsec) {
3420                 dev_err(&adapter->pdev->dev,
3421                         "Invalid Cookie. UFI corrupted ?\n");
3422                 return -1;
3423         }
3424
3425         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3426                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3427                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3428
3429                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3430                 case IMAGE_FIRMWARE_iSCSI:
3431                         img_optype = OPTYPE_ISCSI_ACTIVE;
3432                         break;
3433                 case IMAGE_BOOT_CODE:
3434                         img_optype = OPTYPE_REDBOOT;
3435                         break;
3436                 case IMAGE_OPTION_ROM_ISCSI:
3437                         img_optype = OPTYPE_BIOS;
3438                         break;
3439                 case IMAGE_OPTION_ROM_PXE:
3440                         img_optype = OPTYPE_PXE_BIOS;
3441                         break;
3442                 case IMAGE_OPTION_ROM_FCoE:
3443                         img_optype = OPTYPE_FCOE_BIOS;
3444                         break;
3445                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3446                         img_optype = OPTYPE_ISCSI_BACKUP;
3447                         break;
3448                 case IMAGE_NCSI:
3449                         img_optype = OPTYPE_NCSI_FW;
3450                         break;
3451                 default:
3452                         continue;
3453                 }
3454
3455                 if (img_optype == OPTYPE_REDBOOT) {
3456                         redboot = be_flash_redboot(adapter, fw->data,
3457                                         img_offset, img_size,
3458                                         filehdr_size + img_hdrs_size);
3459                         if (!redboot)
3460                                 continue;
3461                 }
3462
3463                 p = fw->data;
3464                 p += filehdr_size + img_offset + img_hdrs_size;
3465                 if (p + img_size > fw->data + fw->size)
3466                         return -1;
3467
3468                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3469                 if (status) {
3470                         dev_err(&adapter->pdev->dev,
3471                                 "Flashing section type %d failed.\n",
3472                                 fsec->fsec_entry[i].type);
3473                         return status;
3474                 }
3475         }
3476         return 0;
3477 }
3478
3479 static int lancer_wait_idle(struct be_adapter *adapter)
3480 {
3481 #define SLIPORT_IDLE_TIMEOUT 30
3482         u32 reg_val;
3483         int status = 0, i;
3484
3485         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3486                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3487                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3488                         break;
3489
3490                 ssleep(1);
3491         }
3492
3493         if (i == SLIPORT_IDLE_TIMEOUT)
3494                 status = -1;
3495
3496         return status;
3497 }
3498
3499 static int lancer_fw_reset(struct be_adapter *adapter)
3500 {
3501         int status = 0;
3502
3503         status = lancer_wait_idle(adapter);
3504         if (status)
3505                 return status;
3506
3507         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3508                   PHYSDEV_CONTROL_OFFSET);
3509
3510         return status;
3511 }
3512
/* Download a Lancer firmware image in 32KB chunks via the write_object
 * command, commit it, and — if the FW requests it — reset the port so
 * the new image becomes active.  Returns 0 on success or a negative /
 * command error code.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW rejects images whose length is not a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one DMA buffer holds the request header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* FW may accept fewer bytes than requested; advance by
		 * what was actually written
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* change_status is valid here: the commit above succeeded */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
			dev_err(&adapter->pdev->dev,
				"System reboot required for new FW"
				" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3609
3610 #define UFI_TYPE2               2
3611 #define UFI_TYPE3               3
3612 #define UFI_TYPE4               4
3613 static int be_get_ufi_type(struct be_adapter *adapter,
3614                            struct flash_file_hdr_g2 *fhdr)
3615 {
3616         if (fhdr == NULL)
3617                 goto be_get_ufi_exit;
3618
3619         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3620                 return UFI_TYPE4;
3621         else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3622                 return UFI_TYPE3;
3623         else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3624                 return UFI_TYPE2;
3625
3626 be_get_ufi_exit:
3627         dev_err(&adapter->pdev->dev,
3628                 "UFI and Interface are not compatible for flashing\n");
3629         return -1;
3630 }
3631
/* Flash a BE2/BE3/Skyhawk UFI image: derive the UFI type from the file
 * header and dispatch to the matching generation-specific flash routine.
 * Returns 0 on success or a negative/command error code.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* one DMA buffer is reused for all write_flashrom commands */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr);

	/* NOTE(review): the gen3 header is read unconditionally, even when
	 * the UFI was identified as gen2 above; the gen2 path below ignores
	 * the loop entirely (neither TYPE4 nor TYPE3 matches) — confirm.
	 */
	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* imageid 1 identifies a flashable firmware image */
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			if (ufi_type == UFI_TYPE4)
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
			else if (ufi_type == UFI_TYPE3)
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3689
3690 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3691 {
3692         const struct firmware *fw;
3693         int status;
3694
3695         if (!netif_running(adapter->netdev)) {
3696                 dev_err(&adapter->pdev->dev,
3697                         "Firmware load not allowed (interface is down)\n");
3698                 return -1;
3699         }
3700
3701         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3702         if (status)
3703                 goto fw_exit;
3704
3705         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3706
3707         if (lancer_chip(adapter))
3708                 status = lancer_fw_download(adapter, fw);
3709         else
3710                 status = be_fw_download(adapter, fw);
3711
3712 fw_exit:
3713         release_firmware(fw);
3714         return status;
3715 }
3716
/* net_device callbacks for the benet driver; netpoll support is only
 * compiled in when CONFIG_NET_POLL_CONTROLLER is set.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open               = be_open,
	.ndo_stop               = be_close,
	.ndo_start_xmit         = be_xmit,
	.ndo_set_rx_mode        = be_set_rx_mode,
	.ndo_set_mac_address    = be_mac_addr_set,
	.ndo_change_mtu         = be_change_mtu,
	.ndo_get_stats64        = be_get_stats64,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_vlan_rx_add_vid    = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
	.ndo_set_vf_mac         = be_set_vf_mac,
	.ndo_set_vf_vlan        = be_set_vf_vlan,
	.ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
	.ndo_get_vf_config      = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = be_netpoll,
#endif
};
3736
/* One-time netdev setup: offload feature flags, netdev/ethtool ops and
 * one NAPI context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	/* RX hashing is only meaningful with multiple RX queues */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3768
3769 static void be_unmap_pci_bars(struct be_adapter *adapter)
3770 {
3771         if (adapter->csr)
3772                 pci_iounmap(adapter->pdev, adapter->csr);
3773         if (adapter->db)
3774                 pci_iounmap(adapter->pdev, adapter->db);
3775 }
3776
/* Doorbell BAR number: BAR 0 on Lancer and on VFs, BAR 4 otherwise */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3784
3785 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3786 {
3787         if (skyhawk_chip(adapter)) {
3788                 adapter->roce_db.size = 4096;
3789                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3790                                                               db_bar(adapter));
3791                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3792                                                                db_bar(adapter));
3793         }
3794         return 0;
3795 }
3796
3797 static int be_map_pci_bars(struct be_adapter *adapter)
3798 {
3799         u8 __iomem *addr;
3800         u32 sli_intf;
3801
3802         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3803         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3804                                 SLI_INTF_IF_TYPE_SHIFT;
3805
3806         if (BEx_chip(adapter) && be_physfn(adapter)) {
3807                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3808                 if (adapter->csr == NULL)
3809                         return -ENOMEM;
3810         }
3811
3812         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3813         if (addr == NULL)
3814                 goto pci_map_err;
3815         adapter->db = addr;
3816
3817         be_roce_map_pci_bars(adapter);
3818         return 0;
3819
3820 pci_map_err:
3821         be_unmap_pci_bars(adapter);
3822         return -ENOMEM;
3823 }
3824
3825 static void be_ctrl_cleanup(struct be_adapter *adapter)
3826 {
3827         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3828
3829         be_unmap_pci_bars(adapter);
3830
3831         if (mem->va)
3832                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3833                                   mem->dma);
3834
3835         mem = &adapter->rx_filter;
3836         if (mem->va)
3837                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3838                                   mem->dma);
3839 }
3840
/* Map PCI BARs, allocate the mailbox and rx-filter DMA buffers and set
 * up the command locks.  Resources are acquired in order; on failure
 * the goto chain below unwinds everything acquired so far.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				 SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem_align aliases into the over-allocated buffer above */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);
	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* save config space so it can be restored after an error reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3898
3899 static void be_stats_cleanup(struct be_adapter *adapter)
3900 {
3901         struct be_dma_mem *cmd = &adapter->stats_cmd;
3902
3903         if (cmd->va)
3904                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3905                                   cmd->va, cmd->dma);
3906 }
3907
3908 static int be_stats_init(struct be_adapter *adapter)
3909 {
3910         struct be_dma_mem *cmd = &adapter->stats_cmd;
3911
3912         if (lancer_chip(adapter))
3913                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3914         else if (BE2_chip(adapter))
3915                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3916         else
3917                 /* BE3 and Skyhawk */
3918                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3919
3920         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3921                                      GFP_KERNEL);
3922         if (cmd->va == NULL)
3923                 return -1;
3924         memset(cmd->va, 0, cmd->size);
3925         return 0;
3926 }
3927
/* PCI remove callback: tear down in reverse order of probe so that no
 * work item or FW command can run against already-freed state.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	/* stop the recovery worker before the netdev goes away */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3958
3959 bool be_is_wol_supported(struct be_adapter *adapter)
3960 {
3961         return ((adapter->wol_cap & BE_WOL_CAP) &&
3962                 !be_is_wol_excluded(adapter)) ? true : false;
3963 }
3964
3965 u32 be_get_fw_log_level(struct be_adapter *adapter)
3966 {
3967         struct be_dma_mem extfat_cmd;
3968         struct be_fat_conf_params *cfgs;
3969         int status;
3970         u32 level = 0;
3971         int j;
3972
3973         if (lancer_chip(adapter))
3974                 return 0;
3975
3976         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3977         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3978         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3979                                              &extfat_cmd.dma);
3980
3981         if (!extfat_cmd.va) {
3982                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3983                         __func__);
3984                 goto err;
3985         }
3986
3987         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3988         if (!status) {
3989                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3990                                                 sizeof(struct be_cmd_resp_hdr));
3991                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3992                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3993                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3994                 }
3995         }
3996         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3997                             extfat_cmd.dma);
3998 err:
3999         return level;
4000 }
4001
/* One-time FW queries done at probe time: controller attributes, ACPI
 * WOL capability and the FW trace level (used to seed msg_enable).
 * Returns 0 on success or the first failing command's status.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
        int status;
        u32 level;

        status = be_cmd_get_cntl_attributes(adapter);
        if (status)
                return status;

        status = be_cmd_get_acpi_wol_cap(adapter);
        if (status) {
                /* in case of a failure to get wol capabilities
                 * check the exclusion list to determine WOL capability */
                if (!be_is_wol_excluded(adapter))
                        adapter->wol_cap |= BE_WOL_CAP;
        }

        if (be_is_wol_supported(adapter))
                adapter->wol = true;

        /* Must be a power of 2 or else MODULO will BUG_ON */
        adapter->be_get_temp_freq = 64;

        /* enable HW-error messages unless the FW UART trace level was
         * configured above the default — NOTE(review): confirm intent
         */
        level = be_get_fw_log_level(adapter);
        adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

        return 0;
}
4030
4031 static int lancer_recover_func(struct be_adapter *adapter)
4032 {
4033         int status;
4034
4035         status = lancer_test_and_set_rdy_state(adapter);
4036         if (status)
4037                 goto err;
4038
4039         if (netif_running(adapter->netdev))
4040                 be_close(adapter->netdev);
4041
4042         be_clear(adapter);
4043
4044         adapter->hw_error = false;
4045         adapter->fw_timeout = false;
4046
4047         status = be_setup(adapter);
4048         if (status)
4049                 goto err;
4050
4051         if (netif_running(adapter->netdev)) {
4052                 status = be_open(adapter->netdev);
4053                 if (status)
4054                         goto err;
4055         }
4056
4057         dev_err(&adapter->pdev->dev,
4058                 "Adapter SLIPORT recovery succeeded\n");
4059         return 0;
4060 err:
4061         if (adapter->eeh_error)
4062                 dev_err(&adapter->pdev->dev,
4063                         "Adapter SLIPORT recovery failed\n");
4064
4065         return status;
4066 }
4067
/* Delayed work, re-armed every second: polls for adapter errors and,
 * on Lancer chips, attempts an in-place SLIPORT recovery. Skipped
 * while an EEH error is being handled (EEH has its own recovery path).
 */
static void be_func_recovery_task(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter,  func_recovery_work.work);
        int status;

        be_detect_error(adapter);

        if (adapter->hw_error && lancer_chip(adapter)) {

                if (adapter->eeh_error)
                        goto out;

                rtnl_lock();
                netif_device_detach(adapter->netdev);
                rtnl_unlock();

                status = lancer_recover_func(adapter);

                /* the netdev stays detached when recovery failed */
                if (!status)
                        netif_device_attach(adapter->netdev);
        }

out:
        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
}
4095
/* Periodic housekeeping work, re-armed every second: reaps MCC
 * completions while interrupts are off, refreshes HW stats, samples
 * the die temperature, replenishes starved RX rings and adapts the
 * EQ delay.
 */
static void be_worker(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);
        struct be_rx_obj *rxo;
        struct be_eq_obj *eqo;
        int i;

        /* when interrupts are not yet enabled, just reap any pending
        * mcc completions */
        if (!netif_running(adapter->netdev)) {
                local_bh_disable();
                be_process_mcc(adapter);
                local_bh_enable();
                goto reschedule;
        }

        /* don't pile up stats requests; wait for the previous one */
        if (!adapter->stats_cmd_sent) {
                if (lancer_chip(adapter))
                        lancer_cmd_get_pport_stats(adapter,
                                                &adapter->stats_cmd);
                else
                        be_cmd_get_stats(adapter, &adapter->stats_cmd);
        }

        /* temperature is sampled only every be_get_temp_freq ticks */
        if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
                be_cmd_get_die_temperature(adapter);

        for_all_rx_queues(adapter, rxo, i) {
                if (rxo->rx_post_starved) {
                        rxo->rx_post_starved = false;
                        be_post_rx_frags(rxo, GFP_KERNEL);
                }
        }

        for_all_evt_queues(adapter, eqo, i)
                be_eqd_update(adapter, eqo);

reschedule:
        adapter->work_counter++;
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4138
4139 static bool be_reset_required(struct be_adapter *adapter)
4140 {
4141         return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
4142 }
4143
4144 static char *mc_name(struct be_adapter *adapter)
4145 {
4146         if (adapter->function_mode & FLEX10_MODE)
4147                 return "FLEX10";
4148         else if (adapter->function_mode & VNIC_MODE)
4149                 return "vNIC";
4150         else if (adapter->function_mode & UMC_ENABLED)
4151                 return "UMC";
4152         else
4153                 return "";
4154 }
4155
/* Printable name for this PCI function's role: physical or virtual. */
static inline char *func_name(struct be_adapter *adapter)
{
        if (be_physfn(adapter))
                return "PF";

        return "VF";
}
4160
/* PCI probe: bring up a newly bound NIC function. Allocates the
 * netdev (the adapter struct is its private area), sets the DMA mask,
 * syncs with FW ready state, resets the function when safe, creates
 * all HW objects via be_setup(), then registers the netdev and starts
 * the periodic recovery worker. Errors unwind through the labels in
 * reverse order of acquisition.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;
        char port_name;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
        if (netdev == NULL) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);
        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        /* prefer 64-bit DMA; fall back to 32-bit */
        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        /* AER is best-effort: probe continues if it can't be enabled */
        status = pci_enable_pcie_error_reporting(pdev);
        if (status)
                dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_fw_wait_ready(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        /* skip the function reset when VFs are still enabled from a
         * previous driver instance (see be_reset_required())
         */
        if (be_reset_required(adapter)) {
                status = be_cmd_reset_function(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* The INTR bit may be set in the card when probed by a kdump kernel
         * after a crash.
         */
        if (!lancer_chip(adapter))
                be_intr_set(adapter, false);

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_initial_config(adapter);
        if (status)
                goto stats_clean;

        INIT_DELAYED_WORK(&adapter->work, be_worker);
        INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
        adapter->rx_fc = adapter->tx_fc = true;

        status = be_setup(adapter);
        if (status)
                goto stats_clean;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;

        be_roce_dev_add(adapter);

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));

        be_cmd_query_port_name(adapter, &port_name);

        dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
                 func_name(adapter), mc_name(adapter), port_name);

        return 0;

unsetup:
        be_clear(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_netdev:
        free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}
4281
/* PM suspend: arm WOL if enabled, stop the recovery worker, quiesce
 * the data path, free all HW objects and put the device into the
 * requested PCI power state. Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;

        if (adapter->wol)
                be_setup_wol(adapter, true);

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}
4305
4306 static int be_resume(struct pci_dev *pdev)
4307 {
4308         int status = 0;
4309         struct be_adapter *adapter = pci_get_drvdata(pdev);
4310         struct net_device *netdev =  adapter->netdev;
4311
4312         netif_device_detach(netdev);
4313
4314         status = pci_enable_device(pdev);
4315         if (status)
4316                 return status;
4317
4318         pci_set_power_state(pdev, 0);
4319         pci_restore_state(pdev);
4320
4321         /* tell fw we're ready to fire cmds */
4322         status = be_cmd_fw_init(adapter);
4323         if (status)
4324                 return status;
4325
4326         be_setup(adapter);
4327         if (netif_running(netdev)) {
4328                 rtnl_lock();
4329                 be_open(netdev);
4330                 rtnl_unlock();
4331         }
4332
4333         schedule_delayed_work(&adapter->func_recovery_work,
4334                               msecs_to_jiffies(1000));
4335         netif_device_attach(netdev);
4336
4337         if (adapter->wol)
4338                 be_setup_wol(adapter, false);
4339
4340         return 0;
4341 }
4342
/*
 * An FLR will stop BE from DMAing any data.
 *
 * Shutdown/reboot hook: cancel the periodic workers, detach the netdev
 * and reset the function so no DMA is in flight across reboot/kexec.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        /* probe may have failed before drvdata was set */
        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);
        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(adapter->netdev);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}
4362
/* EEH callback: a PCI channel error was detected. Quiesce the driver,
 * tear down HW objects, and tell the EEH core whether to attempt a
 * slot reset or give up (permanent failure).
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        /* makes the periodic recovery worker stand down (see
         * be_func_recovery_task)
         */
        adapter->eeh_error = true;

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        rtnl_lock();
        netif_device_detach(netdev);
        rtnl_unlock();

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        /* The error could cause the FW to trigger a flash debug dump.
         * Resetting the card while flash dump is in progress
         * can cause it not to recover; wait for it to finish.
         * Wait only for first function as it is needed only once per
         * adapter.
         */
        if (pdev->devfn == 0)
                ssleep(30);

        return PCI_ERS_RESULT_NEED_RESET;
}
4402
/* EEH slot-reset callback: re-enable the device after the slot was
 * reset, restore config space and wait for FW readiness. Returns
 * RECOVERED so the EEH core proceeds to be_eeh_resume(), or
 * DISCONNECT if the device cannot be brought back.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        be_clear_all_error(adapter);

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, 0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        dev_info(&adapter->pdev->dev,
                 "Waiting for FW to be ready after EEH reset\n");
        status = be_fw_wait_ready(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_cleanup_aer_uncorrect_error_status(pdev);
        return PCI_ERS_RESULT_RECOVERED;
}
4429
/* EEH resume callback: after a successful slot reset, re-init FW,
 * reset the function, re-create HW objects and restart the data path
 * and the recovery worker. On any failure only an error is logged;
 * the netdev is left detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_cmd_reset_function(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4466
/* EEH (PCI error recovery) callbacks */
static const struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};
4472
/* PCI driver glue: binds the device IDs in be_dev_ids to the
 * probe/remove/PM/shutdown/EEH callbacks defined in this file.
 */
static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};
4483
4484 static int __init be_init_module(void)
4485 {
4486         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4487             rx_frag_size != 2048) {
4488                 printk(KERN_WARNING DRV_NAME
4489                         " : Module param rx_frag_size must be 2048/4096/8192."
4490                         " Using 2048\n");
4491                 rx_frag_size = 2048;
4492         }
4493
4494         return pci_register_driver(&be_driver);
4495 }
4496 module_init(be_init_module);
4497
/* Module unload: unregister the PCI driver (invokes be_remove() for
 * every bound device).
 */
static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);